RDMA v5.17 merge window pull request
Substantially all bug fixes and cleanups:

 - Update drivers to use common helpers for GUIDs, pkeys, bitmaps,
   memset_startat, and others

 - General code cleanups from bots

 - Simplify some of the rxe pool code in preparation for a larger
   rework

 - Clean out old stuff from hns, including all support for hip06
   devices

 - Fix a bug where GID table entries could be missed if the table had
   holes in it

 - Rename paths and sessions in rtrs for better understandability

 - Consolidate the roce source port selection code

 - NDR speed support in mlx5

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmHgct4ACgkQOG33FX4g
mxpFuQ//UqtbxowPeDB9bjJ5OLKZ1fGx0FxWkfBeR1cr0apboBNqdK1WOiz5Q7un
F2xpASNEsOCr6JMMBhHMOvNiMjRSs33GvydyBj5T7LRx/QGie+0AeSzlS314/mJs
NXvOinD21l1YEKIodw4Pfhtdl2QVmEvRpUJnccGyEGUKQ4jpUwVCTfa/tpoMVD5y
MsWqv+xOrhsmDahW2nUSXHhBIazVqYETg4EE8O7J1Lb48F98keVOdVkH5wL4nmKj
gl6oyN9lkw1sWDJBnom7mgd38L2M42mRtQkiFdMdnpj5D5jbLTcGv30GgBfyMPr6
8tI3sXcAJh3Wk3TUu2jEh2F+SjsHKRTqVjGVwQbkvEuhFK2TSHAhGC+gmP6ueZKG
diHKcJVNm6rBX6L/EczYQ7hjOiMzJLlLjhZnr8+2Lqw0X+DzQbN19RUb+XX8iqkP
ITM5LPQHf+7N8Rz2W7jcHk1h3wLv1VcKktErc6mUTHdxxpJv/XEsmLP22kqHgSyx
So6yAlMtMMMZfP6taWkpTzC6KoduFJwWARf3zYoJreeWmL18F4+Tha2th8xnQMi2
cq0UOu1WnVEFwiIzdMa3aCtTDxXQ6UgPVk1E24RaiZTEBp5hO5+Xmn56du7G89Cb
nlZbAudbh3aElbj9ptUsJGSVowGgSLJvvfgFyZz2u+wFBqdJnUk=
=EL3r
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Another small cycle. Mostly cleanups and bug fixes, quite a bit
  assisted from bots. There are a few new syzkaller splats that haven't
  been solved yet but they should get into the rcs in a few weeks, I
  think.

  Summary:

   - Update drivers to use common helpers for GUIDs, pkeys, bitmaps,
     memset_startat, and others

   - General code cleanups from bots

   - Simplify some of the rxe pool code in preparation for a larger
     rework

   - Clean out old stuff from hns, including all support for hip06
     devices

   - Fix a bug where GID table entries could be missed if the table had
     holes in it

   - Rename paths and sessions in rtrs for better understandability

   - Consolidate the roce source port selection code

   - NDR speed support in mlx5"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (83 commits)
  RDMA/irdma: Remove the redundant return
  RDMA/rxe: Use the standard method to produce udp source port
  RDMA/irdma: Make the source udp port vary
  RDMA/hns: Replace get_udp_sport with rdma_get_udp_sport
  RDMA/core: Calculate UDP source port based on flow label or lqpn/rqpn
  IB/qib: Fix typos
  RDMA/rtrs-clt: Rename rtrs_clt to rtrs_clt_sess
  RDMA/rtrs-srv: Rename rtrs_srv to rtrs_srv_sess
  RDMA/rtrs-clt: Rename rtrs_clt_sess to rtrs_clt_path
  RDMA/rtrs-srv: Rename rtrs_srv_sess to rtrs_srv_path
  RDMA/rtrs: Rename rtrs_sess to rtrs_path
  RDMA/hns: Modify the hop num of HIP09 EQ to 1
  IB/iser: Align coding style across driver
  IB/iser: Remove un-needed casting to/from void pointer
  IB/iser: Don't suppress send completions
  IB/iser: Rename ib_ret local variable
  IB/iser: Fix RNR errors
  IB/iser: Remove deprecated pi_guard module param
  IB/mlx5: Expose NDR speed through MAD
  RDMA/cxgb4: Set queue pair state when being queried
  ...
commit 747c19eb75
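
A note on the "consolidate the roce source port selection code" item: the
rxe, irdma, and hns commits in the list above all switch to one shared
core helper that derives the RoCE v2 UDP source port from the flow label
(or, when the flow label is zero, from a value computed out of the local
and remote QPNs). A minimal sketch of the folding step, written from
memory of include/rdma/ib_verbs.h — check the tree for the authoritative
version:

    /* Fold a 20-bit IPv6 flow label into the 14 low bits of a UDP source
     * port inside the RoCE v2 valid encap range (0xC000-0xFFFF).
     */
    #include <linux/types.h>

    #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN 0xC000

    static u16 flow_label_to_udp_sport(u32 fl)
    {
            u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;

            fl_low ^= fl_high >> 14;        /* mix the high 6 bits in */

            return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
    }

This keeps the source port stable per flow (which ECMP relies on) while
spreading different flows across ports.
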
@@ -433,7 +433,7 @@ static void msg_conf(void *priv, int errno)
 	schedule_work(&iu->work);
 }
 
-static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
+static int send_usr_msg(struct rtrs_clt_sess *rtrs, int dir,
 			struct rnbd_iu *iu, struct kvec *vec,
 			size_t len, struct scatterlist *sg, unsigned int sg_len,
 			void (*conf)(struct work_struct *work),
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 				    struct request *rq,
 				    struct rnbd_iu *iu)
 {
-	struct rtrs_clt *rtrs = dev->sess->rtrs;
+	struct rtrs_clt_sess *rtrs = dev->sess->rtrs;
 	struct rtrs_permit *permit = iu->permit;
 	struct rnbd_msg_io msg;
 	struct rtrs_clt_req_ops req_ops;

@@ -75,7 +75,7 @@ struct rnbd_cpu_qlist {
 
 struct rnbd_clt_session {
 	struct list_head        list;
-	struct rtrs_clt        *rtrs;
+	struct rtrs_clt_sess        *rtrs;
 	wait_queue_head_t       rtrs_waitq;
 	bool                    rtrs_ready;
 	struct rnbd_cpu_qlist	__percpu
@@ -263,15 +263,15 @@ out:
 	kfree(srv_sess);
 }
 
-static int create_sess(struct rtrs_srv *rtrs)
+static int create_sess(struct rtrs_srv_sess *rtrs)
 {
 	struct rnbd_srv_session *srv_sess;
-	char sessname[NAME_MAX];
+	char pathname[NAME_MAX];
 	int err;
 
-	err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
+	err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname));
 	if (err) {
-		pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);
+		pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err);
 
 		return err;
 	}
@@ -284,8 +284,8 @@ static int create_sess(struct rtrs_srv *rtrs)
 			 offsetof(struct rnbd_dev_blk_io, bio),
 			 BIOSET_NEED_BVECS);
 	if (err) {
-		pr_err("Allocating srv_session for session %s failed\n",
-		       sessname);
+		pr_err("Allocating srv_session for path %s failed\n",
+		       pathname);
 		kfree(srv_sess);
 		return err;
 	}
@@ -298,14 +298,14 @@ static int create_sess(struct rtrs_srv *rtrs)
 	mutex_unlock(&sess_lock);
 
 	srv_sess->rtrs = rtrs;
-	strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));
+	strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname));
 
 	rtrs_srv_set_sess_priv(rtrs, srv_sess);
 
 	return 0;
 }
 
-static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
+static int rnbd_srv_link_ev(struct rtrs_srv_sess *rtrs,
 			    enum rtrs_srv_link_ev ev, void *priv)
 {
 	struct rnbd_srv_session *srv_sess = priv;

@@ -20,7 +20,7 @@
 struct rnbd_srv_session {
 	/* Entry inside global sess_list */
 	struct list_head        list;
-	struct rtrs_srv		*rtrs;
+	struct rtrs_srv_sess	*rtrs;
 	char			sessname[NAME_MAX];
 	int			queue_depth;
 	struct bio_set		sess_bio_set;

@@ -956,7 +956,7 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
 {
 	struct ib_gid_table *table;
 	unsigned long flags;
-	int res = -EINVAL;
+	int res;
 
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
@@ -964,9 +964,15 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
 	table = rdma_gid_table(device, port_num);
 	read_lock_irqsave(&table->rwlock, flags);
 
-	if (index < 0 || index >= table->sz ||
-	    !is_gid_entry_valid(table->data_vec[index]))
+	if (index < 0 || index >= table->sz) {
+		res = -EINVAL;
 		goto done;
+	}
 
+	if (!is_gid_entry_valid(table->data_vec[index])) {
+		res = -ENOENT;
+		goto done;
+	}
+
 	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
 	res = 0;

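The two hunks above are the heart of the "GID table holes" fix:
rdma_query_gid() now distinguishes an out-of-range index (-EINVAL) from a
valid index whose slot is simply empty (-ENOENT). Callers that previously
stopped scanning at the first error can now skip holes and keep going, as
the hunks that follow do. A sketch of the resulting caller pattern
(find_gid_on_port() is a hypothetical name; the loop bound and the
rdma_query_gid() signature follow the hunks in this series):

    static bool find_gid_on_port(struct ib_device *device, u32 port,
                                 const union ib_gid *wanted)
    {
            union ib_gid tmp;
            int i;

            for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
                 ++i) {
                    if (rdma_query_gid(device, port, i, &tmp))
                            continue;       /* -ENOENT: hole in the table */
                    if (!memcmp(&tmp, wanted, sizeof(tmp)))
                            return true;
            }
            return false;
    }
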
@@ -766,6 +766,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	unsigned int p;
 	u16 pkey, index;
 	enum ib_port_state port_state;
+	int ret;
 	int i;
 
 	cma_dev = NULL;
@@ -784,9 +785,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 
 		if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
 			continue;
-		for (i = 0; !rdma_query_gid(cur_dev->device,
-					    p, i, &gid);
-		     i++) {
+
+		for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
+		     ++i) {
+			ret = rdma_query_gid(cur_dev->device, p, i,
+					     &gid);
+			if (ret)
+				continue;
+
 			if (!memcmp(&gid, dgid, sizeof(gid))) {
 				cma_dev = cur_dev;
 				sgid = gid;
@@ -4033,8 +4039,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
-	req.private_data_len = offset + conn_param->private_data_len;
-	if (req.private_data_len < conn_param->private_data_len)
+	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
 		return -EINVAL;
 
 	if (req.private_data_len) {
@@ -4093,8 +4098,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
-	req.private_data_len = offset + conn_param->private_data_len;
-	if (req.private_data_len < conn_param->private_data_len)
+	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
 		return -EINVAL;
 
 	if (req.private_data_len) {

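The last two hunks above replace an open-coded wraparound test with
check_add_overflow() from <linux/overflow.h>, which stores the sum through
its third argument and returns true if the addition wrapped. A standalone
illustration (total_len() is a made-up helper):

    #include <linux/overflow.h>

    static int total_len(u32 offset, u32 data_len, u32 *out)
    {
            /* *out = offset + data_len; a true return means it wrapped */
            if (check_add_overflow(offset, data_len, out))
                    return -EINVAL;
            return 0;
    }
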
@@ -2461,7 +2461,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
 		     ++i) {
 			ret = rdma_query_gid(device, port, i, &tmp_gid);
 			if (ret)
-				return ret;
+				continue;
+
 			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
 				*port_num = port;
 				if (index)

@@ -433,6 +433,7 @@ static struct attribute *port_default_attrs[] = {
 	&ib_port_attr_link_layer.attr,
 	NULL
 };
+ATTRIBUTE_GROUPS(port_default);
 
 static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
 {
@@ -774,7 +775,7 @@ static void ib_port_gid_attr_release(struct kobject *kobj)
 static struct kobj_type port_type = {
 	.release       = ib_port_release,
 	.sysfs_ops     = &port_sysfs_ops,
-	.default_attrs = port_default_attrs
+	.default_groups = port_default_groups,
 };
 
 static struct kobj_type gid_attr_type = {

@@ -227,7 +227,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
 					const struct mmu_interval_notifier_ops *ops)
 {
 	struct ib_umem_odp *umem_odp;
-	struct mm_struct *mm;
 	int ret;
 
 	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
@@ -241,7 +240,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
 	umem_odp->umem.length = size;
 	umem_odp->umem.address = addr;
 	umem_odp->umem.writable = ib_access_writable(access);
-	umem_odp->umem.owning_mm = mm = current->mm;
+	umem_odp->umem.owning_mm = current->mm;
 	umem_odp->notifier.ops = ops;
 
 	umem_odp->page_shift = PAGE_SHIFT;

@@ -1399,7 +1399,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
 					     IB_SIGNAL_REQ_WR;
 	attr.qp_type = cmd->qp_type;
-	attr.create_flags = 0;
 
 	attr.cap.max_send_wr = cmd->max_send_wr;
 	attr.cap.max_recv_wr = cmd->max_recv_wr;

@@ -262,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
 		       u16 index, u16 *pkey)
 {
-	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+	if (index > 0)
+		return -EINVAL;
 
-	/* Ignore port_num */
+	*pkey = IB_DEFAULT_PKEY_FULL;
 
-	memset(pkey, 0, sizeof(*pkey));
-	return bnxt_qplib_get_pkey(&rdev->qplib_res,
-				   &rdev->qplib_res.pkey_tbl, index, pkey);
+	return 0;
 }
 
 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,

@@ -893,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
 	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
 					       qplib_srq);
 	struct ib_event ib_event;
-	int rc = 0;
 
 	ib_event.device = &srq->rdev->ibdev;
 	ib_event.element.srq = &srq->ib_srq;
@@ -907,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
 		(*srq->ib_srq.event_handler)(&ib_event,
 					     srq->ib_srq.srq_context);
 	}
-	return rc;
+	return 0;
 }
 
 static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,

@@ -46,6 +46,7 @@
 #include <linux/delay.h>
 #include <linux/prefetch.h>
 #include <linux/if_ether.h>
+#include <rdma/ib_mad.h>
 
 #include "roce_hsi.h"
 
@@ -1232,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_modify_qp req;
 	struct creq_modify_qp_resp resp;
-	u16 cmd_flags = 0, pkey;
+	u16 cmd_flags = 0;
 	u32 temp32[4];
 	u32 bmask;
 	int rc;
@@ -1255,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
 		req.access = qp->access;
 
-	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
-		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
-					 qp->pkey_index, &pkey))
-			req.pkey = cpu_to_le16(pkey);
-	}
+	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
+		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
+
 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
 		req.qkey = cpu_to_le32(qp->qkey);
 

@@ -555,7 +555,7 @@ skip_ctx_setup:
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 {
-	kfree(rcfw->cmdq.cmdq_bitmap);
+	bitmap_free(rcfw->cmdq.cmdq_bitmap);
 	kfree(rcfw->qp_tbl);
 	kfree(rcfw->crsqe_tbl);
 	bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
@@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 	struct bnxt_qplib_sg_info sginfo = {};
 	struct bnxt_qplib_cmdq_ctx *cmdq;
 	struct bnxt_qplib_creq_ctx *creq;
-	u32 bmap_size = 0;
 
 	rcfw->pdev = res->pdev;
 	cmdq = &rcfw->cmdq;
@@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 	if (!rcfw->crsqe_tbl)
 		goto fail;
 
-	bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
-	cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
+	cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL);
 	if (!cmdq->cmdq_bitmap)
 		goto fail;
 
-	cmdq->bmap_size = bmap_size;
-
 	/* Allocate one extra to hold the QP1 entries */
 	rcfw->qp_tbl_size = qp_tbl_sz + 1;
 	rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
@@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 	iounmap(cmdq->cmdq_mbox.reg.bar_reg);
 	iounmap(creq->creq_db.reg.bar_reg);
 
-	indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size);
-	if (indx != cmdq->bmap_size)
+	indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth);
+	if (indx != rcfw->cmdq_depth)
 		dev_err(&rcfw->pdev->dev,
 			"disabling RCFW with pending cmd-bit %lx\n", indx);
 

@@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx {
 	wait_queue_head_t		waitq;
 	unsigned long			flags;
 	unsigned long			*cmdq_bitmap;
-	u32				bmap_size;
 	u32				seq_num;
 };
 

@@ -649,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
 }
 
-static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
-				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
-	if (!pkey_tbl->tbl)
-		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
-	else
-		kfree(pkey_tbl->tbl);
-
-	pkey_tbl->tbl = NULL;
-	pkey_tbl->max = 0;
-	pkey_tbl->active = 0;
-}
-
-static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
-				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
-				     u16 max)
-{
-	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
-	if (!pkey_tbl->tbl)
-		return -ENOMEM;
-
-	pkey_tbl->max = max;
-	return 0;
-};
-
 /* PDs */
 int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
 {
@@ -843,24 +818,6 @@ unmap_io:
 	return -ENOMEM;
 }
 
-/* PKEYs */
-static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
-	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
-	pkey_tbl->active = 0;
-}
-
-static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
-				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
-	u16 pkey = 0xFFFF;
-
-	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
-
-	/* pkey default = 0xFFFF */
-	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
-}
-
 /* Stats */
 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 				      struct bnxt_qplib_stats *stats)
@@ -891,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
 
 void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
 {
-	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
 	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
 }
 
 int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
 {
 	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
-	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
 
 	return 0;
 }
 
 void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
 {
-	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
 	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
 	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
 	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
@@ -924,10 +878,6 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
 	if (rc)
 		goto fail;
 
-	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
-	if (rc)
-		goto fail;
-
 	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
 	if (rc)
 		goto fail;

@@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl {
 	u8				*vlan;
 };
 
-struct bnxt_qplib_pkey_tbl {
-	u16				*tbl;
-	u16				max;
-	u16				active;
-};
-
 struct bnxt_qplib_dpi {
 	u32				dpi;
 	void __iomem			*dbr;
@@ -258,7 +252,6 @@ struct bnxt_qplib_res {
 	struct bnxt_qplib_rcfw		*rcfw;
 	struct bnxt_qplib_pd_tbl	pd_tbl;
 	struct bnxt_qplib_sgid_tbl	sgid_tbl;
-	struct bnxt_qplib_pkey_tbl	pkey_tbl;
 	struct bnxt_qplib_dpi_tbl	dpi_tbl;
 	bool				prio;
 	bool				is_vf;

@@ -146,17 +146,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_srq = le16_to_cpu(sb->max_srq);
 	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
 	attr->max_srq_sges = sb->max_srq_sge;
-	attr->max_pkey = le32_to_cpu(sb->max_pkeys);
-	/*
-	 * Some versions of FW reports more than 0xFFFF.
-	 * Restrict it for now to 0xFFFF to avoid
-	 * reporting trucated value
-	 */
-	if (attr->max_pkey > 0xFFFF) {
-		/* ib_port_attr::pkey_tbl_len is u16 */
-		attr->max_pkey = 0xFFFF;
-	}
-
+	attr->max_pkey = 1;
 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
 	attr->l2_db_size = (sb->l2_db_space_size + 1) *
 			   (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
@@ -414,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	return rc;
 }
 
-/* pkeys */
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
-			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
-			u16 *pkey)
-{
-	if (index == 0xFFFF) {
-		*pkey = 0xFFFF;
-		return 0;
-	}
-	if (index >= pkey_tbl->max) {
-		dev_err(&res->pdev->dev,
-			"Index %d exceeded PKEY table max (%d)\n",
-			index, pkey_tbl->max);
-		return -EINVAL;
-	}
-	memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
-	return 0;
-}
-
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
-			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
-			bool update)
-{
-	int i, rc = 0;
-
-	if (!pkey_tbl) {
-		dev_err(&res->pdev->dev, "PKEY table not allocated\n");
-		return -EINVAL;
-	}
-
-	/* Do we need a pkey_lock here? */
-	if (!pkey_tbl->active) {
-		dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
-		return -ENOMEM;
-	}
-	for (i = 0; i < pkey_tbl->max; i++) {
-		if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
-			break;
-	}
-	if (i == pkey_tbl->max) {
-		dev_err(&res->pdev->dev,
-			"PKEY 0x%04x not found in the pkey table\n", *pkey);
-		return -ENOMEM;
-	}
-	memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
-	pkey_tbl->active--;
-
-	/* unlock */
-	return rc;
-}
-
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
-			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
-			bool update)
-{
-	int i, free_idx, rc = 0;
-
-	if (!pkey_tbl) {
-		dev_err(&res->pdev->dev, "PKEY table not allocated\n");
-		return -EINVAL;
-	}
-
-	/* Do we need a pkey_lock here? */
-	if (pkey_tbl->active == pkey_tbl->max) {
-		dev_err(&res->pdev->dev, "PKEY table is full\n");
-		return -ENOMEM;
-	}
-	free_idx = pkey_tbl->max;
-	for (i = 0; i < pkey_tbl->max; i++) {
-		if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
-			return -EALREADY;
-		else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
-			free_idx = i;
-	}
-	if (free_idx == pkey_tbl->max) {
-		dev_err(&res->pdev->dev,
-			"PKEY table is FULL but count is not MAX??\n");
-		return -ENOMEM;
-	}
-	/* Add PKEY to the pkey_tbl */
-	memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
-	pkey_tbl->active++;
-
-	/* unlock */
-	return rc;
-}
-
 /* AH */
 int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 			 bool block)

@@ -255,15 +255,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 			   struct bnxt_qplib_gid *gid, u16 gid_idx,
 			   const u8 *smac);
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
-			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
-			u16 *pkey);
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
-			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
-			bool update);
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
-			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
-			bool update);
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    struct bnxt_qplib_dev_attr *attr, bool vf);
 int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,

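Net effect of the bnxt_re hunks above: the driver's shadow pkey table and
its add/del/get helpers are gone, and the device reports exactly one
partition key, the default full-membership key IB_DEFAULT_PKEY_FULL
(0xFFFF, from <rdma/ib_mad.h>). For reference, the top bit of a pkey is
the membership bit, which is why 0xFFFF means "full member of the default
partition" — a tiny sketch with a hypothetical helper:

    #include <linux/types.h>

    #define IB_DEFAULT_PKEY_FULL    0xFFFF  /* from rdma/ib_mad.h */

    /* The high bit of a pkey is the membership bit: 1 = full member. */
    static bool pkey_is_full_member(u16 pkey)
    {
            return pkey & 0x8000;
    }
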
@@ -2471,7 +2471,8 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	skb_get(skb);
 	rpl = cplhdr(skb);
 	if (!is_t4(adapter_type)) {
-		skb_trim(skb, roundup(sizeof(*rpl5), 16));
+		BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
+		skb_trim(skb, sizeof(*rpl5));
 		rpl5 = (void *)rpl;
 		INIT_TP_WR(rpl5, ep->hwtid);
 	} else {
@@ -2487,7 +2488,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
 		opt2 |= T5_ISS_F;
 		rpl5 = (void *)rpl;
-		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
+		memset_after(rpl5, 0, iss);
 		if (peer2peer)
 			isn += 4;
 		rpl5->iss = cpu_to_be32(isn);

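The second hunk above uses memset_after() from <linux/string.h>, which
zeroes everything in an object laid out after the named member, replacing
the error-prone roundup() arithmetic. A toy example with a made-up struct:

    #include <linux/string.h>

    struct reply {
            u32 hdr;
            u32 iss;
            u8  pad[8];
    };

    static void clear_tail(struct reply *r)
    {
            /* zeroes r->pad (everything after 'iss'); hdr and iss keep
             * their current values
             */
            memset_after(r, 0, iss);
    }
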
@@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
 		alloc->last = obj + 1;
 		if (alloc->last >= alloc->max)
 			alloc->last = 0;
-		set_bit(obj, alloc->table);
+		__set_bit(obj, alloc->table);
 		obj += alloc->start;
 	} else
 		obj = -1;
@@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
 	obj -= alloc->start;
 
 	spin_lock_irqsave(&alloc->lock, flags);
-	clear_bit(obj, alloc->table);
+	__clear_bit(obj, alloc->table);
 	spin_unlock_irqrestore(&alloc->lock, flags);
 }
 
 int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
 			u32 reserved, u32 flags)
 {
-	int i;
-
 	alloc->start = start;
 	alloc->flags = flags;
 	if (flags & C4IW_ID_TABLE_F_RANDOM)
 		alloc->last = prandom_u32() % RANDOM_SKIP;
 	else
 		alloc->last = 0;
-	alloc->max  = num;
+	alloc->max = num;
 	spin_lock_init(&alloc->lock);
-	alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
-				     GFP_KERNEL);
+	alloc->table = bitmap_zalloc(num, GFP_KERNEL);
 	if (!alloc->table)
 		return -ENOMEM;
 
-	bitmap_zero(alloc->table, num);
 	if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
-		for (i = 0; i < reserved; ++i)
-			set_bit(i, alloc->table);
+		bitmap_set(alloc->table, 0, reserved);
 
 	return 0;
 }
 
 void c4iw_id_table_free(struct c4iw_id_table *alloc)
 {
-	kfree(alloc->table);
+	bitmap_free(alloc->table);
 }

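The id_table.c conversion above is the canonical bitmap-helper pattern
from the "common helpers for ... bitmaps" summary item: size the
allocation in bits with bitmap_zalloc() instead of hand-computing longs,
use bitmap_set() instead of a set_bit() loop, pair with bitmap_free(), and
switch to the non-atomic __set_bit()/__clear_bit() because the caller
already holds alloc->lock. Condensed sketch (make_id_bitmap() is a
hypothetical name):

    #include <linux/bitmap.h>

    static unsigned long *make_id_bitmap(unsigned int num,
                                         unsigned int reserved)
    {
            unsigned long *table;

            table = bitmap_zalloc(num, GFP_KERNEL); /* num bits, all 0 */
            if (!table)
                    return NULL;

            bitmap_set(table, 0, reserved); /* ids [0, reserved) in use */
            return table;   /* release with bitmap_free(), not kfree() */
    }
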
@@ -41,6 +41,7 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/inetdevice.h>
+#include <net/addrconf.h>
 #include <linux/io.h>
 
 #include <asm/irq.h>
@@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 		return -EINVAL;
 
 	dev = to_c4iw_dev(ibdev);
-	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+	addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+			    dev->rdev.lldi.ports[0]->dev_addr);
 	props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
 	props->fw_ver = dev->rdev.lldi.fw_vers;
 	props->device_cap_flags = dev->device_cap_flags;
@@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work)
 	struct c4iw_dev *dev = ctx->dev;
 
 	pr_debug("c4iw_dev %p\n", dev);
-	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
-	memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+	addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+			    dev->rdev.lldi.ports[0]->dev_addr);
 	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
 	if (fastreg_support)
 		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

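addrconf_addr_eui48() from <net/addrconf.h> builds the 8-byte EUI-64 GUID
from a 6-byte MAC, inserting FF:FE in the middle and flipping the
universal/local bit, where the old code memcpy()ed only six bytes into the
eight-byte field. The layout it produces, per RFC 4291 Appendix A
(mac_to_eui64() below is a hand-rolled illustration, not the kernel
helper):

    #include <linux/types.h>

    /* aa:bb:cc:dd:ee:ff -> (aa^0x02):bb:cc:FF:FE:dd:ee:ff */
    static void mac_to_eui64(u8 eui[8], const u8 addr[6])
    {
            eui[0] = addr[0] ^ 0x02;        /* flip universal/local bit */
            eui[1] = addr[1];
            eui[2] = addr[2];
            eui[3] = 0xFF;
            eui[4] = 0xFE;
            eui[5] = addr[3];
            eui[6] = addr[4];
            eui[7] = addr[5];
    }
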
@@ -2460,6 +2460,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	memset(attr, 0, sizeof(*attr));
 	memset(init_attr, 0, sizeof(*init_attr));
 	attr->qp_state = to_ib_qp_state(qhp->attr.state);
+	attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
 	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
 	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
 	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;

@@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
 	if (!pq->reqs)
 		goto pq_reqs_nomem;
 
-	pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
-				 sizeof(*pq->req_in_use),
-				 GFP_KERNEL);
+	pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
 	if (!pq->req_in_use)
 		goto pq_reqs_no_in_use;
 
@@ -210,7 +208,7 @@ cq_comps_nomem:
 cq_nomem:
 	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
-	kfree(pq->req_in_use);
+	bitmap_free(pq->req_in_use);
pq_reqs_no_in_use:
 	kfree(pq->reqs);
pq_reqs_nomem:
@@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
 			pq->wait,
 			!atomic_read(&pq->n_reqs));
 		kfree(pq->reqs);
-		kfree(pq->req_in_use);
+		bitmap_free(pq->req_in_use);
 		kmem_cache_destroy(pq->txreq_cache);
 		flush_pq_iowait(pq);
 		kfree(pq);

@@ -5,22 +5,9 @@ config INFINIBAND_HNS
 	depends on ARM64 || (COMPILE_TEST && 64BIT)
 	depends on (HNS_DSAF && HNS_ENET) || HNS3
 	help
-	  This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
-	  is used in Hisilicon Hip06 and more further ICT SoC based on
-	  platform device.
+	  This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
 
-	  To compile HIP06 or HIP08 driver as module, choose M here.
-
-config INFINIBAND_HNS_HIP06
-	bool "Hisilicon Hip06 Family RoCE support"
-	depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
-	depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
-	help
-	  RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
-	  Hip07 SoC. These RoCE engines are platform devices.
-
-	  To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
-	  module will be called hns-roce-hw-v1
+	  To compile HIP08 driver as module, choose M here.
 
 config INFINIBAND_HNS_HIP08
 	bool "Hisilicon Hip08 Family RoCE support"

@@ -9,11 +9,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
 	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
 
-ifdef CONFIG_INFINIBAND_HNS_HIP06
-hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
-
 ifdef CONFIG_INFINIBAND_HNS_HIP08
 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o

@@ -30,7 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/platform_device.h>
 #include <linux/pci.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
@@ -61,7 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
 	struct hns_roce_ah *ah = to_hr_ah(ibah);
 	int ret = 0;
 
-	if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata)
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
 		return -EOPNOTSUPP;
 
 	ah->av.port = rdma_ah_get_port_num(ah_attr);
@@ -80,7 +79,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
 	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
 
 	/* HIP08 needs to record vlan info in Address Vector */
-	if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
 		ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
 					      &ah->av.vlan_id, NULL);
 		if (ret)

@@ -31,10 +31,9 @@
  * SOFTWARE.
  */
 
-#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
-#include "hns_roce_device.h"
 #include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
 
 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
 {

@@ -31,7 +31,6 @@
  */
 
 #include <linux/dmapool.h>
-#include <linux/platform_device.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
@@ -61,7 +60,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
 				    CMD_POLL_TOKEN, 0);
 	if (ret) {
 		dev_err_ratelimited(hr_dev->dev,
-				    "failed to post mailbox %x in poll mode, ret = %d.\n",
+				    "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
 				    op, ret);
 		return ret;
 	}
@@ -91,7 +90,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
 
 	if (unlikely(token != context->token)) {
 		dev_err_ratelimited(hr_dev->dev,
-				    "[cmd] invalid ae token %x,context token is %x!\n",
+				    "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
 				    token, context->token);
 		return;
 	}
@@ -130,14 +129,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 					  context->token, 1);
 	if (ret) {
 		dev_err_ratelimited(dev,
-				    "failed to post mailbox %x in event mode, ret = %d.\n",
+				    "failed to post mailbox 0x%x in event mode, ret = %d.\n",
 				    op, ret);
 		goto out;
 	}
 
 	if (!wait_for_completion_timeout(&context->done,
 					 msecs_to_jiffies(timeout))) {
-		dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n",
+		dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
 				    context->token, op);
 		ret = -EBUSY;
 		goto out;
@@ -145,7 +144,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 
 	ret = context->result;
 	if (ret)
-		dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n",
+		dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
 				    context->token, op, ret);
 
 out:

@@ -104,208 +104,6 @@
 
 #define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
 
-#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
-#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
-
-#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
-
-#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
-
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
-	(((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
-
-#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
-	(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
-	(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
-	(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
-	(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
-	(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
-	(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
-	(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
-	(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
-
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
-	(((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
-	(((1UL << 15) - 1) << \
-	 ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
-	(((1UL << 4) - 1) << \
-	 ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
-	(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
-	(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
-
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
-	(((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
-	(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
-	(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
-	(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
-	(((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
-	(((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
-	(((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
-
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
-	(((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
-
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
-	(((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_S 0
-#define ROCEE_MB6_ROCEE_MB_CMD_M \
-	(((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
-	(((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
-
-#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
-
-#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
-
-#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
-#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
-	(((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
-	(((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
-	(((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
-	(((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
-
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
-	(((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
-
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
-	(((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
-	(((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
-	(((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
-	(((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
-	(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
-
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
-	(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
-
-#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
-#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
-#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
-
-#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
-#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
-
-#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
-
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
-	(((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
-
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
-	(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
-
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
-	(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
-
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
-	(((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
-
-#define ROCEE_SDB_CNT_CMP_BITS 16
-
-#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
-
-#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
-
 /*************ROCEE_REG DEFINITION****************/
 #define ROCEE_VENDOR_ID_REG			0x0
 #define ROCEE_VENDOR_PART_ID_REG		0x4

@@ -30,7 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/platform_device.h>
 #include <rdma/ib_umem.h>
 #include <rdma/uverbs_ioctl.h>
 #include "hns_roce_device.h"
@@ -406,15 +405,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 		goto err_cqn;
 	}
 
-	/*
-	 * For the QP created by kernel space, tptr value should be initialized
-	 * to zero; For the QP created by user space, it will cause synchronous
-	 * problems if tptr is set to zero here, so we initialize it in user
-	 * space.
-	 */
-	if (!udata && hr_cq->tptr_addr)
-		*hr_cq->tptr_addr = 0;
-
 	if (udata) {
 		resp.cqn = hr_cq->cqn;
 		ret = ib_copy_to_udata(udata, &resp,
@@ -441,9 +431,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
 
-	if (hr_dev->hw->destroy_cq)
-		hr_dev->hw->destroy_cq(ib_cq, udata);
-
 	free_cqc(hr_dev, hr_cq);
 	free_cqn(hr_dev, hr_cq->cqn);
 	free_cq_db(hr_dev, hr_cq, udata);

@@ -4,7 +4,6 @@
  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  */
 
-#include <linux/platform_device.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_device.h"
 

@@ -36,36 +36,18 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/hns-abi.h>
 
 #define DRV_NAME "hns_roce"
 
 #define PCI_REVISION_ID_HIP08			0x21
 #define PCI_REVISION_ID_HIP09			0x30
 
-#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')
-
 #define HNS_ROCE_MAX_MSG_LEN			0x80000000
 
 #define HNS_ROCE_IB_MIN_SQ_STRIDE		6
 
 #define BA_BYTE_LEN				8
 
-/* Hardware specification only for v1 engine */
-#define HNS_ROCE_MIN_CQE_NUM			0x40
-#define HNS_ROCE_MIN_WQE_NUM			0x20
-#define HNS_ROCE_MIN_SRQ_WQE_NUM		1
-
-/* Hardware specification only for v1 engine */
-#define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
-#define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000
-
-#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
-#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
-	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
-#define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
-#define HNS_ROCE_MIN_CQE_CNT			16
-
 #define HNS_ROCE_RESERVED_SGE			1
 
 #define HNS_ROCE_MAX_IRQ_NUM			128
 
 #define HNS_ROCE_SGE_IN_WQE			2
@@ -102,18 +84,12 @@
 #define HNS_ROCE_FRMR_MAX_PA			512
 
-#define PKEY_ID					0xffff
-#define GUID_LEN				8
 #define NODE_DESC_SIZE				64
 #define DB_REG_OFFSET				0x1000
 
 /* Configure to HW for PAGE_SIZE larger than 4KB */
 #define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)
 
-#define PAGES_SHIFT_8				8
-#define PAGES_SHIFT_16				16
-#define PAGES_SHIFT_24				24
-#define PAGES_SHIFT_32				32
-
 #define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
 #define SRQ_DB_REG				0x230
 
@@ -122,11 +98,6 @@
 
 #define CQ_BANKID_SHIFT 2
 
-/* The chip implementation of the consumer index is calculated
- * according to twice the actual EQ depth
- */
-#define EQ_DEPTH_COEFF 2
-
 enum {
 	SERV_TYPE_RC,
 	SERV_TYPE_UC,
@@ -182,6 +153,7 @@ enum {
 	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
 	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
 	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+	HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
 	HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
 	HNS_ROCE_CAP_FLAG_STASH = BIT(17),
 };
@@ -227,7 +199,7 @@ struct hns_roce_uar {
 
 enum hns_roce_mmap_type {
 	HNS_ROCE_MMAP_TYPE_DB = 1,
-	HNS_ROCE_MMAP_TYPE_TPTR,
+	HNS_ROCE_MMAP_TYPE_DWQE,
 };
 
 struct hns_user_mmap_entry {
@@ -242,7 +214,6 @@ struct hns_roce_ucontext {
 	struct list_head	page_list;
 	struct mutex		page_mutex;
 	struct hns_user_mmap_entry *db_mmap_entry;
-	struct hns_user_mmap_entry *tptr_mmap_entry;
 };
 
 struct hns_roce_pd {
@@ -345,19 +316,16 @@ struct hns_roce_mw {
 	u32			pbl_buf_pg_sz;
 };
 
-/* Only support 4K page size for mr register */
-#define MR_SIZE_4K 0
-
 struct hns_roce_mr {
 	struct ib_mr		ibmr;
 	u64			iova; /* MR's virtual original addr */
 	u64			size; /* Address range of MR */
 	u32			key; /* Key of MR */
 	u32			pd;   /* PD num of MR */
-	u32			access;	/* Access permission of MR */
+	u32			access; /* Access permission of MR */
 	int			enabled; /* MR's active status */
-	int			type;	/* MR's register type */
-	u32			pbl_hop_num;	/* multi-hop number */
+	int			type; /* MR's register type */
+	u32			pbl_hop_num; /* multi-hop number */
 	struct hns_roce_mtr	pbl_mtr;
 	u32			npages;
 	dma_addr_t		*page_list;
@@ -374,17 +342,17 @@ struct hns_roce_wq {
 	u32		wqe_cnt;  /* WQE num */
 	u32		max_gs;
 	u32		rsv_sge;
-	int		offset;
-	int		wqe_shift;	/* WQE size */
+	u32		offset;
+	u32		wqe_shift; /* WQE size */
 	u32		head;
 	u32		tail;
 	void __iomem	*db_reg;
 };
 
 struct hns_roce_sge {
-	unsigned int	sge_cnt;	/* SGE num */
-	int		offset;
-	int		sge_shift;	/* SGE size */
+	unsigned int	sge_cnt; /* SGE num */
+	u32		offset;
+	u32		sge_shift; /* SGE size */
 };
 
 struct hns_roce_buf_list {
@@ -453,7 +421,6 @@ struct hns_roce_cq {
 	u32			cons_index;
 	u32			*set_ci_db;
 	void __iomem		*db_reg;
-	u16			*tptr_addr;
 	int			arm_sn;
 	int			cqe_size;
 	unsigned long		cqn;
@@ -468,7 +435,7 @@ struct hns_roce_cq {
 
 struct hns_roce_idx_que {
 	struct hns_roce_mtr		mtr;
-	int				entry_shift;
+	u32				entry_shift;
 	unsigned long			*bitmap;
 	u32				head;
 	u32				tail;
@@ -480,7 +447,7 @@ struct hns_roce_srq {
 	u32			wqe_cnt;
 	int			max_gs;
 	u32			rsv_sge;
-	int			wqe_shift;
+	u32			wqe_shift;
 	u32			cqn;
 	u32			xrcdn;
 	void __iomem		*db_reg;
@@ -539,10 +506,6 @@ struct hns_roce_srq_table {
 	struct hns_roce_hem_table	table;
 };
 
-struct hns_roce_raq_table {
-	struct hns_roce_buf_list	*e_raq_buf;
-};
-
 struct hns_roce_av {
 	u8 port;
 	u8 gid_index;
@@ -627,10 +590,6 @@ struct hns_roce_work {
 	u32 queue_num;
 };
 
-enum {
-	HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5),
-};
-
 struct hns_roce_qp {
 	struct ib_qp		ibqp;
 	struct hns_roce_wq	rq;
@@ -650,9 +609,7 @@ struct hns_roce_qp {
 	u8			sl;
 	u8			resp_depth;
 	u8			state;
-	u32			access_flags;
 	u32			atomic_rd_en;
-	u32			pkey_index;
 	u32			qkey;
 	void			(*event)(struct hns_roce_qp *qp,
 					 enum hns_roce_event event_type);
@@ -672,9 +629,10 @@ struct hns_roce_qp {
 	unsigned long		flush_flag;
 	struct hns_roce_work	flush_work;
 	struct hns_roce_rinl_buf rq_inl_buf;
-	struct list_head	node;		/* all qps are on a list */
-	struct list_head	rq_node;	/* all recv qps are on a list */
-	struct list_head	sq_node;	/* all send qps are on a list */
+	struct list_head	node; /* all qps are on a list */
+	struct list_head	rq_node; /* all recv qps are on a list */
+	struct list_head	sq_node; /* all send qps are on a list */
+	struct hns_user_mmap_entry *dwqe_mmap_entry;
 };
 
 struct hns_roce_ib_iboe {
@@ -684,11 +642,6 @@ struct hns_roce_ib_iboe {
 	u8			phy_port[HNS_ROCE_MAX_PORTS];
 };
 
-enum {
-	HNS_ROCE_EQ_STAT_INVALID  = 0,
-	HNS_ROCE_EQ_STAT_VALID    = 2,
-};
-
 struct hns_roce_ceqe {
 	__le32	comp;
 	__le32	rsv[15];
@@ -720,12 +673,9 @@ struct hns_roce_eq {
 	int				type_flag; /* Aeq:1 ceq:0 */
 	int				eqn;
 	u32				entries;
-	u32				log_entries;
 	int				eqe_size;
 	int				irq;
-	int				log_page_size;
 	u32				cons_index;
-	struct hns_roce_buf_list	*buf_list;
 	int				over_ignore;
 	int				coalesce;
 	int				arm_st;
@@ -740,7 +690,6 @@ struct hns_roce_eq {
 
 struct hns_roce_eq_table {
 	struct hns_roce_eq	*eq;
-	void __iomem		**eqc_base; /* only for hw v1 */
 };
 
 enum cong_type {
@@ -767,7 +716,7 @@ struct hns_roce_caps {
 	u32		reserved_qps;
 	int		num_qpc_timer;
 	int		num_cqc_timer;
-	int		num_srqs;
+	u32		num_srqs;
 	u32		max_wqes;
 	u32		max_srq_wrs;
 	u32		max_srq_sges;
@@ -781,7 +730,7 @@ struct hns_roce_caps {
 	u32		min_cqes;
 	u32		min_wqes;
 	u32		reserved_cqs;
-	int		reserved_srqs;
+	u32		reserved_srqs;
 	int		num_aeq_vectors;
 	int		num_comp_vectors;
 	int		num_other_vectors;
@@ -855,7 +804,7 @@ struct hns_roce_caps {
 	u32		cqc_timer_ba_pg_sz;
 	u32		cqc_timer_buf_pg_sz;
 	u32		cqc_timer_hop_num;
-	u32             cqe_ba_pg_sz;	/* page_size = 4K*(2^cqe_ba_pg_sz) */
+	u32		cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
 	u32		cqe_buf_pg_sz;
 	u32		cqe_hop_num;
 	u32		srqwqe_ba_pg_sz;
@@ -874,7 +823,7 @@ struct hns_roce_caps {
 	u32		gmv_hop_num;
 	u32		sl_num;
 	u32		llm_buf_pg_sz;
-	u32		chunk_sz;	/* chunk size in non multihop mode */
+	u32		chunk_sz; /* chunk size in non multihop mode */
 	u64		flags;
 	u16		default_ceq_max_cnt;
 	u16		default_ceq_period;
@@ -897,7 +846,6 @@ enum hns_roce_device_state {
 };
 
 struct hns_roce_hw {
-	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
 	int (*cmq_init)(struct hns_roce_dev *hr_dev);
 	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
 	int (*hw_profile)(struct hns_roce_dev *hr_dev);
@@ -909,14 +857,12 @@ struct hns_roce_hw {
 	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
 			      unsigned int timeout);
 	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
-	int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
+	int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
 		       const union ib_gid *gid, const struct ib_gid_attr *attr);
 	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
 		       const u8 *addr);
-	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
-			enum ib_mtu mtu);
 	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
-			  struct hns_roce_mr *mr, unsigned long mtpt_idx);
+			  struct hns_roce_mr *mr);
 	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
 				struct hns_roce_mr *mr, int flags,
 				void *mb_buf);
@@ -936,9 +882,6 @@ struct hns_roce_hw {
 			 enum ib_qp_state new_state);
 	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
 			 struct hns_roce_qp *hr_qp);
-	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
-			struct ib_udata *udata);
-	int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
 	int (*init_eq)(struct hns_roce_dev *hr_dev);
 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
 	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
@@ -948,13 +891,11 @@ struct hns_roce_hw {
 
 struct hns_roce_dev {
 	struct ib_device	ib_dev;
-	struct platform_device  *pdev;
 	struct pci_dev		*pci_dev;
 	struct device		*dev;
 	struct hns_roce_uar     priv_uar;
 	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
 	spinlock_t		sm_lock;
-	spinlock_t		bt_cmd_lock;
 	bool			active;
 	bool			is_reset;
 	bool			dis_db;
@@ -1001,8 +942,6 @@ struct hns_roce_dev {
 	int			loop_idc;
 	u32			sdb_offset;
 	u32			odb_offset;
-	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
-	u32			tptr_size;	/* only for hw v1 */
 	const struct hns_roce_hw *hw;
 	void			*priv;
 	struct workqueue_struct *irq_workq;
@@ -1010,6 +949,7 @@ struct hns_roce_dev {
 	u32 func_num;
 	u32 is_vf;
 	u32 cong_algo_tmpl_id;
+	u64 dwqe_page;
 };
 
 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1158,7 +1098,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
 /* hns roce hw need current block and next block addr from mtt */
 #define MTT_MIN_COUNT	 2
 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+		      u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
 int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 			struct hns_roce_buf_attr *buf_attr,
 			unsigned int page_shift, struct ib_udata *udata,

@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/platform_device.h>
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
 #include "hns_roce_common.h"

[Two file diffs were suppressed by the page viewer as too large and are not
reproduced here.]

@@ -678,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
 static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 void *wqe)
 {
+#define HNS_ROCE_SL_SHIFT 2
 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
 
 /* All kinds of DirectWQE have the same header field layout */
@@ -685,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
 V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
-V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S,
+qp->sl >> HNS_ROCE_SL_SHIFT);
 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
 V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
 
@@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
 continue;
 
 dev_err_ratelimited(hr_dev->dev,
-"Cmdq IO error, opcode = %x, return = %x\n",
+"Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
 desc->opcode, desc_ret);
 ret = -EIO;
 }
 } else {
 /* FW/HW reset or incorrect number of desc */
 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
-dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
+dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
 csq->head, tail);
 csq->head = tail;
 
@@ -1571,7 +1573,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
 struct hns_roce_cmq_desc desc;
 int ret;
 
-if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
 hr_dev->func_num = 1;
 return 0;
 }
@@ -2003,7 +2005,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
 
 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
-caps->flags |= HNS_ROCE_CAP_FLAG_STASH;
+caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
+HNS_ROCE_CAP_FLAG_DIRECT_WQE;
 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
 } else {
 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
@@ -2144,7 +2147,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
 
-caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
@@ -2161,6 +2163,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 (u32)priv->handle->rinfo.num_vectors - 2);
 
 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
 
@@ -2181,6 +2184,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 } else {
 u32 func_num = max_t(u32, 1, hr_dev->func_num);
 
+caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
 caps->gid_table_len[0] /= func_num;
@@ -2393,7 +2397,7 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
 struct hns_roce_caps *caps = &hr_dev->caps;
 int ret;
 
-if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
 return 0;
 
 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
@@ -2967,8 +2971,8 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
 return hns_roce_cmq_send(hr_dev, desc, 2);
 }
 
-static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
-int gid_index, const union ib_gid *gid,
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
+const union ib_gid *gid,
 const struct ib_gid_attr *attr)
 {
 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
@@ -3063,8 +3067,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
 }
 
 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
-void *mb_buf, struct hns_roce_mr *mr,
-unsigned long mtpt_idx)
+void *mb_buf, struct hns_roce_mr *mr)
 {
 struct hns_roce_v2_mpt_entry *mpt_entry;
 int ret;
@@ -4488,14 +4491,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
 return 0;
 }
 
-static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
-{
-if (!fl)
-fl = rdma_calc_flow_label(lqpn, rqpn);
-
-return rdma_flow_label_to_udp_sport(fl);
-}
-
 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 u32 *dip_idx)
 {
@@ -4712,8 +4707,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
 }
 
 hr_reg_write(context, QPC_UDPSPN,
-is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
-attr->dest_qp_num) : 0);
+is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
+attr->dest_qp_num) :
+0);
 
 hr_reg_clear(qpc_mask, QPC_UDPSPN);
 
@@ -4739,7 +4735,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
 ibdev_err(ibdev,
-"failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
+"failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
 hr_qp->sl, MAX_SERVICE_LEVEL);
 return -EINVAL;
 }
@@ -4768,7 +4764,8 @@ static bool check_qp_state(enum ib_qp_state cur_state,
 [IB_QPS_ERR] = true },
 [IB_QPS_SQD] = {},
 [IB_QPS_SQE] = {},
-[IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+[IB_QPS_ERR] = { [IB_QPS_RESET] = true,
+[IB_QPS_ERR] = true }
 };
 
 return sm[cur_state][new_state];
@@ -5868,7 +5865,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
 }
 
-static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
 {
 struct device *dev = hr_dev->dev;
 int ret;
@@ -5882,7 +5879,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
 0, HNS_ROCE_CMD_DESTROY_AEQC,
 HNS_ROCE_CMD_TIMEOUT_MSECS);
 if (ret)
-dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
 }
 
 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
@@ -6394,7 +6391,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
 if (!id)
 return 0;
 
-if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
+if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
 return 0;
 
 ret = __hns_roce_hw_v2_init_instance(handle);
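The hunks above replace the driver-local get_udp_sport() with the consolidated core helper rdma_get_udp_sport(), matching the "Consolidate the roce source port selection code" item in the summary. A minimal sketch of the shared logic, following the hns copy deleted above; rdma_calc_flow_label() and rdma_flow_label_to_udp_sport() are the existing helpers in <rdma/ib_verbs.h>, and the example_ name is illustrative only:

/* Sketch of the consolidated RoCEv2 UDP source-port derivation. */
static inline u16 example_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
	/* no flow label supplied: derive one deterministically from the QPNs */
	if (!fl)
		fl = rdma_calc_flow_label(lqpn, rqpn);

	/* fold the 20-bit flow label into the 0xC000-0xFFFF dynamic port range */
	return rdma_flow_label_to_udp_sport(fl);
}

The point of routing everything through one helper is that all RoCE drivers now pick source ports the same way, so ECMP hashing behaves consistently across hns, irdma and rxe.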
@@ -35,26 +35,15 @@
 
 #include <linux/bitops.h>
 
-#define HNS_ROCE_VF_QPC_BT_NUM 256
-#define HNS_ROCE_VF_SCCC_BT_NUM 64
-#define HNS_ROCE_VF_SRQC_BT_NUM 64
-#define HNS_ROCE_VF_CQC_BT_NUM 64
-#define HNS_ROCE_VF_MPT_BT_NUM 64
-#define HNS_ROCE_VF_SMAC_NUM 32
-#define HNS_ROCE_VF_SL_NUM 8
-#define HNS_ROCE_VF_GMV_BT_NUM 256
-
 #define HNS_ROCE_V2_MAX_QP_NUM 0x1000
 #define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
-#define HNS_ROCE_V2_MAX_SRQ 0x100000
 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
 #define HNS_ROCE_V2_MAX_SRQ_SGE 64
 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
 #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
-#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
@@ -63,13 +52,10 @@
 #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
 #define HNS_ROCE_V2_UAR_NUM 256
 #define HNS_ROCE_V2_PHY_UAR_NUM 1
-#define HNS_ROCE_V2_MAX_IRQ_NUM 65
-#define HNS_ROCE_V2_COMP_VEC_NUM 63
 #define HNS_ROCE_V2_AEQE_VEC_NUM 1
 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
-#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
 #define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
@@ -81,7 +67,6 @@
 #define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
-#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
 #define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64
 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
@@ -103,7 +88,6 @@
 #define HNS_ROCE_INVALID_LKEY 0x0
 #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000
-#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
 #define HNS_ROCE_V2_RSV_QPS 8
 
 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
@@ -117,12 +101,14 @@
 #define HNS_ROCE_CQE_HOP_NUM 1
 #define HNS_ROCE_SRQWQE_HOP_NUM 1
 #define HNS_ROCE_PBL_HOP_NUM 2
-#define HNS_ROCE_EQE_HOP_NUM 2
 #define HNS_ROCE_IDX_HOP_NUM 1
 #define HNS_ROCE_SQWQE_HOP_NUM 2
 #define HNS_ROCE_EXT_SGE_HOP_NUM 1
 #define HNS_ROCE_RQWQE_HOP_NUM 2
 
+#define HNS_ROCE_V2_EQE_HOP_NUM 2
+#define HNS_ROCE_V3_EQE_HOP_NUM 1
+
 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
 #define HNS_ROCE_V2_GID_INDEX_NUM 16
@@ -1441,7 +1427,7 @@ struct hns_roce_v2_priv {
 struct hns_roce_dip {
 u8 dgid[GID_LEN_V2];
 u32 dip_idx;
-struct list_head node; /* all dips are on a list */
+struct list_head node; /* all dips are on a list */
 };
 
 /* only for RNR timeout issue of HIP08 */
@@ -31,7 +31,6 @@
 * SOFTWARE.
 */
 #include <linux/acpi.h>
-#include <linux/of_platform.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <rdma/ib_addr.h>
@@ -70,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
 if (port >= hr_dev->caps.num_ports)
 return -EINVAL;
 
-ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
+ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
 
 return ret;
 }
@@ -84,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
 if (port >= hr_dev->caps.num_ports)
 return -EINVAL;
 
-ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL);
+ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
 
 return ret;
 }
@@ -152,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
 u8 i;
 
 for (i = 0; i < hr_dev->caps.num_ports; i++) {
-if (hr_dev->hw->set_mtu)
-hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
-hr_dev->caps.max_mtu);
 ret = hns_roce_set_mac(hr_dev, i,
 hr_dev->iboe.netdevs[i]->dev_addr);
 if (ret)
@@ -270,6 +266,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
 static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
 u16 *pkey)
 {
+if (index > 0)
+return -EINVAL;
+
 *pkey = PKEY_ID;
 
 return 0;
@@ -307,9 +306,22 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
 entry->address = address;
 entry->mmap_type = mmap_type;
 
-ret = rdma_user_mmap_entry_insert_exact(
-ucontext, &entry->rdma_entry, length,
-mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+switch (mmap_type) {
+/* pgoff 0 must be used by DB for compatibility */
+case HNS_ROCE_MMAP_TYPE_DB:
+ret = rdma_user_mmap_entry_insert_exact(
+ucontext, &entry->rdma_entry, length, 0);
+break;
+case HNS_ROCE_MMAP_TYPE_DWQE:
+ret = rdma_user_mmap_entry_insert_range(
+ucontext, &entry->rdma_entry, length, 1,
+U32_MAX);
+break;
+default:
+ret = -EINVAL;
+break;
+}
+
 if (ret) {
 kfree(entry);
 return NULL;
@@ -323,18 +335,12 @@ static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
 if (context->db_mmap_entry)
 rdma_user_mmap_entry_remove(
 &context->db_mmap_entry->rdma_entry);
-
-if (context->tptr_mmap_entry)
-rdma_user_mmap_entry_remove(
-&context->tptr_mmap_entry->rdma_entry);
 }
 
 static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
 {
 struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
-struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
 u64 address;
-int ret;
 
 address = context->uar.pfn << PAGE_SHIFT;
 context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
@@ -342,27 +348,7 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
 if (!context->db_mmap_entry)
 return -ENOMEM;
 
-if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
-return 0;
-
-/*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
-context->tptr_mmap_entry =
-hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
-hr_dev->tptr_size,
-HNS_ROCE_MMAP_TYPE_TPTR);
-if (!context->tptr_mmap_entry) {
-ret = -ENOMEM;
-goto err;
-}
-
 return 0;
-
-err:
-hns_roce_dealloc_uar_entry(context);
-return ret;
 }
 
 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
@@ -436,10 +422,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 
 entry = to_hns_mmap(rdma_entry);
 pfn = entry->address >> PAGE_SHIFT;
-prot = vma->vm_page_prot;
 
-if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
-prot = pgprot_noncached(prot);
+switch (entry->mmap_type) {
+case HNS_ROCE_MMAP_TYPE_DB:
+case HNS_ROCE_MMAP_TYPE_DWQE:
+prot = pgprot_device(vma->vm_page_prot);
+break;
+default:
+return -EINVAL;
+}
 
 ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
 prot, rdma_entry);
@@ -816,7 +807,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 int ret;
 
 spin_lock_init(&hr_dev->sm_lock);
-spin_lock_init(&hr_dev->bt_cmd_lock);
 
 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
@@ -907,20 +897,13 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 struct device *dev = hr_dev->dev;
 int ret;
 
-if (hr_dev->hw->reset) {
-ret = hr_dev->hw->reset(hr_dev, true);
-if (ret) {
-dev_err(dev, "Reset RoCE engine failed!\n");
-return ret;
-}
-}
 hr_dev->is_reset = false;
 
 if (hr_dev->hw->cmq_init) {
 ret = hr_dev->hw->cmq_init(hr_dev);
 if (ret) {
 dev_err(dev, "Init RoCE Command Queue failed!\n");
-goto error_failed_cmq_init;
+return ret;
 }
 }
 
@@ -1003,12 +986,6 @@ error_failed_cmd_init:
 if (hr_dev->hw->cmq_exit)
 hr_dev->hw->cmq_exit(hr_dev);
 
-error_failed_cmq_init:
-if (hr_dev->hw->reset) {
-if (hr_dev->hw->reset(hr_dev, false))
-dev_err(dev, "Dereset RoCE engine failed!\n");
-}
-
 return ret;
 }
 
@@ -1028,8 +1005,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
 hns_roce_cmd_cleanup(hr_dev);
 if (hr_dev->hw->cmq_exit)
 hr_dev->hw->cmq_exit(hr_dev);
-if (hr_dev->hw->reset)
-hr_dev->hw->reset(hr_dev, false);
 }
 
 MODULE_LICENSE("Dual BSD/GPL");
@@ -31,7 +31,6 @@
 * SOFTWARE.
 */
 
-#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_device.h"
@@ -81,7 +80,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
 return -ENOMEM;
 }
 
-mr->key = hw_index_to_key(id); /* MR key */
+mr->key = hw_index_to_key(id); /* MR key */
 
 err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
 (unsigned long)id);
@@ -173,8 +172,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
 }
 
 if (mr->type != MR_TYPE_FRMR)
-ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
-mtpt_idx);
+ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
 else
 ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
 if (ret) {
@@ -363,12 +361,8 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 struct hns_roce_mr *mr = to_hr_mr(ibmr);
 int ret = 0;
 
-if (hr_dev->hw->dereg_mr) {
-ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
-} else {
-hns_roce_mr_free(hr_dev, mr);
-kfree(mr);
-}
+hns_roce_mr_free(hr_dev, mr);
+kfree(mr);
 
 return ret;
 }
@@ -614,10 +608,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 return -ENOBUFS;
 
 for (i = 0; i < count && npage < max_count; i++) {
-if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
-addr = to_hr_hw_page_addr(pages[npage]);
-else
-addr = pages[npage];
+addr = pages[npage];
 
 mtts[i] = cpu_to_le64(addr);
 npage++;
@@ -824,11 +815,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 }
 
 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
+u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
 {
 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
 int mtt_count, left;
-int start_index;
+u32 start_index;
 int total = 0;
 __le64 *mtts;
 u32 npage;
@@ -847,10 +838,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 continue;
 
 addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
-if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
-mtt_buf[total] = to_hr_hw_page_addr(addr);
-else
-mtt_buf[total] = addr;
+mtt_buf[total] = addr;
 
 total++;
 }
@@ -884,10 +872,10 @@ done:
 static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
 struct hns_roce_buf_attr *attr,
 struct hns_roce_hem_cfg *cfg,
-unsigned int *buf_page_shift, int unalinged_size)
+unsigned int *buf_page_shift, u64 unalinged_size)
 {
 struct hns_roce_buf_region *r;
-int first_region_padding;
+u64 first_region_padding;
 int page_cnt, region_cnt;
 unsigned int page_shift;
 size_t buf_size;
@@ -30,7 +30,6 @@
 * SOFTWARE.
 */
 
-#include <linux/platform_device.h>
 #include <linux/pci.h>
 #include "hns_roce_device.h"
 
@@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
 struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
-struct resource *res;
 int id;
 
 /* Using bitmap to manager UAR index */
@@ -104,18 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 else
 uar->index = 0;
 
-if (!dev_is_pci(hr_dev->dev)) {
-res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
-if (!res) {
-ida_free(&uar_ida->ida, id);
-dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
-return -EINVAL;
-}
-uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
-} else {
-uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
->> PAGE_SHIFT);
-}
+uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
+if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
 
 return 0;
 }
@@ -32,7 +32,6 @@
 */
 
 #include <linux/pci.h>
-#include <linux/platform_device.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_umem.h>
 #include <rdma/uverbs_ioctl.h>
@@ -110,12 +109,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 return;
 }
 
-if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
-(event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
-event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
-event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
-event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
-event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
+if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
 qp->state = IB_QPS_ERR;
 
 flush_cqe(hr_dev, qp);
@@ -219,13 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 int ret;
 
 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
-/* when hw version is v1, the sqpn is allocated */
-if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
-num = HNS_ROCE_MAX_PORTS +
-hr_dev->iboe.phy_port[hr_qp->port];
-else
-num = 1;
-
+num = 1;
 hr_qp->doorbell_qpn = 1;
 } else {
 mutex_lock(&qp_table->bank_mutex);
@@ -324,11 +316,6 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 if (!hr_qp->qpn)
 return -EINVAL;
 
-/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
-if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
-hr_dev->hw_rev == HNS_ROCE_HW_VER1)
-return 0;
-
 /* Alloc memory for QPC */
 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
 if (ret) {
@@ -379,6 +366,11 @@ err_out:
 return ret;
 }
 
+static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
+{
+rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
+}
+
 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
 struct xarray *xa = &hr_dev->qp_table_xa;
@@ -402,11 +394,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 
-/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
-if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
-hr_dev->hw_rev == HNS_ROCE_HW_VER1)
-return;
-
 if (hr_dev->caps.trrl_entry_sz)
 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@@ -535,11 +522,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
 
 hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
 
-if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
-hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
-return;
-}
-
 hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
 
 wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
@@ -780,7 +762,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 goto err_inline;
 }
 
+if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
+
 return 0;
 
 err_inline:
 free_rq_inline_buf(hr_qp);
@@ -822,6 +808,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
 hns_roce_qp_has_rq(init_attr));
 }
 
+static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
+struct hns_roce_dev *hr_dev,
+struct ib_udata *udata,
+struct hns_roce_ib_create_qp_resp *resp)
+{
+struct hns_roce_ucontext *uctx =
+rdma_udata_to_drv_context(udata,
+struct hns_roce_ucontext, ibucontext);
+struct rdma_user_mmap_entry *rdma_entry;
+u64 address;
+
+address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;
+
+hr_qp->dwqe_mmap_entry =
+hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
+HNS_ROCE_DWQE_SIZE,
+HNS_ROCE_MMAP_TYPE_DWQE);
+
+if (!hr_qp->dwqe_mmap_entry) {
+ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
+return -ENOMEM;
+}
+
+rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
+resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
+
+return 0;
+}
+
 static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
 struct hns_roce_qp *hr_qp,
 struct ib_qp_init_attr *init_attr,
@@ -909,10 +924,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
 
 if (udata) {
+if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
+ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
+if (ret)
+return ret;
+}
+
 ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
 resp);
 if (ret)
-return ret;
+goto err_remove_qp;
 } else {
 ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
 if (ret)
@@ -920,6 +941,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 }
 
 return 0;
+
+err_remove_qp:
+if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+qp_user_mmap_entry_remove(hr_qp);
+
+return ret;
 }
 
 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
@@ -933,6 +960,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
 hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+qp_user_mmap_entry_remove(hr_qp);
 } else {
 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
 hns_roce_free_db(hr_dev, &hr_qp->rdb);
@@ -1158,7 +1187,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
 goto out;
 break;
 case IB_QPT_UD:
-if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
+if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
 is_user)
 goto out;
 break;
@@ -1391,7 +1420,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 }
 }
 
-static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
 {
 return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
 }
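For the direct WQE path wired up above, the kernel hands userspace an mmap cookie (resp->dwqe_mmap_key, produced by rdma_user_mmap_get_offset()). A hypothetical sketch of the consumer side; the function and parameter names are illustrative and not part of any real provider library, only the key-as-mmap-offset convention comes from the hunks above:

#include <stdint.h>
#include <sys/mman.h>

/*
 * Map a QP's direct-WQE page. dwqe_mmap_key comes from the create-QP
 * response; cmd_fd is the uverbs command fd. On the kernel side,
 * hns_roce_mmap() looks the cookie up and maps the page with device
 * attributes via pgprot_device().
 */
static void *example_map_dwqe(int cmd_fd, uint64_t dwqe_mmap_key,
			      size_t dwqe_size)
{
	void *p = mmap(NULL, dwqe_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       cmd_fd, (off_t)dwqe_mmap_key);

	return p == MAP_FAILED ? NULL : p;
}

Keeping pgoff 0 reserved for the doorbell entry (as the switch in hns_roce_user_mmap_entry_insert() enforces) preserves compatibility with older userspace that mmaps offset 0 unconditionally.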
@@ -198,7 +198,7 @@ static void i40iw_remove(struct auxiliary_device *aux_dev)
 aux_dev);
 struct i40e_info *cdev_info = i40e_adev->ldev;
 
-return i40e_client_device_unregister(cdev_info);
+i40e_client_device_unregister(cdev_info);
 }
 
 static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
@@ -69,7 +69,7 @@ struct irdma_add_page_info {
 struct irdma_chunk {
 struct list_head list;
 struct irdma_dma_info dmainfo;
-void *bitmapbuf;
+unsigned long *bitmapbuf;
 
 u32 sizeofbitmap;
 u64 size;
@@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev,
 return -EINVAL;
 
 memset(props, 0, sizeof(*props));
-ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+iwdev->netdev->dev_addr);
 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
 irdma_fw_minor_ver(&rf->sc_dev);
 props->device_cap_flags = iwdev->device_cap_flags;
@@ -1170,6 +1171,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 udp_info->ttl = attr->ah_attr.grh.hop_limit;
 udp_info->flow_label = attr->ah_attr.grh.flow_label;
 udp_info->tos = attr->ah_attr.grh.traffic_class;
+udp_info->src_port =
+rdma_get_udp_sport(udp_info->flow_label,
+ibqp->qp_num,
+roce_info->dest_qp);
 irdma_qp_rem_qos(&iwqp->sc_qp);
 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
@@ -4321,24 +4326,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
 return IB_LINK_LAYER_ETHERNET;
 }
 
-static __be64 irdma_mac_to_guid(struct net_device *ndev)
-{
-const unsigned char *mac = ndev->dev_addr;
-__be64 guid;
-unsigned char *dst = (unsigned char *)&guid;
-
-dst[0] = mac[0] ^ 2;
-dst[1] = mac[1];
-dst[2] = mac[2];
-dst[3] = 0xff;
-dst[4] = 0xfe;
-dst[5] = mac[3];
-dst[6] = mac[4];
-dst[7] = mac[5];
-
-return guid;
-}
-
 static const struct ib_device_ops irdma_roce_dev_ops = {
 .attach_mcast = irdma_attach_mcast,
 .create_ah = irdma_create_ah,
@@ -4408,7 +4395,8 @@ static const struct ib_device_ops irdma_dev_ops = {
 static void irdma_init_roce_device(struct irdma_device *iwdev)
 {
 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
-iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
+addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+iwdev->netdev->dev_addr);
 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
 }
 
@@ -4421,7 +4409,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev)
 struct net_device *netdev = iwdev->netdev;
 
 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
-ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
+addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+netdev->dev_addr);
 iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
 iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
 iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
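Several drivers in this series (irdma above, ocrdma further down) drop an open-coded MAC-to-GUID conversion in favor of the existing addrconf_addr_eui48() helper from <net/addrconf.h>, which is the common-helpers-for-GUIDs item in the summary. What the helper computes, written out long-hand after the deleted irdma_mac_to_guid() body; the example_ wrapper name is illustrative only:

/* EUI-48 MAC address expanded to an EUI-64 GUID, as in the deleted code. */
static void example_mac_to_eui64(u8 *eui, const u8 *mac)
{
	eui[0] = mac[0] ^ 2;	/* flip the universal/local bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;		/* fixed 0xFFFE filler in the middle */
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}

Using the shared helper also fixes the latent type issue in the ether_addr_copy() call sites it replaces, which copied only 6 of the GUID's 8 bytes.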
@@ -85,14 +85,6 @@ static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
 
 static struct workqueue_struct *wq;
 
-static void init_query_mad(struct ib_smp *mad)
-{
-mad->base_version = 1;
-mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-mad->class_version = 1;
-mad->method = IB_MGMT_METHOD_GET;
-}
-
 static int check_flow_steering_support(struct mlx4_dev *dev)
 {
 int eth_num_ports = 0;
@@ -471,7 +463,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
@@ -669,7 +661,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -721,7 +713,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
 
 /* If reported active speed is QDR, check if is FDR-10 */
 if (props->active_speed == IB_SPEED_QDR) {
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -848,7 +840,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -870,7 +862,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
 }
 }
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
 in_mad->attr_mod = cpu_to_be32(index / 8);
 
@@ -917,7 +909,7 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
 in_mad->attr_mod = 0;
 
@@ -971,7 +963,7 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
 in_mad->attr_mod = cpu_to_be32(index / 32);
 
@@ -1990,7 +1982,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
 if (mlx4_is_master(dev->dev))
 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
@@ -2784,10 +2776,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 if (err)
 goto err_counter;
 
-ibdev->ib_uc_qpns_bitmap =
-kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
-sizeof(long),
-GFP_KERNEL);
+ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
+GFP_KERNEL);
 if (!ibdev->ib_uc_qpns_bitmap)
 goto err_steer_qp_release;
 
@@ -2875,7 +2865,7 @@ err_diag_counters:
 mlx4_ib_diag_cleanup(ibdev);
 
 err_steer_free_bitmap:
-kfree(ibdev->ib_uc_qpns_bitmap);
+bitmap_free(ibdev->ib_uc_qpns_bitmap);
 
 err_steer_qp_release:
 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
@@ -2988,7 +2978,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 
 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
 ibdev->steer_qpn_count);
-kfree(ibdev->ib_uc_qpns_bitmap);
+bitmap_free(ibdev->ib_uc_qpns_bitmap);
 
 iounmap(ibdev->uar_map);
 for (p = 0; p < ibdev->num_ports; ++p)
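The mlx4 hunks above, and the mlx5 and mthca ones below, delete identical per-driver init_query_mad() copies and call a shared ib_init_query_mad() instead. Every deleted copy had the body below, so the common helper is presumably equivalent; the example_ name and the helper's exact header location are assumptions, since its definition sits outside this diff:

/* Prepare a SubnGet() MAD; callers then fill in attr_id/attr_mod. */
static inline void example_init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method        = IB_MGMT_METHOD_GET;
}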
@@ -328,8 +328,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
 }
 
 wc->vendor_err = cqe->vendor_err_synd;
-if (dump)
+if (dump) {
+mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status,
+ib_wc_status_msg(wc->status));
 dump_cqe(dev, cqe);
+}
 }
 
 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
@@ -291,7 +291,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -318,7 +318,7 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
 if (!in_mad)
 return -ENOMEM;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
@@ -405,7 +405,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
 
 err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -430,7 +430,7 @@ int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
 err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -456,7 +456,7 @@ int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
 in_mad->attr_mod = cpu_to_be32(index / 32);
 
@@ -485,7 +485,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -496,7 +496,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
 
 memcpy(gid->raw, out_mad->data + 8, 8);
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
 in_mad->attr_mod = cpu_to_be32(index / 8);
 
@@ -530,7 +530,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
 
 /* props being zeroed by the caller, avoid zeroing it here */
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -584,6 +584,11 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
 props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
 props->active_speed = IB_SPEED_HDR;
 break;
+case 8:
+if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
+props->active_speed = IB_SPEED_NDR;
+break;
 }
 }
 
@@ -591,7 +596,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
 if (props->active_speed == 4) {
 if (dev->port_caps[port - 1].ext_port_cap &
 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
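The new case 8 above is what "NDR speed support in mlx5" amounts to on the MAD query path: extended-speed code 8 maps to IB_SPEED_NDR, gated on the peer advertising IB_PORT_LINK_SPEED_NDR_SUP in cap mask 2. A condensed sketch of the mapping; case 4 and case 8 are visible in the hunk, while the lower codes are recalled from the surrounding switch and should be treated as assumptions:

/* PortInfo extended active-speed codes, as decoded here (cap-mask
 * gating omitted for brevity). */
static enum ib_port_speed example_ext_active_speed(u8 code)
{
	switch (code) {
	case 1: return IB_SPEED_FDR;	/* assumed from surrounding cases */
	case 2: return IB_SPEED_EDR;	/* assumed from surrounding cases */
	case 4: return IB_SPEED_HDR;	/* visible in the hunk above */
	case 8: return IB_SPEED_NDR;	/* newly exposed in this series */
	default: return IB_SPEED_SDR;	/* fall back to the base rate */
	}
}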
@@ -665,9 +665,9 @@ struct mlx5_ib_mr {
 
 /* User MR data */
 struct mlx5_cache_ent *cache_ent;
+/* Everything after cache_ent is zero'd when MR allocated */
 struct ib_umem *umem;
 
-/* This is zero'd when the MR is allocated */
 union {
 /* Used only while the MR is in the cache */
 struct {
@@ -719,7 +719,7 @@ struct mlx5_ib_mr {
 /* Zero the fields in the mr that are variant depending on usage */
 static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
 {
-memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
+memset_after(mr, 0, cache_ent);
 }
 
 static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -1466,14 +1466,6 @@ extern const struct uapi_definition mlx5_ib_flow_defs[];
 extern const struct uapi_definition mlx5_ib_qos_defs[];
 extern const struct uapi_definition mlx5_ib_std_types_defs[];
 
-static inline void init_query_mad(struct ib_smp *mad)
-{
-mad->base_version = 1;
-mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-mad->class_version = 1;
-mad->method = IB_MGMT_METHOD_GET;
-}
-
 static inline int is_qp1(enum ib_qp_type qp_type)
 {
 return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
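Two bounded-memset helpers do the zeroing in the mlx5 hunk above and the mthca_mr.c hunk below: memset_after() clears everything in an object past a named member, and memset_startat() clears from the member to the end. Rough functional equivalents, as a sketch only (the real <linux/string.h> versions wrap these in statement expressions with extra type checking):

/* Clear all bytes of *obj located after the given member. */
#define example_memset_after(obj, v, member)				\
	memset((u8 *)(obj) + offsetofend(typeof(*(obj)), member), (v),	\
	       sizeof(*(obj)) - offsetofend(typeof(*(obj)), member))

/* Clear all bytes of *obj starting at the given member. */
#define example_memset_startat(obj, v, member)				\
	memset(&(obj)->member, (v),					\
	       sizeof(*(obj)) - offsetof(typeof(*(obj)), member))

The advantage over the open-coded offsetof() arithmetic they replace is that the struct name can no longer drift out of sync with the pointer type.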
@@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
 }
 
 if (obj < alloc->max) {
-set_bit(obj, alloc->table);
+__set_bit(obj, alloc->table);
 obj |= alloc->top;
 } else
 obj = -1;
@@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
 
 spin_lock_irqsave(&alloc->lock, flags);
 
-clear_bit(obj, alloc->table);
+__clear_bit(obj, alloc->table);
 alloc->last = min(alloc->last, obj);
 alloc->top = (alloc->top + alloc->max) & alloc->mask;
 
@@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
 int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
 u32 reserved)
 {
-int i;
-
 /* num must be a power of 2 */
 if (num != 1 << (ffs(num) - 1))
 return -EINVAL;
@@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
 alloc->max = num;
 alloc->mask = mask;
 spin_lock_init(&alloc->lock);
-alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
-GFP_KERNEL);
+alloc->table = bitmap_zalloc(num, GFP_KERNEL);
 if (!alloc->table)
 return -ENOMEM;
 
-bitmap_zero(alloc->table, num);
-for (i = 0; i < reserved; ++i)
-set_bit(i, alloc->table);
+bitmap_set(alloc->table, 0, reserved);
 
 return 0;
 }
 
 void mthca_alloc_cleanup(struct mthca_alloc *alloc)
 {
-kfree(alloc->table);
+bitmap_free(alloc->table);
 }
 
 /*
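The mthca conversion just shown is the template for the ocrdma and pvrdma hunks below: a hand-rolled kmalloc_array(BITS_TO_LONGS(n), sizeof(long)) plus bitmap_zero() collapses into a single bitmap_zalloc(), the reserved-entry loop into bitmap_set(), and the matching kfree() into bitmap_free(). A self-contained sketch of the resulting pattern (the function names are illustrative):

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Allocate a zeroed allocation bitmap and reserve its low entries. */
static unsigned long *example_table_init(unsigned int nbits,
					 unsigned int reserved)
{
	unsigned long *table = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!table)
		return NULL;

	/* mark the reserved low entries busy in one call */
	bitmap_set(table, 0, reserved);
	return table;
}

static void example_table_fini(unsigned long *table)
{
	bitmap_free(table);	/* pairs with bitmap_zalloc() */
}

Beyond brevity, the paired API makes the allocation size depend on the bit count alone, so BITS_TO_LONGS() rounding mistakes cannot creep in.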
@@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
 return -1;
 
 found:
-clear_bit(seg, buddy->bits[o]);
+__clear_bit(seg, buddy->bits[o]);
 --buddy->num_free[o];
 
 while (o > order) {
 --o;
 seg <<= 1;
-set_bit(seg ^ 1, buddy->bits[o]);
+__set_bit(seg ^ 1, buddy->bits[o]);
 ++buddy->num_free[o];
 }
 
@@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
 spin_lock(&buddy->lock);
 
 while (test_bit(seg ^ 1, buddy->bits[order])) {
-clear_bit(seg ^ 1, buddy->bits[order]);
+__clear_bit(seg ^ 1, buddy->bits[order]);
 --buddy->num_free[order];
 seg >>= 1;
 ++order;
 }
 
-set_bit(seg, buddy->bits[order]);
+__set_bit(seg, buddy->bits[order]);
 ++buddy->num_free[order];
 
 spin_unlock(&buddy->lock);
@@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
 
 static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
 {
-int i, s;
+int i;
 
 buddy->max_order = max_order;
 spin_lock_init(&buddy->lock);
@@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
 goto err_out;
 
 for (i = 0; i <= buddy->max_order; ++i) {
-s = BITS_TO_LONGS(1 << (buddy->max_order - i));
-buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
+buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i),
+GFP_KERNEL);
 if (!buddy->bits[i])
 goto err_out_free;
-bitmap_zero(buddy->bits[i],
-1 << (buddy->max_order - i));
 }
 
-set_bit(0, buddy->bits[buddy->max_order]);
+__set_bit(0, buddy->bits[buddy->max_order]);
 buddy->num_free[buddy->max_order] = 1;
 
 return 0;
 
 err_out_free:
 for (i = 0; i <= buddy->max_order; ++i)
-kfree(buddy->bits[i]);
+bitmap_free(buddy->bits[i]);
 
 err_out:
 kfree(buddy->bits);
@@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
 int i;
 
 for (i = 0; i <= buddy->max_order; ++i)
-kfree(buddy->bits[i]);
+bitmap_free(buddy->bits[i]);
 
 kfree(buddy->bits);
 kfree(buddy->num_free);
@@ -469,8 +467,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
 mpt_entry->start = cpu_to_be64(iova);
 mpt_entry->length = cpu_to_be64(total_size);
 
-memset(&mpt_entry->lkey, 0,
-sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
+memset_startat(mpt_entry, 0, lkey);
 
 if (mr->mtt)
 mpt_entry->mtt_seg =
@@ -50,14 +50,6 @@
 #include <rdma/mthca-abi.h>
 #include "mthca_memfree.h"
 
-static void init_query_mad(struct ib_smp *mad)
-{
-mad->base_version = 1;
-mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-mad->class_version = 1;
-mad->method = IB_MGMT_METHOD_GET;
-}
-
 static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 struct ib_udata *uhw)
 {
@@ -78,7 +70,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 
 props->fw_ver = mdev->fw_ver;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
 err = mthca_MAD_IFC(mdev, 1, 1,
@@ -140,7 +132,7 @@ static int mthca_query_port(struct ib_device *ibdev,
 
 /* props being zeroed by the caller, avoid zeroing it here */
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -234,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
 in_mad->attr_mod = cpu_to_be32(index / 32);
 
@@ -263,7 +255,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
 
@@ -274,7 +266,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
 
 memcpy(gid->raw, out_mad->data + 8, 8);
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
 in_mad->attr_mod = cpu_to_be32(index / 8);
 
@@ -1006,7 +998,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
 if (!in_mad || !out_mad)
 goto out;
 
-init_query_mad(in_mad);
+ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
 
 err = mthca_MAD_IFC(dev, 1, 1,
@@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
 static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
 {
 int status = -ENOMEM;
-size_t pd_bitmap_size;
 struct ocrdma_alloc_pd_range *cmd;
 struct ocrdma_alloc_pd_range_rsp *rsp;
 
@@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
 dev->pd_mgr->max_dpp_pd = rsp->pd_count;
-pd_bitmap_size =
-BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
-dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
-GFP_KERNEL);
+dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count,
+GFP_KERNEL);
 }
 kfree(cmd);
 }
@@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
 dev->pd_mgr->max_normal_pd = rsp->pd_count;
-pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
-dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
-GFP_KERNEL);
+dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count,
+GFP_KERNEL);
 }
 kfree(cmd);
 
@@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
 static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
 {
 ocrdma_mbx_dealloc_pd_range(dev);
-kfree(dev->pd_mgr->pd_norm_bitmap);
-kfree(dev->pd_mgr->pd_dpp_bitmap);
+bitmap_free(dev->pd_mgr->pd_norm_bitmap);
+bitmap_free(dev->pd_mgr->pd_dpp_bitmap);
 kfree(dev->pd_mgr);
 }
 
@@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
 MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("Dual BSD/GPL");
 
-void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
-{
-u8 mac_addr[6];
-
-memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-guid[0] = mac_addr[0] ^ 2;
-guid[1] = mac_addr[1];
-guid[2] = mac_addr[2];
-guid[3] = 0xff;
-guid[4] = 0xfe;
-guid[5] = mac_addr[3];
-guid[6] = mac_addr[4];
-guid[7] = mac_addr[5];
-}
 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
 u32 port_num)
 {
@@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
 {
 int ret;
 
-ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+dev->nic_info.mac_addr);
 BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
 memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
 sizeof(OCRDMA_NODE_DESC));
@@ -41,6 +41,7 @@
 */
 
 #include <linux/dma-mapping.h>
+#include <net/addrconf.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/iw_cm.h>
@@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
 memset(attr, 0, sizeof *attr);
 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
-ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
+addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+dev->nic_info.mac_addr);
 attr->max_mr_size = dev->attr.max_mr_size;
 attr->page_size_cap = 0xffff000;
 attr->vendor_id = dev->nic_info.pdev->vendor;
@@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
 {
 u16 pd_bitmap_idx = 0;
-const unsigned long *pd_bitmap;
+unsigned long *pd_bitmap;
 
 if (dpp_pool) {
 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
 dev->pd_mgr->max_dpp_pd);
-__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+__set_bit(pd_bitmap_idx, pd_bitmap);
 dev->pd_mgr->pd_dpp_count++;
 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
@@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
 dev->pd_mgr->max_normal_pd);
-__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+__set_bit(pd_bitmap_idx, pd_bitmap);
 dev->pd_mgr->pd_norm_count++;
 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
@@ -1844,12 +1846,10 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
 
 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
 {
-int status;
 struct ocrdma_srq *srq;
 
 srq = get_ocrdma_srq(ibsrq);
-status = ocrdma_mbx_query_srq(srq, srq_attr);
-return status;
+return ocrdma_mbx_query_srq(srq, srq_attr);
 }
 
 int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
@@ -1960,7 +1960,6 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
 const struct ib_send_wr *wr)
 {
-int status;
 struct ocrdma_sge *sge;
 u32 wqe_size = sizeof(*hdr);
 
@@ -1972,8 +1971,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
 sge = (struct ocrdma_sge *)(hdr + 1);
 }
 
-status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
-return status;
+return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
 }
 
 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
@@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port,
 enum rdma_protocol_type
 ocrdma_query_protocol(struct ib_device *device, u32 port_num);
 
-void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
 int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
 
 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
@@ -1931,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
 /* db offset was calculated in copy_qp_uresp, now set in the user q */
 if (qedr_qp_has_sq(qp)) {
 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+qp->sq.max_wr = attrs->cap.max_send_wr;
 rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
 &qp->usq.db_rec_data->db_data,
 DB_REC_WIDTH_32B,
@@ -1941,6 +1942,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
 
 if (qedr_qp_has_rq(qp)) {
 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+qp->rq.max_wr = attrs->cap.max_recv_wr;
 rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
 &qp->urq.db_rec_data->db_data,
 DB_REC_WIDTH_32B,
@@ -3030,7 +3030,7 @@ static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
 
 /* Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
-* these are in their canonical postions (e.g. lsb of
+* these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
@@ -3742,7 +3742,7 @@ static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
 /*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
-* these are in their canonical postions (e.g. lsb of
+* these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
@@ -5665,7 +5665,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
 /*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
-* these are in their canonical postions (e.g. lsb of
+* these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
@@ -243,10 +243,11 @@ static struct attribute *usnic_ib_qpn_default_attrs[] = {
 &qpn_attr_summary.attr,
 NULL
 };
+ATTRIBUTE_GROUPS(usnic_ib_qpn_default);
 
 static struct kobj_type usnic_ib_qpn_type = {
 .sysfs_ops = &usnic_ib_qpn_sysfs_ops,
-.default_attrs = usnic_ib_qpn_default_attrs
+.default_groups = usnic_ib_qpn_default_groups,
 };
 
 int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
@@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
 int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 struct usnic_ib_pd *pd = to_upd(ibpd);
-void *umem_pd;
 
-umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
-if (IS_ERR_OR_NULL(umem_pd)) {
-return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM;
-}
+pd->umem_pd = usnic_uiom_alloc_pd();
+if (IS_ERR(pd->umem_pd))
+return PTR_ERR(pd->umem_pd);
 
 return 0;
 }
@@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev)
 	tbl->max = num;
 	tbl->mask = mask;
 	spin_lock_init(&tbl->lock);
-	tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+	tbl->table = bitmap_zalloc(num, GFP_KERNEL);
 	if (!tbl->table)
 		return -ENOMEM;
 
 	/* 0th UAR is taken by the device. */
-	set_bit(0, tbl->table);
+	__set_bit(0, tbl->table);
 
 	return 0;
 }
@@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
 {
 	struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
 
-	kfree(tbl->table);
+	bitmap_free(tbl->table);
 }
 
 int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
@@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
 		return -ENOMEM;
 	}
 
-	set_bit(obj, tbl->table);
+	__set_bit(obj, tbl->table);
 	obj |= tbl->top;
 
 	spin_unlock_irqrestore(&tbl->lock, flags);
@@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
 
 	obj = uar->index & (tbl->max - 1);
 	spin_lock_irqsave(&tbl->lock, flags);
-	clear_bit(obj, tbl->table);
+	__clear_bit(obj, tbl->table);
 	tbl->last = min(tbl->last, obj);
 	tbl->top = (tbl->top + tbl->max) & tbl->mask;
 	spin_unlock_irqrestore(&tbl->lock, flags);
@@ -22,5 +22,4 @@ rdma_rxe-y := \
 	rxe_mcast.o \
 	rxe_task.o \
 	rxe_net.o \
-	rxe_sysfs.o \
 	rxe_hw_counters.o
@@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
 MODULE_DESCRIPTION("Soft RDMA transport");
 MODULE_LICENSE("Dual BSD/GPL");
 
-bool rxe_initialized;
-
 /* free resources for a rxe device all objects created for this device must
  * have been destroyed
  */
@@ -290,7 +288,6 @@ static int __init rxe_module_init(void)
 		return err;
 
 	rdma_link_register(&rxe_link_ops);
-	rxe_initialized = true;
 	pr_info("loaded\n");
 	return 0;
 }
@@ -301,7 +298,6 @@ static void __exit rxe_module_exit(void)
 	ib_unregister_driver(RDMA_DRIVER_RXE);
 	rxe_net_exit();
 
-	rxe_initialized = false;
 	pr_info("unloaded\n");
 }
 
@@ -39,8 +39,6 @@
 
 #define RXE_ROCE_V2_SPORT (0xc000)
 
-extern bool rxe_initialized;
-
 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
 
 int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
@@ -458,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 					   struct rxe_pkt_info *pkt,
 					   struct rxe_send_wqe *wqe)
 {
-	unsigned long flags;
-
 	if (wqe->has_rd_atomic) {
 		wqe->has_rd_atomic = 0;
 		atomic_inc(&qp->req.rd_atomic);
@@ -472,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 
 	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
 		/* state_lock used by requester & completer */
-		spin_lock_irqsave(&qp->state_lock, flags);
+		spin_lock_bh(&qp->state_lock);
 		if ((qp->req.state == QP_STATE_DRAIN) &&
 		    (qp->comp.psn == qp->req.psn)) {
 			qp->req.state = QP_STATE_DRAINED;
-			spin_unlock_irqrestore(&qp->state_lock, flags);
+			spin_unlock_bh(&qp->state_lock);
 
 			if (qp->ibqp.event_handler) {
 				struct ib_event ev;
@@ -488,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 					qp->ibqp.qp_context);
 			}
 		} else {
-			spin_unlock_irqrestore(&qp->state_lock, flags);
+			spin_unlock_bh(&qp->state_lock);
 		}
 	}
 
@@ -42,14 +42,13 @@ err1:
 static void rxe_send_complete(struct tasklet_struct *t)
 {
 	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
-	unsigned long flags;
 
-	spin_lock_irqsave(&cq->cq_lock, flags);
+	spin_lock_bh(&cq->cq_lock);
 	if (cq->is_dying) {
-		spin_unlock_irqrestore(&cq->cq_lock, flags);
+		spin_unlock_bh(&cq->cq_lock);
 		return;
 	}
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	spin_unlock_bh(&cq->cq_lock);
 
 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
@@ -106,15 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 {
 	struct ib_event ev;
-	unsigned long flags;
 	int full;
 	void *addr;
 
-	spin_lock_irqsave(&cq->cq_lock, flags);
+	spin_lock_bh(&cq->cq_lock);
 
 	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
 	if (unlikely(full)) {
-		spin_unlock_irqrestore(&cq->cq_lock, flags);
+		spin_unlock_bh(&cq->cq_lock);
 		if (cq->ibcq.event_handler) {
 			ev.device = cq->ibcq.device;
 			ev.element.cq = &cq->ibcq;
@@ -130,7 +128,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 
 	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
 
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	spin_unlock_bh(&cq->cq_lock);
 
 	if ((cq->notify == IB_CQ_NEXT_COMP) ||
 	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -143,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 
 void rxe_cq_disable(struct rxe_cq *cq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->cq_lock, flags);
+	spin_lock_bh(&cq->cq_lock);
 	cq->is_dying = true;
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	spin_unlock_bh(&cq->cq_lock);
 }
 
-void rxe_cq_cleanup(struct rxe_pool_entry *arg)
+void rxe_cq_cleanup(struct rxe_pool_elem *elem)
 {
-	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
+	struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
 
 	if (cq->queue)
 		rxe_queue_cleanup(cq->queue);
@@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
 
 void rxe_cq_disable(struct rxe_cq *cq);
 
-void rxe_cq_cleanup(struct rxe_pool_entry *arg);
+void rxe_cq_cleanup(struct rxe_pool_elem *arg);
 
 /* rxe_mcast.c */
 int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
@@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
 
 void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
 
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *arg);
 
 /* rxe_mmap.c */
 struct rxe_mmap_info {
@@ -89,7 +89,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
 int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
 int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-void rxe_mr_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_elem *arg);
 
 /* rxe_mw.c */
 int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
@@ -97,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw);
 int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
 struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
-void rxe_mw_cleanup(struct rxe_pool_entry *arg);
+void rxe_mw_cleanup(struct rxe_pool_elem *arg);
 
 /* rxe_net.c */
 struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
@@ -131,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp);
 
 void rxe_qp_destroy(struct rxe_qp *qp);
 
-void rxe_qp_cleanup(struct rxe_pool_entry *arg);
+void rxe_qp_cleanup(struct rxe_pool_elem *elem);
 
 static inline int qp_num(struct rxe_qp *qp)
 {
@@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
 	int err;
 	struct rxe_mc_grp *grp;
 	struct rxe_pool *pool = &rxe->mc_grp_pool;
-	unsigned long flags;
 
 	if (rxe->attr.max_mcast_qp_attach == 0)
 		return -EINVAL;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 
 	grp = rxe_pool_get_key_locked(pool, mgid);
 	if (grp)
@@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
 
 	grp = create_grp(rxe, pool, mgid);
 	if (IS_ERR(grp)) {
-		write_unlock_irqrestore(&pool->pool_lock, flags);
+		write_unlock_bh(&pool->pool_lock);
 		err = PTR_ERR(grp);
 		return err;
 	}
 
 done:
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 	*grp_p = grp;
 	return 0;
 }
@@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
 	}
 }
 
-void rxe_mc_cleanup(struct rxe_pool_entry *arg)
+void rxe_mc_cleanup(struct rxe_pool_elem *elem)
 {
-	struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
+	struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem);
 	struct rxe_dev *rxe = grp->rxe;
 
 	rxe_drop_key(grp);
@@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 
 static void rxe_mr_init(int access, struct rxe_mr *mr)
 {
-	u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
+	u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
 	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
 
 	/* set ibmr->l/rkey and also copy into private l/rkey
@@ -697,9 +697,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	return 0;
 }
 
-void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_elem *elem)
 {
-	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
 
 	ib_umem_release(mr->umem);
 
@@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 	}
 
 	rxe_add_index(mw);
-	mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
 	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
 			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
 	spin_lock_init(&mw->lock);
@@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
 {
 	struct rxe_mw *mw = to_rmw(ibmw);
 	struct rxe_pd *pd = to_rpd(ibmw->pd);
-	unsigned long flags;
 
-	spin_lock_irqsave(&mw->lock, flags);
+	spin_lock_bh(&mw->lock);
 	rxe_do_dealloc_mw(mw);
-	spin_unlock_irqrestore(&mw->lock, flags);
+	spin_unlock_bh(&mw->lock);
 
 	rxe_drop_ref(mw);
 	rxe_drop_ref(pd);
@@ -197,7 +196,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
 	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
-	unsigned long flags;
 
 	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
 	if (unlikely(!mw)) {
@@ -225,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 		mr = NULL;
 	}
 
-	spin_lock_irqsave(&mw->lock, flags);
+	spin_lock_bh(&mw->lock);
 
 	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
 	if (ret)
@@ -233,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 
 	rxe_do_bind_mw(qp, wqe, mw, mr);
 err_unlock:
-	spin_unlock_irqrestore(&mw->lock, flags);
+	spin_unlock_bh(&mw->lock);
 err_drop_mr:
 	if (mr)
 		rxe_drop_ref(mr);
@@ -280,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw)
 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-	unsigned long flags;
 	struct rxe_mw *mw;
 	int ret;
 
@@ -295,7 +292,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
 		goto err_drop_ref;
 	}
 
-	spin_lock_irqsave(&mw->lock, flags);
+	spin_lock_bh(&mw->lock);
 
 	ret = rxe_check_invalidate_mw(qp, mw);
 	if (ret)
@@ -303,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
 
 	rxe_do_invalidate_mw(mw);
 err_unlock:
-	spin_unlock_irqrestore(&mw->lock, flags);
+	spin_unlock_bh(&mw->lock);
 err_drop_ref:
 	rxe_drop_ref(mw);
 err:
@@ -333,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
 	return mw;
 }
 
-void rxe_mw_cleanup(struct rxe_pool_entry *elem)
+void rxe_mw_cleanup(struct rxe_pool_elem *elem)
 {
-	struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
+	struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
 
 	rxe_drop_index(mw);
 }
@@ -22,24 +22,20 @@ static struct rxe_recv_sockets recv_sockets;
 
 int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
 {
-	int err;
 	unsigned char ll_addr[ETH_ALEN];
 
 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
-	err = dev_mc_add(rxe->ndev, ll_addr);
 
-	return err;
+	return dev_mc_add(rxe->ndev, ll_addr);
 }
 
 int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
 {
-	int err;
 	unsigned char ll_addr[ETH_ALEN];
 
 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
-	err = dev_mc_del(rxe->ndev, ll_addr);
 
-	return err;
+	return dev_mc_del(rxe->ndev, ll_addr);
 }
 
 static struct dst_entry *rxe_find_route4(struct net_device *ndev,
@@ -444,7 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	else
 		err = rxe_send(skb, pkt);
 	if (err) {
-		rxe->xmit_errors++;
 		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
 		return err;
 	}
(File diff suppressed because it is too large)
@@ -5,13 +5,14 @@
  */
 
 #include "rxe.h"
 #include "rxe_loc.h"
 
+#define RXE_POOL_ALIGN		(16)
+
 static const struct rxe_type_info {
 	const char		*name;
 	size_t			size;
 	size_t			elem_offset;
-	void			(*cleanup)(struct rxe_pool_entry *obj);
+	void			(*cleanup)(struct rxe_pool_elem *obj);
 	enum rxe_pool_flags	flags;
 	u32			min_index;
 	u32			max_index;
@@ -21,19 +22,19 @@ static const struct rxe_type_info {
 	[RXE_TYPE_UC] = {
 		.name		= "rxe-uc",
 		.size		= sizeof(struct rxe_ucontext),
-		.elem_offset	= offsetof(struct rxe_ucontext, pelem),
+		.elem_offset	= offsetof(struct rxe_ucontext, elem),
 		.flags		= RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_PD] = {
 		.name		= "rxe-pd",
 		.size		= sizeof(struct rxe_pd),
-		.elem_offset	= offsetof(struct rxe_pd, pelem),
+		.elem_offset	= offsetof(struct rxe_pd, elem),
 		.flags		= RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_AH] = {
 		.name		= "rxe-ah",
 		.size		= sizeof(struct rxe_ah),
-		.elem_offset	= offsetof(struct rxe_ah, pelem),
+		.elem_offset	= offsetof(struct rxe_ah, elem),
 		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.min_index	= RXE_MIN_AH_INDEX,
 		.max_index	= RXE_MAX_AH_INDEX,
@@ -41,7 +42,7 @@ static const struct rxe_type_info {
 	[RXE_TYPE_SRQ] = {
 		.name		= "rxe-srq",
 		.size		= sizeof(struct rxe_srq),
-		.elem_offset	= offsetof(struct rxe_srq, pelem),
+		.elem_offset	= offsetof(struct rxe_srq, elem),
 		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.min_index	= RXE_MIN_SRQ_INDEX,
 		.max_index	= RXE_MAX_SRQ_INDEX,
@@ -49,7 +50,7 @@ static const struct rxe_type_info {
 	[RXE_TYPE_QP] = {
 		.name		= "rxe-qp",
 		.size		= sizeof(struct rxe_qp),
-		.elem_offset	= offsetof(struct rxe_qp, pelem),
+		.elem_offset	= offsetof(struct rxe_qp, elem),
 		.cleanup	= rxe_qp_cleanup,
 		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.min_index	= RXE_MIN_QP_INDEX,
@@ -58,14 +59,14 @@ static const struct rxe_type_info {
 	[RXE_TYPE_CQ] = {
 		.name		= "rxe-cq",
 		.size		= sizeof(struct rxe_cq),
-		.elem_offset	= offsetof(struct rxe_cq, pelem),
+		.elem_offset	= offsetof(struct rxe_cq, elem),
 		.flags		= RXE_POOL_NO_ALLOC,
 		.cleanup	= rxe_cq_cleanup,
 	},
 	[RXE_TYPE_MR] = {
 		.name		= "rxe-mr",
 		.size		= sizeof(struct rxe_mr),
-		.elem_offset	= offsetof(struct rxe_mr, pelem),
+		.elem_offset	= offsetof(struct rxe_mr, elem),
 		.cleanup	= rxe_mr_cleanup,
 		.flags		= RXE_POOL_INDEX,
 		.min_index	= RXE_MIN_MR_INDEX,
@@ -74,7 +75,7 @@ static const struct rxe_type_info {
 	[RXE_TYPE_MW] = {
 		.name		= "rxe-mw",
 		.size		= sizeof(struct rxe_mw),
-		.elem_offset	= offsetof(struct rxe_mw, pelem),
+		.elem_offset	= offsetof(struct rxe_mw, elem),
 		.cleanup	= rxe_mw_cleanup,
 		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.min_index	= RXE_MIN_MW_INDEX,
@@ -83,7 +84,7 @@ static const struct rxe_type_info {
 	[RXE_TYPE_MC_GRP] = {
 		.name		= "rxe-mc_grp",
 		.size		= sizeof(struct rxe_mc_grp),
-		.elem_offset	= offsetof(struct rxe_mc_grp, pelem),
+		.elem_offset	= offsetof(struct rxe_mc_grp, elem),
 		.cleanup	= rxe_mc_cleanup,
 		.flags		= RXE_POOL_KEY,
 		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
@@ -92,15 +93,10 @@ static const struct rxe_type_info {
 	[RXE_TYPE_MC_ELEM] = {
 		.name		= "rxe-mc_elem",
 		.size		= sizeof(struct rxe_mc_elem),
-		.elem_offset	= offsetof(struct rxe_mc_elem, pelem),
+		.elem_offset	= offsetof(struct rxe_mc_elem, elem),
 	},
 };
 
-static inline const char *pool_name(struct rxe_pool *pool)
-{
-	return rxe_type_info[pool->type].name;
-}
-
 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 {
 	int err = 0;
@@ -130,35 +126,36 @@ int rxe_pool_init(
 	enum rxe_elem_type	type,
 	unsigned int		max_elem)
 {
+	const struct rxe_type_info *info = &rxe_type_info[type];
 	int			err = 0;
-	size_t			size = rxe_type_info[type].size;
 
 	memset(pool, 0, sizeof(*pool));
 
 	pool->rxe		= rxe;
+	pool->name		= info->name;
 	pool->type		= type;
 	pool->max_elem		= max_elem;
-	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
-	pool->flags		= rxe_type_info[type].flags;
-	pool->index.tree	= RB_ROOT;
-	pool->key.tree		= RB_ROOT;
-	pool->cleanup		= rxe_type_info[type].cleanup;
+	pool->elem_size		= ALIGN(info->size, RXE_POOL_ALIGN);
+	pool->elem_offset	= info->elem_offset;
+	pool->flags		= info->flags;
+	pool->cleanup		= info->cleanup;
 
 	atomic_set(&pool->num_elem, 0);
 
 	rwlock_init(&pool->pool_lock);
 
-	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
-		err = rxe_pool_init_index(pool,
-					  rxe_type_info[type].max_index,
-					  rxe_type_info[type].min_index);
+	if (pool->flags & RXE_POOL_INDEX) {
+		pool->index.tree = RB_ROOT;
+		err = rxe_pool_init_index(pool, info->max_index,
+					  info->min_index);
 		if (err)
 			goto out;
 	}
 
-	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
-		pool->key.key_offset = rxe_type_info[type].key_offset;
-		pool->key.key_size = rxe_type_info[type].key_size;
+	if (pool->flags & RXE_POOL_KEY) {
+		pool->key.tree = RB_ROOT;
+		pool->key.key_offset = info->key_offset;
		pool->key.key_size = info->key_size;
 	}
 
 out:
@@ -169,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
 {
 	if (atomic_read(&pool->num_elem) > 0)
 		pr_warn("%s pool destroyed with unfree'd elem\n",
-			pool_name(pool));
+			pool->name);
 
-	bitmap_free(pool->index.table);
+	if (pool->flags & RXE_POOL_INDEX)
+		bitmap_free(pool->index.table);
 }
 
 static u32 alloc_index(struct rxe_pool *pool)
@@ -189,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool)
 	return index + pool->index.min_index;
 }
 
-static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
 {
 	struct rb_node **link = &pool->index.tree.rb_node;
 	struct rb_node *parent = NULL;
-	struct rxe_pool_entry *elem;
+	struct rxe_pool_elem *elem;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, index_node);
+		elem = rb_entry(parent, struct rxe_pool_elem, index_node);
 
 		if (elem->index == new->index) {
 			pr_warn("element already exists!\n");
@@ -216,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
 	return 0;
 }
 
-static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
 {
 	struct rb_node **link = &pool->key.tree.rb_node;
 	struct rb_node *parent = NULL;
-	struct rxe_pool_entry *elem;
+	struct rxe_pool_elem *elem;
 	int cmp;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, key_node);
+		elem = rb_entry(parent, struct rxe_pool_elem, key_node);
 
 		cmp = memcmp((u8 *)elem + pool->key.key_offset,
-			     (u8 *)new + pool->key.key_offset, pool->key.key_size);
+			     (u8 *)new + pool->key.key_offset,
+			     pool->key.key_size);
 
 		if (cmp == 0) {
 			pr_warn("key already exists!\n");
@@ -247,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
 	return 0;
 }
 
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
 {
 	struct rxe_pool *pool = elem->pool;
 	int err;
@@ -258,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
 	return err;
 }
 
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 	int err;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	err = __rxe_add_key_locked(elem, key);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 
 	return err;
 }
 
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
 
 	rb_erase(&elem->key_node, &pool->key.tree);
 }
 
-void __rxe_drop_key(struct rxe_pool_entry *elem)
+void __rxe_drop_key(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	__rxe_drop_key_locked(elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 }
 
-int __rxe_add_index_locked(struct rxe_pool_entry *elem)
+int __rxe_add_index_locked(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
 	int err;
@@ -299,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem)
 	return err;
 }
 
-int __rxe_add_index(struct rxe_pool_entry *elem)
+int __rxe_add_index(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 	int err;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	err = __rxe_add_index_locked(elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 
 	return err;
 }
 
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
 
@@ -320,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
 	rb_erase(&elem->index_node, &pool->index.tree);
 }
 
-void __rxe_drop_index(struct rxe_pool_entry *elem)
+void __rxe_drop_index(struct rxe_pool_elem *elem)
 {
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_bh(&pool->pool_lock);
 	__rxe_drop_index_locked(elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_bh(&pool->pool_lock);
 }
 
 void *rxe_alloc_locked(struct rxe_pool *pool)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	obj = kzalloc(info->size, GFP_ATOMIC);
+	obj = kzalloc(pool->elem_size, GFP_ATOMIC);
 	if (!obj)
 		goto out_cnt;
 
-	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
 
 	elem->pool = pool;
+	elem->obj = obj;
 	kref_init(&elem->ref_cnt);
 
 	return obj;
@@ -357,20 +352,20 @@ out_cnt:
 
 void *rxe_alloc(struct rxe_pool *pool)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	obj = kzalloc(info->size, GFP_KERNEL);
+	obj = kzalloc(pool->elem_size, GFP_KERNEL);
 	if (!obj)
 		goto out_cnt;
 
-	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
 
 	elem->pool = pool;
+	elem->obj = obj;
 	kref_init(&elem->ref_cnt);
 
 	return obj;
@@ -380,12 +375,13 @@ out_cnt:
 	return NULL;
 }
 
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 {
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
 	elem->pool = pool;
+	elem->obj = (u8 *)elem - pool->elem_offset;
 	kref_init(&elem->ref_cnt);
 
 	return 0;
@@ -397,17 +393,16 @@ out_cnt:
 
 void rxe_elem_release(struct kref *kref)
 {
-	struct rxe_pool_entry *elem =
-		container_of(kref, struct rxe_pool_entry, ref_cnt);
+	struct rxe_pool_elem *elem =
+		container_of(kref, struct rxe_pool_elem, ref_cnt);
 	struct rxe_pool *pool = elem->pool;
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
-	u8 *obj;
+	void *obj;
 
 	if (pool->cleanup)
 		pool->cleanup(elem);
 
 	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
-		obj = (u8 *)elem - info->elem_offset;
+		obj = elem->obj;
 		kfree(obj);
 	}
 
@@ -416,15 +411,14 @@ void rxe_elem_release(struct kref *kref)
 
 void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
 	struct rb_node *node;
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 
 	node = pool->index.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, index_node);
+		elem = rb_entry(node, struct rxe_pool_elem, index_node);
 
 		if (elem->index > index)
 			node = node->rb_left;
@@ -436,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
 
 	if (node) {
 		kref_get(&elem->ref_cnt);
-		obj = (u8 *)elem - info->elem_offset;
+		obj = elem->obj;
 	} else {
 		obj = NULL;
 	}
@@ -446,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
 
 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 {
-	u8 *obj;
-	unsigned long flags;
+	void *obj;
 
-	read_lock_irqsave(&pool->pool_lock, flags);
+	read_lock_bh(&pool->pool_lock);
 	obj = rxe_pool_get_index_locked(pool, index);
-	read_unlock_irqrestore(&pool->pool_lock, flags);
+	read_unlock_bh(&pool->pool_lock);
 
 	return obj;
 }
 
 void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
 {
-	const struct rxe_type_info *info = &rxe_type_info[pool->type];
 	struct rb_node *node;
-	struct rxe_pool_entry *elem;
-	u8 *obj;
+	struct rxe_pool_elem *elem;
+	void *obj;
 	int cmp;
 
 	node = pool->key.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, key_node);
+		elem = rb_entry(node, struct rxe_pool_elem, key_node);
 
 		cmp = memcmp((u8 *)elem + pool->key.key_offset,
 			     key, pool->key.key_size);
@@ -482,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
 
 	if (node) {
 		kref_get(&elem->ref_cnt);
-		obj = (u8 *)elem - info->elem_offset;
+		obj = elem->obj;
 	} else {
 		obj = NULL;
 	}
@@ -492,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
 
 void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 {
-	u8 *obj;
-	unsigned long flags;
+	void *obj;
 
-	read_lock_irqsave(&pool->pool_lock, flags);
+	read_lock_bh(&pool->pool_lock);
 	obj = rxe_pool_get_key_locked(pool, key);
-	read_unlock_irqrestore(&pool->pool_lock, flags);
+	read_unlock_bh(&pool->pool_lock);
 
 	return obj;
 }
@@ -7,9 +7,6 @@
 #ifndef RXE_POOL_H
 #define RXE_POOL_H
 
-#define RXE_POOL_ALIGN		(16)
-#define RXE_POOL_CACHE_FLAGS	(0)
-
 enum rxe_pool_flags {
 	RXE_POOL_INDEX		= BIT(1),
 	RXE_POOL_KEY		= BIT(2),
@@ -30,10 +27,9 @@ enum rxe_elem_type {
 	RXE_NUM_TYPES,		/* keep me last */
 };
 
-struct rxe_pool_entry;
-
-struct rxe_pool_entry {
+struct rxe_pool_elem {
 	struct rxe_pool		*pool;
+	void			*obj;
 	struct kref		ref_cnt;
 	struct list_head	list;
 
@@ -47,14 +43,16 @@ struct rxe_pool_entry {
 
 struct rxe_pool {
 	struct rxe_dev		*rxe;
+	const char		*name;
 	rwlock_t		pool_lock; /* protects pool add/del/search */
-	size_t			elem_size;
-	void			(*cleanup)(struct rxe_pool_entry *obj);
+	void			(*cleanup)(struct rxe_pool_elem *obj);
 	enum rxe_pool_flags	flags;
 	enum rxe_elem_type	type;
 
 	unsigned int		max_elem;
 	atomic_t		num_elem;
+	size_t			elem_size;
+	size_t			elem_offset;
 
 	/* only used if indexed */
 	struct {
@@ -89,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool);
 void *rxe_alloc(struct rxe_pool *pool);
 
 /* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
 
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
 
 /* assign an index to an indexed object and insert object into
  * pool's rb tree holding and not holding the pool_lock
 */
-int __rxe_add_index_locked(struct rxe_pool_entry *elem);
+int __rxe_add_index_locked(struct rxe_pool_elem *elem);
 
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
 
-int __rxe_add_index(struct rxe_pool_entry *elem);
+int __rxe_add_index(struct rxe_pool_elem *elem);
 
-#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
 
 /* drop an index and remove object from rb tree
 * holding and not holding the pool_lock
 */
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
 
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
 
-void __rxe_drop_index(struct rxe_pool_entry *elem);
+void __rxe_drop_index(struct rxe_pool_elem *elem);
 
-#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
 
 /* assign a key to a keyed object and insert object into
 * pool's rb tree holding and not holding pool_lock
 */
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key);
 
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key)
 
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key);
 
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key)
 
 /* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem);
 
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem)
 
-void __rxe_drop_key(struct rxe_pool_entry *elem);
+void __rxe_drop_key(struct rxe_pool_elem *elem);
 
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem)
 
 /* lookup an indexed object from index holding and not holding the pool_lock.
  * takes a reference on object
@@ -153,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
 void rxe_elem_release(struct kref *kref);
 
 /* take a reference on an object */
-#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt)
 
 /* drop a reference on an object */
-#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release)
 
 #endif /* RXE_POOL_H */
@@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->attr.path_mtu = 1;
 	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
 
-	qpn = qp->pelem.index;
+	qpn = qp->elem.index;
 	port = &rxe->port;
 
 	switch (init->qp_type) {
@@ -832,9 +832,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 }
 
 /* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+void rxe_qp_cleanup(struct rxe_pool_elem *elem)
 {
-	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
 
 	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
 }
@@ -151,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 	struct rxe_queue *new_q;
 	unsigned int num_elem = *num_elem_p;
 	int err;
-	unsigned long flags = 0, flags1;
 
 	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
 	if (!new_q)
@@ -165,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 		goto err1;
 	}
 
-	spin_lock_irqsave(consumer_lock, flags1);
+	spin_lock_bh(consumer_lock);
 
 	if (producer_lock) {
-		spin_lock_irqsave(producer_lock, flags);
+		spin_lock_bh(producer_lock);
 		err = resize_finish(q, new_q, num_elem);
-		spin_unlock_irqrestore(producer_lock, flags);
+		spin_unlock_bh(producer_lock);
 	} else {
 		err = resize_finish(q, new_q, num_elem);
 	}
 
-	spin_unlock_irqrestore(consumer_lock, flags1);
+	spin_unlock_bh(consumer_lock);
 
 	rxe_queue_cleanup(new_q);	/* new/old dep on err */
 	if (err)
@@ -110,7 +110,6 @@ void rnr_nak_timer(struct timer_list *t)
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 {
 	struct rxe_send_wqe *wqe;
-	unsigned long flags;
 	struct rxe_queue *q = qp->sq.queue;
 	unsigned int index = qp->req.wqe_index;
 	unsigned int cons;
@@ -124,25 +123,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	/* check to see if we are drained;
 	 * state_lock used by requester and completer
 	 */
-	spin_lock_irqsave(&qp->state_lock, flags);
+	spin_lock_bh(&qp->state_lock);
 	do {
 		if (qp->req.state != QP_STATE_DRAIN) {
 			/* comp just finished */
-			spin_unlock_irqrestore(&qp->state_lock,
-					       flags);
+			spin_unlock_bh(&qp->state_lock);
 			break;
 		}
 
 		if (wqe && ((index != cons) ||
 			(wqe->state != wqe_state_posted))) {
 			/* comp not done yet */
-			spin_unlock_irqrestore(&qp->state_lock,
-					       flags);
+			spin_unlock_bh(&qp->state_lock);
 			break;
 		}
 
 		qp->req.state = QP_STATE_DRAINED;
-		spin_unlock_irqrestore(&qp->state_lock, flags);
+		spin_unlock_bh(&qp->state_lock);
 
 		if (qp->ibqp.event_handler) {
 			struct ib_event ev;
@@ -372,7 +369,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	int pad = (-payload) & 0x3;
 	int paylen;
 	int solicited;
-	u16 pkey;
 	u32 qp_num;
 	int ack_req;
 
@@ -404,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
 			(RXE_WRITE_MASK | RXE_IMMDT_MASK));
 
-	pkey = IB_DEFAULT_PKEY_FULL;
-
 	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
 					 qp->attr.dest_qp_num;
 
@@ -414,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	if (ack_req)
 		qp->req.noack_pkts = 0;
 
-	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
 		 ack_req, pkt->psn);
 
 	/* init optional headers */
@@ -83,7 +83,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	srq->ibsrq.event_handler	= init->event_handler;
 	srq->ibsrq.srq_context		= init->srq_context;
 	srq->limit		= init->attr.srq_limit;
-	srq->srq_num		= srq->pelem.index;
+	srq->srq_num		= srq->elem.index;
 	srq->rq.max_wr		= init->attr.max_wr;
 	srq->rq.max_sge		= init->attr.max_sge;
 
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/*
- * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- */
-
-#include "rxe.h"
-#include "rxe_net.h"
-
-/* Copy argument and remove trailing CR. Return the new length. */
-static int sanitize_arg(const char *val, char *intf, int intf_len)
-{
-	int len;
-
-	if (!val)
-		return 0;
-
-	/* Remove newline. */
-	for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++)
-		intf[len] = val[len];
-	intf[len] = 0;
-
-	if (len == 0 || (val[len] != 0 && val[len] != '\n'))
-		return 0;
-
-	return len;
-}
-
-static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
-{
-	int len;
-	int err = 0;
-	char intf[32];
-	struct net_device *ndev;
-	struct rxe_dev *exists;
-
-	if (!rxe_initialized) {
-		pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
-		return -EAGAIN;
-	}
-
-	len = sanitize_arg(val, intf, sizeof(intf));
-	if (!len) {
-		pr_err("add: invalid interface name\n");
-		return -EINVAL;
-	}
-
-	ndev = dev_get_by_name(&init_net, intf);
-	if (!ndev) {
-		pr_err("interface %s not found\n", intf);
-		return -EINVAL;
-	}
-
-	if (is_vlan_dev(ndev)) {
-		pr_err("rxe creation allowed on top of a real device only\n");
-		err = -EPERM;
-		goto err;
-	}
-
-	exists = rxe_get_dev_from_net(ndev);
-	if (exists) {
-		ib_device_put(&exists->ib_dev);
-		pr_err("already configured on %s\n", intf);
-		err = -EINVAL;
-		goto err;
-	}
-
-	err = rxe_net_add("rxe%d", ndev);
-	if (err) {
-		pr_err("failed to add %s\n", intf);
-		goto err;
-	}
-
-err:
-	dev_put(ndev);
-	return err;
-}
-
-static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
-{
-	int len;
-	char intf[32];
-	struct ib_device *ib_dev;
-
-	len = sanitize_arg(val, intf, sizeof(intf));
-	if (!len) {
-		pr_err("add: invalid interface name\n");
-		return -EINVAL;
-	}
-
-	if (strncmp("all", intf, len) == 0) {
-		pr_info("rxe_sys: remove all");
-		ib_unregister_driver(RDMA_DRIVER_RXE);
-		return 0;
-	}
-
-	ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
-	if (!ib_dev) {
-		pr_err("not configured on %s\n", intf);
-		return -EINVAL;
-	}
-
-	ib_unregister_device_and_put(ib_dev);
-
-	return 0;
-}
-
-static const struct kernel_param_ops rxe_add_ops = {
-	.set = rxe_param_set_add,
-};
-
-static const struct kernel_param_ops rxe_remove_ops = {
-	.set = rxe_param_set_remove,
-};
-
-module_param_cb(add, &rxe_add_ops, NULL, 0200);
-MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface");
-module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
-MODULE_PARM_DESC(remove, "DEPRECATED. Remove RXE device over network interface");
@@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t)
 {
 	int cont;
 	int ret;
-	unsigned long flags;
 	struct rxe_task *task = from_tasklet(task, t, tasklet);
 
-	spin_lock_irqsave(&task->state_lock, flags);
+	spin_lock_bh(&task->state_lock);
 	switch (task->state) {
 	case TASK_STATE_START:
 		task->state = TASK_STATE_BUSY;
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 		break;
 
 	case TASK_STATE_BUSY:
 		task->state = TASK_STATE_ARMED;
 		fallthrough;
 	case TASK_STATE_ARMED:
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 		return;
 
 	default:
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 		pr_warn("%s failed with bad state %d\n", __func__, task->state);
 		return;
 	}
@@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t)
 		cont = 0;
 		ret = task->func(task->arg);
 
-		spin_lock_irqsave(&task->state_lock, flags);
+		spin_lock_bh(&task->state_lock);
 		switch (task->state) {
 		case TASK_STATE_BUSY:
 			if (ret)
@@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t)
 			pr_warn("%s failed with bad state %d\n", __func__,
 				task->state);
 		}
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 	} while (cont);
 
 	task->ret = ret;
@@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task,
 
 void rxe_cleanup_task(struct rxe_task *task)
 {
-	unsigned long flags;
 	bool idle;
 
 	/*
@@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task)
 	task->destroyed = true;
 
 	do {
-		spin_lock_irqsave(&task->state_lock, flags);
+		spin_lock_bh(&task->state_lock);
 		idle = (task->state == TASK_STATE_START);
-		spin_unlock_irqrestore(&task->state_lock, flags);
+		spin_unlock_bh(&task->state_lock);
 	} while (!idle);
 
 	tasklet_kill(&task->tasklet);
@@ -182,7 +182,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
 
 	/* create index > 0 */
 	rxe_add_index(ah);
-	ah->ah_num = ah->pelem.index;
+	ah->ah_num = ah->elem.index;
 
 	if (uresp) {
 		/* only if new user provider */
@@ -383,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			     const struct ib_recv_wr **bad_wr)
 {
 	int err = 0;
-	unsigned long flags;
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 
-	spin_lock_irqsave(&srq->rq.producer_lock, flags);
+	spin_lock_bh(&srq->rq.producer_lock);
 
 	while (wr) {
 		err = post_one_recv(&srq->rq, wr);
@@ -395,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		wr = wr->next;
 	}
 
-	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+	spin_unlock_bh(&srq->rq.producer_lock);
 
 	if (err)
 		*bad_wr = wr;
@@ -469,6 +468,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (err)
 		goto err1;
 
+	if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+		qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+						  qp->ibqp.qp_num,
+						  qp->attr.dest_qp_num);
+
 	return 0;
 
 err1:
@@ -634,19 +638,18 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	int err;
 	struct rxe_sq *sq = &qp->sq;
 	struct rxe_send_wqe *send_wqe;
-	unsigned long flags;
 	int full;
 
 	err = validate_send_wr(qp, ibwr, mask, length);
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&qp->sq.sq_lock, flags);
+	spin_lock_bh(&qp->sq.sq_lock);
 
 	full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
 
 	if (unlikely(full)) {
-		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+		spin_unlock_bh(&qp->sq.sq_lock);
 		return -ENOMEM;
 	}
 
@@ -655,7 +658,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 
 	queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
 
-	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+	spin_unlock_bh(&qp->sq.sq_lock);
 
 	return 0;
 }
@@ -735,7 +738,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	int err = 0;
 	struct rxe_qp *qp = to_rqp(ibqp);
 	struct rxe_rq *rq = &qp->rq;
-	unsigned long flags;
 
 	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
 		*bad_wr = wr;
@@ -749,7 +751,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		goto err1;
 	}
 
-	spin_lock_irqsave(&rq->producer_lock, flags);
+	spin_lock_bh(&rq->producer_lock);
 
 	while (wr) {
 		err = post_one_recv(rq, wr);
@@ -760,7 +762,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		wr = wr->next;
 	}
 
-	spin_unlock_irqrestore(&rq->producer_lock, flags);
+	spin_unlock_bh(&rq->producer_lock);
 
 	if (qp->resp.state == QP_STATE_ERROR)
 		rxe_run_task(&qp->resp.task, 1);
@@ -841,9 +843,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	int i;
 	struct rxe_cq *cq = to_rcq(ibcq);
 	struct rxe_cqe *cqe;
-	unsigned long flags;
 
-	spin_lock_irqsave(&cq->cq_lock, flags);
+	spin_lock_bh(&cq->cq_lock);
 	for (i = 0; i < num_entries; i++) {
 		cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
 		if (!cqe)
@@ -852,7 +853,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
 		queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
 	}
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	spin_unlock_bh(&cq->cq_lock);
 
 	return i;
 }
@@ -870,11 +871,10 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
-	unsigned long irq_flags;
 	int ret = 0;
 	int empty;
 
-	spin_lock_irqsave(&cq->cq_lock, irq_flags);
+	spin_lock_bh(&cq->cq_lock);
 	if (cq->notify != IB_CQ_NEXT_COMP)
 		cq->notify = flags & IB_CQ_SOLICITED_MASK;
 
@@ -883,7 +883,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
 		ret = 1;
 
-	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+	spin_unlock_bh(&cq->cq_lock);
 
 	return ret;
 }
@@ -35,17 +35,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
 
 struct rxe_ucontext {
 	struct ib_ucontext	ibuc;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 };
 
 struct rxe_pd {
 	struct ib_pd		ibpd;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 };
 
 struct rxe_ah {
 	struct ib_ah		ibah;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct rxe_av		av;
 	bool			is_user;
 	int			ah_num;
@@ -60,7 +60,7 @@ struct rxe_cqe {
 
 struct rxe_cq {
 	struct ib_cq		ibcq;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct rxe_queue	*queue;
 	spinlock_t		cq_lock;
 	u8			notify;
@@ -95,7 +95,7 @@ struct rxe_rq {
 
 struct rxe_srq {
 	struct ib_srq		ibsrq;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct rxe_pd		*pd;
 	struct rxe_rq		rq;
 	u32			srq_num;
@@ -209,7 +209,7 @@ struct rxe_resp_info {
 
 struct rxe_qp {
 	struct ib_qp		ibqp;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct ib_qp_attr	attr;
 	unsigned int		valid;
 	unsigned int		mtu;
@@ -309,7 +309,7 @@ static inline int rkey_is_mw(u32 rkey)
 }
 
 struct rxe_mr {
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct ib_mr		ibmr;
 
 	struct ib_umem		*umem;
@@ -342,7 +342,7 @@ enum rxe_mw_state {
 
 struct rxe_mw {
 	struct ib_mw		ibmw;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	spinlock_t		lock;
 	enum rxe_mw_state	state;
 	struct rxe_qp		*qp; /* Type 2 only */
@@ -354,7 +354,7 @@ struct rxe_mw {
 };
 
 struct rxe_mc_grp {
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	spinlock_t		mcg_lock; /* guard group */
 	struct rxe_dev		*rxe;
 	struct list_head	qp_list;
@@ -365,7 +365,7 @@ struct rxe_mc_grp {
 };
 
 struct rxe_mc_elem {
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct list_head	qp_list;
 	struct list_head	grp_list;
 	struct rxe_qp		*qp;
@@ -392,8 +392,6 @@ struct rxe_dev {
 
 	struct net_device	*ndev;
 
-	int			xmit_errors;
-
 	struct rxe_pool		uc_pool;
 	struct rxe_pool		pd_pool;
 	struct rxe_pool		ah_pool;
@@ -484,6 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
 
 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
 
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *elem);
 
 #endif /* RXE_VERBS_H */
@@ -8,6 +8,7 @@
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/xarray.h>
+#include <net/addrconf.h>
 
 #include <rdma/iw_cm.h>
 #include <rdma/ib_verbs.h>
@@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
 	attr->vendor_id = SIW_VENDOR_ID;
 	attr->vendor_part_id = sdev->vendor_part_id;
 
-	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);
+	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+			    sdev->netdev->dev_addr);
 
 	return 0;
 }
@@ -660,7 +662,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 		kbuf += core_sge->length;
 		core_sge++;
 	}
-	sqe->sge[0].length = bytes > 0 ? bytes : 0;
+	sqe->sge[0].length = max(bytes, 0);
 	sqe->num_sge = bytes > 0 ? 1 : 0;
 
 	return bytes;
@@ -113,10 +113,6 @@ bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");

int iser_pi_guard;
module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");

static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
{
	int ret;
@@ -139,9 +135,8 @@ static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
 * Notes: In case of data length errors or iscsi PDU completion failures
 *        this routine will signal iscsi layer of connection failure.
 */
void
iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		char *rx_data, int rx_data_len)
void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		     char *rx_data, int rx_data_len)
{
	int rc = 0;
	int datalen;
@@ -176,8 +171,7 @@ error:
 * Netes: This routine can't fail, just assign iscsi task
 *        hdr and max hdr size.
 */
static int
iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_iser_task *iser_task = task->dd_data;

@@ -198,9 +192,8 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
 * state mutex to avoid dereferencing the IB device which
 * may have already been terminated.
 */
int
iser_initialize_task_headers(struct iscsi_task *task,
			     struct iser_tx_desc *tx_desc)
int iser_initialize_task_headers(struct iscsi_task *task,
				 struct iser_tx_desc *tx_desc)
{
	struct iser_conn *iser_conn = task->conn->dd_data;
	struct iser_device *device = iser_conn->ib_conn.device;
@@ -237,8 +230,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
 * Return: Returns zero on success or -ENOMEM when failing
 *         to init task headers (dma mapping error).
 */
static int
iscsi_iser_task_init(struct iscsi_task *task)
static int iscsi_iser_task_init(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	int ret;
@@ -272,8 +264,8 @@ iscsi_iser_task_init(struct iscsi_task *task)
 * xmit.
 *
 **/
static int
iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
				 struct iscsi_task *task)
{
	int error = 0;

@@ -290,9 +282,8 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
	return error;
}

static int
iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
				 struct iscsi_task *task)
static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
					   struct iscsi_task *task)
{
	struct iscsi_r2t_info *r2t = &task->unsol_r2t;
	struct iscsi_data hdr;
@@ -326,8 +317,7 @@ iscsi_iser_task_xmit_unsol_data_exit:
 *
 * Return: zero on success or escalates $error on failure.
 */
static int
iscsi_iser_task_xmit(struct iscsi_task *task)
static int iscsi_iser_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_iser_task *iser_task = task->dd_data;
@@ -410,8 +400,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 *
 * In addition the error sector is marked.
 */
static u8
iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
@@ -460,11 +449,9 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
 * -EINVAL in case end-point doesn't exsits anymore or iser connection
 *         state is not UP (teardown already started).
 */
static int
iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
		     struct iscsi_cls_conn *cls_conn,
		     uint64_t transport_eph,
		     int is_leading)
static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
				struct iscsi_cls_conn *cls_conn,
				uint64_t transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iser_conn *iser_conn;
@@ -519,8 +506,7 @@ out:
 * from this point iscsi must call conn_stop in session/connection
 * teardown so iser transport must wait for it.
 */
static int
iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *iscsi_conn;
	struct iser_conn *iser_conn;
@@ -542,8 +528,7 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 * handle, so we call it under iser the state lock to protect against
 * this kind of race.
 */
static void
iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iser_conn *iser_conn = conn->dd_data;
@@ -578,8 +563,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 *
 * Removes and free iscsi host.
 */
static void
iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

@@ -588,8 +572,7 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
	iscsi_host_free(shost);
}

static inline unsigned int
iser_dif_prot_caps(int prot_caps)
static inline unsigned int iser_dif_prot_caps(int prot_caps)
{
	int ret = 0;

@@ -708,9 +691,8 @@ free_host:
	return NULL;
}

static int
iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
		     enum iscsi_param param, char *buf, int buflen)
static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
				enum iscsi_param param, char *buf, int buflen)
{
	int value;

@@ -760,8 +742,8 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
 *
 * Output connection statistics.
 */
static void
iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				      struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

@@ -812,9 +794,9 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
 * Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error)
 *         if fails.
 */
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		      int non_blocking)
static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost,
						    struct sockaddr *dst_addr,
						    int non_blocking)
{
	int err;
	struct iser_conn *iser_conn;
@@ -857,8 +839,7 @@ failure:
 * or more likely iser connection state transitioned to TEMINATING or
 * DOWN during the wait period.
 */
static int
iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct iser_conn *iser_conn = ep->dd_data;
	int rc;
@@ -893,8 +874,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 * and cleanup or actually call it immediately in case we didn't pass
 * iscsi conn bind/start stage, thus it is safe.
 */
static void
iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct iser_conn *iser_conn = ep->dd_data;

@@ -119,8 +119,6 @@

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

#define ISER_MIN_POSTED_RX		(ISER_DEF_XMIT_CMDS_MAX >> 2)

/* the max TX (send) WR supported by the iSER QP is defined by                 *
 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect   *
 * to have at max for SCSI command. The tx posting & completion handling code  *
@@ -148,8 +146,6 @@
				 - ISER_MAX_RX_MISC_PDUS) /	\
				 (1 + ISER_INFLIGHT_DATAOUTS))

#define ISER_SIGNAL_CMD_COUNT 32

/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN	(sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))

@@ -366,9 +362,6 @@ struct iser_fr_pool {
 * @qp:                  Connection Queue-pair
 * @cq:                  Connection completion queue
 * @cq_size:             The number of max outstanding completions
 * @post_recv_buf_count: post receive counter
 * @sig_count:           send work request signal count
 * @rx_wr:               receive work request for batch posts
 * @device:              reference to iser device
 * @fr_pool:             connection fast registration poool
 * @pi_support:          Indicate device T10-PI support
@@ -379,9 +372,6 @@ struct ib_conn {
	struct ib_qp		*qp;
	struct ib_cq		*cq;
	u32			cq_size;
	int			post_recv_buf_count;
	u8			sig_count;
	struct ib_recv_wr	rx_wr[ISER_MIN_POSTED_RX];
	struct iser_device	*device;
	struct iser_fr_pool	fr_pool;
	bool			pi_support;
@@ -397,8 +387,6 @@ struct ib_conn {
 * @state:            connection logical state
 * @qp_max_recv_dtos: maximum number of data outs, corresponds
 *                    to max number of post recvs
 * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
 * @min_posted_rx:    (qp_max_recv_dtos >> 2)
 * @max_cmds:         maximum cmds allowed for this connection
 * @name:             connection peer portal
 * @release_work:     deffered work for release job
@@ -409,7 +397,6 @@ struct ib_conn {
 *                    (state is ISER_CONN_UP)
 * @conn_list:        entry in ig conn list
 * @login_desc:       login descriptor
 * @rx_desc_head:     head of rx_descs cyclic buffer
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
 * @scsi_sg_tablesize: scsi host sg_tablesize
@@ -422,8 +409,6 @@ struct iser_conn {
	struct iscsi_endpoint	*ep;
	enum iser_conn_state	state;
	unsigned		qp_max_recv_dtos;
	unsigned		qp_max_recv_dtos_mask;
	unsigned		min_posted_rx;
	u16			max_cmds;
	char			name[ISER_OBJECT_NAME_SIZE];
	struct work_struct	release_work;
@@ -433,7 +418,6 @@ struct iser_conn {
	struct completion	up_completion;
	struct list_head	conn_list;
	struct iser_login_desc	login_desc;
	unsigned int		rx_desc_head;
	struct iser_rx_desc	*rx_descs;
	u32			num_rx_descs;
	unsigned short		scsi_sg_tablesize;
@@ -486,7 +470,6 @@ struct iser_global {
extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;

@@ -543,9 +526,9 @@ int iser_connect(struct iser_conn *iser_conn,
		 int non_blocking);

int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal);
int iser_post_recvm(struct iser_conn *iser_conn,
		    struct iser_rx_desc *rx_desc);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
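The header comment quoted above sizes the iSER send queue as max_send_wr = T * (1 + D) + C, where T is the tag (command) count, D the expected inflight dataouts per SCSI command, and C the control PDUs. A worked instance of that arithmetic; the values below are hypothetical, not the driver's defaults:

#include <stdio.h>

int main(void)
{
	int T = 128;	/* commands */
	int D = 1;	/* inflight dataouts expected per command */
	int C = 8;	/* misc control PDUs */

	/* max_send_wr = T * (1 + D) + C per the sizing comment */
	printf("max_send_wr = %d\n", T * (1 + D) + C);
	return 0;
}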
@@ -95,11 +95,8 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
 *  task->data[ISER_DIR_OUT].data_len, Protection size
 *  is stored at task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
				  unsigned int unsol_sz, unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
@@ -160,8 +157,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn	*iser_conn,
				  struct iser_tx_desc	*tx_desc)
static void iser_create_send_desc(struct iser_conn *iser_conn,
				  struct iser_tx_desc *tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

@@ -247,8 +244,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
				    iser_conn->pages_per_mr))
@@ -280,7 +275,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
@@ -322,37 +316,35 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;
	int err = 0;
	int i;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer
	 * (for the last login response).
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);
		goto out;

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	} else
		iser_info("Normal session, posting batch of RX %d buffers\n",
			  iser_conn->min_posted_rx);
		goto out;
	}

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;
	iser_info("Normal session, posting batch of RX %d buffers\n",
		  iser_conn->qp_max_recv_dtos - 1);

	return 0;
}

static inline bool iser_signal_comp(u8 sig_count)
{
	return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
	/*
	 * Initial post receive buffers.
	 * There is one already posted recv buffer (for the last login
	 * response). Therefore, the first recv buffer is skipped here.
	 */
	for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
		err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
		if (err)
			goto out;
	}
out:
	return err;
}

/**
@@ -360,8 +352,7 @@ static inline bool iser_signal_comp(u8 sig_count)
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
@@ -371,7 +362,6 @@ int iser_send_command(struct iscsi_conn *conn,
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

@@ -418,8 +408,7 @@ int iser_send_command(struct iscsi_conn *conn,

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

@@ -434,8 +423,7 @@ send_command_error:
 * @task: SCSI command task
 * @hdr: pointer to the LLD's iSCSI message header
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
@@ -487,7 +475,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
		 itt, buf_offset, data_seg_len);


	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

@@ -497,8 +485,7 @@ send_data_out_error:
	return err;
}

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
@@ -550,7 +537,7 @@ int iser_send_control(struct iscsi_conn *conn,
		goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	err = iser_post_send(&iser_conn->ib_conn, mdesc);
	if (!err)
		return 0;

@@ -590,11 +577,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
			desc->rsp_dma, ISER_RX_LOGIN_SIZE,
			DMA_FROM_DEVICE);

	ib_conn->post_recv_buf_count--;
	if (iser_conn->iscsi_conn->session->discovery_sess)
		return;

	/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
	iser_post_recvm(iser_conn, iser_conn->rx_descs);
}

static inline int
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
	if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
		     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
@@ -607,10 +597,8 @@ iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
	return 0;
}

static int
iser_check_remote_inv(struct iser_conn *iser_conn,
		      struct ib_wc *wc,
		      struct iscsi_hdr *hdr)
static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
				 struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
@@ -657,8 +645,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length;
	int outstanding, count, err;
	int length, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
@@ -687,20 +674,9 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
				desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the   *
	 * task eliminates the need to worry on tasks which are completed in   *
	 * parallel to the execution of iser_conn_term. So the code that waits *
	 * for the posted rx bufs refcount to become zero handles everything   */
	ib_conn->post_recv_buf_count--;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
	err = iser_post_recvm(iser_conn, desc);
	if (err)
		iser_err("posting rx buffer err %d\n", err);
}

void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
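The iser_initiator.c hunks above retire the post_recv_buf_count/min_posted_rx bookkeeping: instead of replenishing receive buffers in batches from a cyclic head index, each completion now reposts exactly the descriptor it consumed, so the receive queue depth stays constant by construction. A rough user-space model of the two disciplines, with all names and sizes invented for illustration:

#include <stdio.h>

#define QDEPTH 8
#define MIN_POSTED (QDEPTH >> 2)

/* Old discipline (sketch): count outstanding buffers and repost a batch
 * once enough have drained, as the removed min_posted_rx logic did. */
static int old_outstanding = QDEPTH;

static void old_on_completion(void)
{
	old_outstanding--;
	if (old_outstanding + MIN_POSTED <= QDEPTH) {
		int room = QDEPTH - old_outstanding;
		int count = room < MIN_POSTED ? room : MIN_POSTED;

		old_outstanding += count;	/* batch repost */
	}
}

/* New discipline, as in the patch: repost the one completed buffer
 * (iser_post_recvm(iser_conn, desc)), so no counter is needed. */
static int new_outstanding = QDEPTH;

static void new_on_completion(void)
{
	new_outstanding--;
	new_outstanding++;	/* one-for-one repost */
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++) {
		old_on_completion();
		new_on_completion();
	}
	printf("old=%d new=%d (both hover at %d)\n",
	       old_outstanding, new_outstanding, QDEPTH);
	return 0;
}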
@@ -44,8 +44,7 @@ void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
		iser_err_comp(wc, "memreg");
}

static struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
@@ -60,9 +59,8 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
	return desc;
}

static void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc)
static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
				 struct iser_fr_desc *desc)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	unsigned long flags;
@@ -73,9 +71,9 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

@@ -100,9 +98,8 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
			struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

@@ -154,8 +151,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
	reg->mem_h = NULL;
}

static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
static void iser_set_dif_domain(struct scsi_cmnd *sc,
				struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -171,8 +168,8 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
static int iser_set_sig_attrs(struct scsi_cmnd *sc,
			      struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
@@ -205,8 +202,7 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
	return 0;
}

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
@@ -215,11 +211,8 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
		*mask |= IB_SIG_CHECK_GUARD;
}

static inline void
iser_inv_rkey(struct ib_send_wr *inv_wr,
	      struct ib_mr *mr,
	      struct ib_cqe *cqe,
	      struct ib_send_wr *next_wr)
static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
				 struct ib_cqe *cqe, struct ib_send_wr *next_wr)
{
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_cqe = cqe;
@@ -229,12 +222,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr,
	inv_wr->next = next_wr;
}

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_data_buf *mem,
		struct iser_data_buf *sig_mem,
		struct iser_reg_resources *rsc,
		struct iser_mem_reg *sig_reg)
static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *mem,
			   struct iser_data_buf *sig_mem,
			   struct iser_reg_resources *rsc,
			   struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
@@ -335,12 +327,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
	return 0;
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
static int iser_reg_data_sg(struct iscsi_iser_task *task,
			    struct iser_data_buf *mem,
			    struct iser_fr_desc *desc, bool use_dma_key,
			    struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

@@ -265,14 +265,14 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
	memset(&init_attr, 0, sizeof(init_attr));

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->cq;
	init_attr.recv_cq	= ib_conn->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.qp_context = (void *)ib_conn;
	init_attr.send_cq = ib_conn->cq;
	init_attr.recv_cq = ib_conn->cq;
	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.cap.max_send_wr = max_send_wr;
	if (ib_conn->pi_support)
		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
@@ -283,9 +283,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp, max_send_wr);
	iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
		  ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
	return ret;

out_err:
@@ -313,7 +312,7 @@ struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
		goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
	if (!device)
		goto out;

	/* assign this device to the device */
@@ -392,8 +391,7 @@ void iser_release_work(struct work_struct *work)
 * so the cm_id removal is out of here. It is Safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
@@ -401,7 +399,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
	if (ib_conn->qp) {
		rdma_destroy_qp(ib_conn->cma_id);
		ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
		ib_conn->qp = NULL;
@@ -411,7 +409,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
	if (iser_conn->rx_descs)
		iser_free_rx_descriptors(iser_conn);

	if (device != NULL) {
	if (device) {
		iser_device_try_release(device);
		ib_conn->device = NULL;
	}
@@ -445,7 +443,7 @@ void iser_conn_release(struct iser_conn *iser_conn)
		iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
	if (ib_conn->cma_id) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}
@@ -501,13 +499,12 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn = cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}

static void
iser_calc_scsi_params(struct iser_conn *iser_conn,
		      unsigned int max_sectors)
static void iser_calc_scsi_params(struct iser_conn *iser_conn,
				  unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct ib_device_attr *attr = &device->ib_device->attrs;
@@ -545,11 +542,11 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn = cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;
@@ -593,9 +590,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct iser_conn *iser_conn = cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_device *ib_dev = ib_conn->device->ib_device;

@@ -609,9 +606,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = ISER_ZBVA_NOT_SUP;
@@ -638,7 +635,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn = cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;
@@ -661,7 +658,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct iser_conn *iser_conn = cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
@@ -675,7 +672,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct iser_conn *iser_conn = cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
@@ -687,12 +684,13 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
	complete(&iser_conn->ib_completion);
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
static int iser_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn = cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

@@ -757,7 +755,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);

	ib_conn->post_recv_buf_count = 0;
	ib_conn->reg_cqe.done = iser_reg_comp;
}

@@ -765,10 +762,8 @@ void iser_conn_init(struct iser_conn *iser_conn)
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
		 struct sockaddr *dst_addr, int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;
@@ -785,8 +780,7 @@ int iser_connect(struct iser_conn *iser_conn,
	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
					 iser_conn, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
@@ -829,7 +823,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct ib_recv_wr wr;
	int ib_ret;
	int ret;

	desc->sge.addr = desc->rsp_dma;
	desc->sge.length = ISER_RX_LOGIN_SIZE;
@@ -841,46 +835,30 @@ int iser_post_recvl(struct iser_conn *iser_conn)
	wr.num_sge = 1;
	wr.next = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_recv login failed ret=%d\n", ret);

	return ib_ret;
	return ret;
}

int iser_post_recvm(struct iser_conn *iser_conn, int count)
int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;
	struct ib_recv_wr *wr;
	int i, ib_ret;
	struct ib_recv_wr wr;
	int ret;

	for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_desc->cqe.done = iser_task_rsp;
		wr->wr_cqe = &rx_desc->cqe;
		wr->sg_list = &rx_desc->rx_sg;
		wr->num_sge = 1;
		wr->next = wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}
	rx_desc->cqe.done = iser_task_rsp;
	wr.wr_cqe = &rx_desc->cqe;
	wr.sg_list = &rx_desc->rx_sg;
	wr.num_sge = 1;
	wr.next = NULL;

	wr--;
	wr->next = NULL; /* mark end of work requests list */
	ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_recv failed ret=%d\n", ret);

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
	if (unlikely(ib_ret)) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;

	return ib_ret;
	return ret;
}


@@ -888,16 +866,14 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
 * iser_post_send - Initiate a Send DTO operation
 * @ib_conn: connection RDMA resources
 * @tx_desc: iSER TX descriptor
 * @signal: true to send work request as SIGNALED
 *
 * Return: 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_send_wr *wr = &tx_desc->send_wr;
	struct ib_send_wr *first_wr;
	int ib_ret;
	int ret;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
@@ -908,7 +884,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
	wr->send_flags = IB_SEND_SIGNALED;

	if (tx_desc->inv_wr.next)
		first_wr = &tx_desc->inv_wr;
@@ -917,12 +893,12 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
	else
		first_wr = wr;

	ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
	if (unlikely(ib_ret))
	ret = ib_post_send(ib_conn->qp, first_wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ib_ret, wr->opcode);
			 ret, wr->opcode);

	return ib_ret;
	return ret;
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
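iser_post_send() above now sets IB_SEND_SIGNALED unconditionally, retiring the sig_count heuristic that requested a completion only for every ISER_SIGNAL_CMD_COUNT-th command (the "Don't suppress send completions" change in the commit list). A standalone sketch of the retired selective-signaling test, kept here purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ISER_SIGNAL_CMD_COUNT 32

/* The old heuristic: signal only every 32nd send and let the HCA
 * silently retire the unsignaled work requests in between. */
static bool iser_signal_comp(uint8_t sig_count)
{
	return (sig_count % ISER_SIGNAL_CMD_COUNT) == 0;
}

int main(void)
{
	uint8_t sig_count = 0;
	int signaled = 0;
	int i;

	for (i = 0; i < 256; i++)
		if (iser_signal_comp(++sig_count))
			signaled++;
	printf("%d of 256 sends signaled\n", signaled);
	return 0;
}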
@@ -13,8 +13,8 @@

void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

@@ -180,8 +180,8 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
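The rtrs hunks from here on are almost entirely a mechanical rename: struct rtrs_sess becomes rtrs_path, rtrs_clt_sess becomes rtrs_clt_path, and the old top-level rtrs_clt takes over the rtrs_clt_sess name (the server side renames rtrs_srv_* the same way). The accessor pattern is untouched; a self-contained sketch of the container_of() idiom these to_clt_path()-style helpers rely on, with simplified, hypothetical type names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for rtrs_path embedded inside rtrs_clt_path. */
struct path { int id; };
struct clt_path {
	struct path s;	/* embedded base, like rtrs_clt_path::s */
	int hca_port;
};

static struct clt_path *to_clt_path(struct path *s)
{
	return container_of(s, struct clt_path, s);
}

int main(void)
{
	struct clt_path cp = { .s = { .id = 1 }, .hca_port = 2 };

	printf("port %d\n", to_clt_path(&cp.s)->hca_port);
	return 0;
}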
@@ -16,21 +16,21 @@
#define MIN_MAX_RECONN_ATT -1
#define MAX_MAX_RECONN_ATT 9999

static void rtrs_clt_sess_release(struct kobject *kobj)
static void rtrs_clt_path_release(struct kobject *kobj)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);

	free_sess(sess);
	free_path(clt_path);
}

static struct kobj_type ktype_sess = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = rtrs_clt_sess_release
	.release = rtrs_clt_path_release
};

static void rtrs_clt_sess_stats_release(struct kobject *kobj)
static void rtrs_clt_path_stats_release(struct kobject *kobj)
{
	struct rtrs_clt_stats *stats;

@@ -43,14 +43,15 @@ static void rtrs_clt_sess_stats_release(struct kobject *kobj)

static struct kobj_type ktype_stats = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = rtrs_clt_sess_stats_release,
	.release = rtrs_clt_path_stats_release,
};

static ssize_t max_reconnect_attempts_show(struct device *dev,
					   struct device_attribute *attr,
					   char *page)
{
	struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
	struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
						 dev);

	return sysfs_emit(page, "%d\n",
			  rtrs_clt_get_max_reconnect_attempts(clt));
@@ -63,7 +64,8 @@ static ssize_t max_reconnect_attempts_store(struct device *dev,
{
	int value;
	int ret;
	struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
	struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
						 dev);

	ret = kstrtoint(buf, 10, &value);
	if (ret) {
@@ -90,9 +92,9 @@ static ssize_t mpath_policy_show(struct device *dev,
				 struct device_attribute *attr,
				 char *page)
{
	struct rtrs_clt *clt;
	struct rtrs_clt_sess *clt;

	clt = container_of(dev, struct rtrs_clt, dev);
	clt = container_of(dev, struct rtrs_clt_sess, dev);

	switch (clt->mp_policy) {
	case MP_POLICY_RR:
@@ -114,12 +116,12 @@ static ssize_t mpath_policy_store(struct device *dev,
				  const char *buf,
				  size_t count)
{
	struct rtrs_clt *clt;
	struct rtrs_clt_sess *clt;
	int value;
	int ret;
	size_t len = 0;

	clt = container_of(dev, struct rtrs_clt, dev);
	clt = container_of(dev, struct rtrs_clt_sess, dev);

	ret = kstrtoint(buf, 10, &value);
	if (!ret && (value == MP_POLICY_RR ||
@@ -169,12 +171,12 @@ static ssize_t add_path_store(struct device *dev,
		.src = &srcaddr,
		.dst = &dstaddr
	};
	struct rtrs_clt *clt;
	struct rtrs_clt_sess *clt;
	const char *nl;
	size_t len;
	int err;

	clt = container_of(dev, struct rtrs_clt, dev);
	clt = container_of(dev, struct rtrs_clt_sess, dev);

	nl = strchr(buf, '\n');
	if (nl)
@@ -197,10 +199,10 @@ static DEVICE_ATTR_RW(add_path);
static ssize_t rtrs_clt_state_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	if (sess->state == RTRS_CLT_CONNECTED)
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
	if (clt_path->state == RTRS_CLT_CONNECTED)
		return sysfs_emit(page, "connected\n");

	return sysfs_emit(page, "disconnected\n");
@@ -219,16 +221,16 @@ static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;
	int ret;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
	if (!sysfs_streq(buf, "1")) {
		rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
		rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
			 attr->attr.name, buf);
		return -EINVAL;
	}
	ret = rtrs_clt_reconnect_from_sysfs(sess);
	ret = rtrs_clt_reconnect_from_sysfs(clt_path);
	if (ret)
		return ret;

@@ -249,15 +251,15 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
	if (!sysfs_streq(buf, "1")) {
		rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
		rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
			 attr->attr.name, buf);
		return -EINVAL;
	}
	rtrs_clt_close_conns(sess, true);
	rtrs_clt_close_conns(clt_path, true);

	return count;
}
@@ -276,16 +278,16 @@ static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;
	int ret;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
	if (!sysfs_streq(buf, "1")) {
		rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
		rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
			 attr->attr.name, buf);
		return -EINVAL;
	}
	ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
	ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr);
	if (ret)
		return ret;

@@ -333,11 +335,11 @@ static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      char *page)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;

	sess = container_of(kobj, typeof(*sess), kobj);
	clt_path = container_of(kobj, typeof(*clt_path), kobj);

	return sysfs_emit(page, "%u\n", sess->hca_port);
	return sysfs_emit(page, "%u\n", clt_path->hca_port);
}

static struct kobj_attribute rtrs_clt_hca_port_attr =
@@ -347,11 +349,11 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      char *page)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);

	return sysfs_emit(page, "%s\n", sess->hca_name);
	return sysfs_emit(page, "%s\n", clt_path->hca_name);
}

static struct kobj_attribute rtrs_clt_hca_name_attr =
@@ -361,12 +363,12 @@ static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *page)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);

	return sysfs_emit(page, "%lld ns\n",
			  ktime_to_ns(sess->s.hb_cur_latency));
			  ktime_to_ns(clt_path->s.hb_cur_latency));
}

static struct kobj_attribute rtrs_clt_cur_latency_attr =
@@ -376,11 +378,11 @@ static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      char *page)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;
	int len;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
	len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page,
			      PAGE_SIZE);
	len += sysfs_emit_at(page, len, "\n");
	return len;
@@ -393,11 +395,11 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      char *page)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt_path *clt_path;
	int len;

	sess = container_of(kobj, struct rtrs_clt_sess, kobj);
	len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page,
	clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
	len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page,
			      PAGE_SIZE);
	len += sysfs_emit_at(page, len, "\n");
	return len;
@@ -406,7 +408,7 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
static struct kobj_attribute rtrs_clt_dst_addr_attr =
	__ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);

static struct attribute *rtrs_clt_sess_attrs[] = {
static struct attribute *rtrs_clt_path_attrs[] = {
	&rtrs_clt_hca_name_attr.attr,
	&rtrs_clt_hca_port_attr.attr,
	&rtrs_clt_src_addr_attr.attr,
@@ -419,42 +421,43 @@ static struct attribute *rtrs_clt_sess_attrs[] = {
	NULL,
};

static const struct attribute_group rtrs_clt_sess_attr_group = {
	.attrs = rtrs_clt_sess_attrs,
static const struct attribute_group rtrs_clt_path_attr_group = {
	.attrs = rtrs_clt_path_attrs,
};

int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path)
{
	struct rtrs_clt *clt = sess->clt;
	struct rtrs_clt_sess *clt = clt_path->clt;
	char str[NAME_MAX];
	int err;
	struct rtrs_addr path = {
		.src = &sess->s.src_addr,
		.dst = &sess->s.dst_addr,
		.src = &clt_path->s.src_addr,
		.dst = &clt_path->s.dst_addr,
	};

	rtrs_addr_to_str(&path, str, sizeof(str));
	err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
	err = kobject_init_and_add(&clt_path->kobj, &ktype_sess,
				   clt->kobj_paths,
				   "%s", str);
	if (err) {
		pr_err("kobject_init_and_add: %d\n", err);
		kobject_put(&sess->kobj);
		kobject_put(&clt_path->kobj);
		return err;
	}
	err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
	err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
	if (err) {
		pr_err("sysfs_create_group(): %d\n", err);
		goto put_kobj;
	}
	err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
				   &sess->kobj, "stats");
	err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats,
				   &clt_path->kobj, "stats");
	if (err) {
		pr_err("kobject_init_and_add: %d\n", err);
		kobject_put(&sess->stats->kobj_stats);
		kobject_put(&clt_path->stats->kobj_stats);
		goto remove_group;
	}

	err = sysfs_create_group(&sess->stats->kobj_stats,
	err = sysfs_create_group(&clt_path->stats->kobj_stats,
				 &rtrs_clt_stats_attr_group);
	if (err) {
		pr_err("failed to create stats sysfs group, err: %d\n", err);
@@ -464,25 +467,25 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
	return 0;

put_kobj_stats:
	kobject_del(&sess->stats->kobj_stats);
	kobject_put(&sess->stats->kobj_stats);
	kobject_del(&clt_path->stats->kobj_stats);
	kobject_put(&clt_path->stats->kobj_stats);
remove_group:
	sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
	sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
put_kobj:
	kobject_del(&sess->kobj);
	kobject_put(&sess->kobj);
	kobject_del(&clt_path->kobj);
	kobject_put(&clt_path->kobj);

	return err;
}

void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
				 const struct attribute *sysfs_self)
{
	kobject_del(&sess->stats->kobj_stats);
	kobject_put(&sess->stats->kobj_stats);
	kobject_del(&clt_path->stats->kobj_stats);
	kobject_put(&clt_path->stats->kobj_stats);
	if (sysfs_self)
		sysfs_remove_file_self(&sess->kobj, sysfs_self);
	kobject_del(&sess->kobj);
		sysfs_remove_file_self(&clt_path->kobj, sysfs_self);
	kobject_del(&clt_path->kobj);
}

static struct attribute *rtrs_clt_attrs[] = {
@@ -496,12 +499,12 @@ static const struct attribute_group rtrs_clt_attr_group = {
	.attrs = rtrs_clt_attrs,
};

int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt)
{
	return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
}

void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt)
void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt)
{
	sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);

(diff for one file suppressed because it is too large)
@@ -124,9 +124,9 @@ struct rtrs_rbuf {
	u32 rkey;
};

struct rtrs_clt_sess {
	struct rtrs_sess s;
	struct rtrs_clt *clt;
struct rtrs_clt_path {
	struct rtrs_path s;
	struct rtrs_clt_sess *clt;
	wait_queue_head_t state_wq;
	enum rtrs_clt_state state;
	atomic_t connected_cnt;
@@ -153,10 +153,10 @@ struct rtrs_clt_sess {
		*mp_skip_entry;
};

struct rtrs_clt {
struct rtrs_clt_sess {
	struct list_head paths_list; /* rcu protected list */
	size_t paths_num;
	struct rtrs_clt_sess
	struct rtrs_clt_path
	__rcu * __percpu	*pcpu_path;
	uuid_t	paths_uuid;
	int	paths_up;
@@ -186,31 +186,32 @@ static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
	return container_of(c, struct rtrs_clt_con, c);
}

static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s)
{
	return container_of(s, struct rtrs_clt_sess, s);
	return container_of(s, struct rtrs_clt_path, s);
}

static inline int permit_size(struct rtrs_clt *clt)
static inline int permit_size(struct rtrs_clt_sess *clt)
{
	return sizeof(struct rtrs_permit) + clt->pdu_sz;
}

static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt,
					     int idx)
{
	return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
}

int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait);
int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path);
void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait);
int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
				    struct rtrs_addr *addr);
int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
				    const struct attribute *sysfs_self);

void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
void free_sess(struct rtrs_clt_sess *sess);
void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
void free_path(struct rtrs_clt_path *clt_path);

/* rtrs-clt-stats.c */

@@ -239,11 +240,11 @@ ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,

/* rtrs-clt-sysfs.c */

int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt);
int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt);
void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt);

int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path);
void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
				 const struct attribute *sysfs_self);

#endif /* RTRS_CLT_H */
@@ -90,7 +90,7 @@ struct rtrs_ib_dev {
 };
 
 struct rtrs_con {
-	struct rtrs_sess	*sess;
+	struct rtrs_path	*path;
 	struct ib_qp		*qp;
 	struct ib_cq		*cq;
 	struct rdma_cm_id	*cm_id;
@@ -100,7 +100,7 @@ struct rtrs_con {
 	atomic_t		sq_wr_avail;
 };
 
-struct rtrs_sess {
+struct rtrs_path {
 	struct list_head	entry;
 	struct sockaddr_storage	dst_addr;
 	struct sockaddr_storage	src_addr;
@@ -229,11 +229,11 @@ struct rtrs_msg_conn_rsp {
 /**
  * struct rtrs_msg_info_req
  * @type:		@RTRS_MSG_INFO_REQ
- * @sessname:		Session name chosen by client
+ * @pathname:		Path name chosen by client
  */
 struct rtrs_msg_info_req {
 	__le16		type;
-	u8		sessname[NAME_MAX];
+	u8		pathname[NAME_MAX];
 	u8		reserved[15];
 };
 
@@ -313,19 +313,19 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
 
 int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
 
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
 		      u32 max_send_sge, int cq_vector, int nr_cqe,
 		      u32 max_send_wr, u32 max_recv_wr,
 		      enum ib_poll_context poll_ctx);
 void rtrs_cq_qp_destroy(struct rtrs_con *con);
 
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
 		  unsigned int interval_ms, unsigned int missed_max,
 		  void (*err_handler)(struct rtrs_con *con),
 		  struct workqueue_struct *wq);
-void rtrs_start_hb(struct rtrs_sess *sess);
-void rtrs_stop_hb(struct rtrs_sess *sess);
-void rtrs_send_hb_ack(struct rtrs_sess *sess);
+void rtrs_start_hb(struct rtrs_path *path);
+void rtrs_stop_hb(struct rtrs_path *path);
+void rtrs_send_hb_ack(struct rtrs_path *path);
 
 void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
 			   struct rtrs_rdma_dev_pd *pool);
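rtrs_cq_qp_create() now takes the renamed struct rtrs_path and, as its rtrs.c body further down shows, stores the back-pointer into con->path on success. A hedged sketch of a caller, assuming made-up CQ/QP sizing constants and a hypothetical function name; only the rtrs_cq_qp_create() signature and IB_POLL_SOFTIRQ come from the tree:

    /* Sketch only: sizing constants are invented for illustration. */
    static int hypothetical_path_init_con(struct rtrs_path *path,
                                          struct rtrs_con *con, int cq_vector)
    {
        /* creates the CQ and QP and, on success, sets con->path = path */
        return rtrs_cq_qp_create(path, con, /*max_send_sge*/ 1, cq_vector,
                                 /*nr_cqe*/ 128, /*max_send_wr*/ 64,
                                 /*max_recv_wr*/ 64, IB_POLL_SOFTIRQ);
    }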
@@ -15,10 +15,10 @@
 
 static void rtrs_srv_release(struct kobject *kobj)
 {
-	struct rtrs_srv_sess *sess;
+	struct rtrs_srv_path *srv_path;
 
-	sess = container_of(kobj, struct rtrs_srv_sess, kobj);
-	kfree(sess);
+	srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+	kfree(srv_path);
 }
 
 static struct kobj_type ktype = {
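The release handler above leans on container_of() to walk from the embedded kobject back to the object that contains it. A self-contained user-space demonstration of that pattern, with an illustrative model struct (the container_of definition mirrors the kernel's offsetof-based one):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobject { int refcount; };

    struct srv_path_model {
        const char *name;
        struct kobject kobj;    /* embedded, as in struct rtrs_srv_path */
    };

    static void release(struct kobject *kobj)
    {
        /* walk back from the embedded member to the containing object */
        struct srv_path_model *p =
            container_of(kobj, struct srv_path_model, kobj);

        printf("releasing path %s\n", p->name);
    }

    int main(void)
    {
        struct srv_path_model p = { .name = "ip:10.0.0.1@ip:10.0.0.2" };

        release(&p.kobj);
        return 0;
    }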
@@ -36,24 +36,25 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
 					 struct kobj_attribute *attr,
 					 const char *buf, size_t count)
 {
-	struct rtrs_srv_sess *sess;
-	struct rtrs_sess *s;
+	struct rtrs_srv_path *srv_path;
+	struct rtrs_path *s;
 	char str[MAXHOSTNAMELEN];
 
-	sess = container_of(kobj, struct rtrs_srv_sess, kobj);
-	s = &sess->s;
+	srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+	s = &srv_path->s;
 	if (!sysfs_streq(buf, "1")) {
 		rtrs_err(s, "%s: invalid value: '%s'\n",
 			 attr->attr.name, buf);
 		return -EINVAL;
 	}
 
-	sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+	sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str,
+			sizeof(str));
 
 	rtrs_info(s, "disconnect for path %s requested\n", str);
 	/* first remove sysfs itself to avoid deadlock */
-	sysfs_remove_file_self(&sess->kobj, &attr->attr);
-	close_sess(sess);
+	sysfs_remove_file_self(&srv_path->kobj, &attr->attr);
+	close_path(srv_path);
 
 	return count;
 }
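Note the ordering in the store handler above: the attribute removes itself before teardown begins, because tearing down the kobject waits for in-flight sysfs accesses, including the very store() that triggered it. A stripped-down sketch of the same idiom, assuming a hypothetical handler; sysfs_streq() and sysfs_remove_file_self() are real kernel API, the surrounding object is illustrative:

    #include <linux/kobject.h>
    #include <linux/string.h>
    #include <linux/sysfs.h>

    static ssize_t hypothetical_disconnect_store(struct kobject *kobj,
                                                 struct kobj_attribute *attr,
                                                 const char *buf, size_t count)
    {
        if (!sysfs_streq(buf, "1"))
            return -EINVAL;

        /*
         * Drop this attribute first: teardown removes the whole kobject
         * and would deadlock waiting for this very store() to return.
         */
        sysfs_remove_file_self(kobj, &attr->attr);
        /* ... now it is safe to tear the object down ... */
        return count;
    }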
@@ -66,11 +67,11 @@ static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
 				      struct kobj_attribute *attr,
 				      char *page)
 {
-	struct rtrs_srv_sess *sess;
+	struct rtrs_srv_path *srv_path;
 	struct rtrs_con *usr_con;
 
-	sess = container_of(kobj, typeof(*sess), kobj);
-	usr_con = sess->s.con[0];
+	srv_path = container_of(kobj, typeof(*srv_path), kobj);
+	usr_con = srv_path->s.con[0];
 
 	return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
 }
@@ -82,11 +83,11 @@ static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
 				      struct kobj_attribute *attr,
 				      char *page)
 {
-	struct rtrs_srv_sess *sess;
+	struct rtrs_srv_path *srv_path;
 
-	sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+	srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
 
-	return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name);
+	return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name);
 }
 
 static struct kobj_attribute rtrs_srv_hca_name_attr =
@@ -96,11 +97,11 @@ static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
 				      struct kobj_attribute *attr,
 				      char *page)
 {
-	struct rtrs_srv_sess *sess;
+	struct rtrs_srv_path *srv_path;
 	int cnt;
 
-	sess = container_of(kobj, struct rtrs_srv_sess, kobj);
-	cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+	srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+	cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr,
 			      page, PAGE_SIZE);
 	return cnt + sysfs_emit_at(page, cnt, "\n");
 }
@@ -112,11 +113,11 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
 				      struct kobj_attribute *attr,
 				      char *page)
 {
-	struct rtrs_srv_sess *sess;
+	struct rtrs_srv_path *srv_path;
 	int len;
 
-	sess = container_of(kobj, struct rtrs_srv_sess, kobj);
-	len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+	srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+	len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page,
 			      PAGE_SIZE);
 	len += sysfs_emit_at(page, len, "\n");
 	return len;
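The two address show functions above chain sysfs_emit() and sysfs_emit_at(): the first call fills the PAGE_SIZE buffer from offset zero, the second appends at the accumulated offset, and the sum of the two return values is the total length. A hypothetical show function reduced to just that chaining (the emitted text is a placeholder):

    static ssize_t hypothetical_addr_show(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          char *page)
    {
        int len;

        len = sysfs_emit(page, "%s", "ip:192.168.1.2");  /* fill from 0 */
        len += sysfs_emit_at(page, len, "\n");           /* append at offset */
        return len;
    }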
@@ -125,7 +126,7 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
 static struct kobj_attribute rtrs_srv_dst_addr_attr =
 	__ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
 
-static struct attribute *rtrs_srv_sess_attrs[] = {
+static struct attribute *rtrs_srv_path_attrs[] = {
 	&rtrs_srv_hca_name_attr.attr,
 	&rtrs_srv_hca_port_attr.attr,
 	&rtrs_srv_src_addr_attr.attr,
@@ -134,8 +135,8 @@ static struct attribute *rtrs_srv_sess_attrs[] = {
 	NULL,
 };
 
-static const struct attribute_group rtrs_srv_sess_attr_group = {
-	.attrs = rtrs_srv_sess_attrs,
+static const struct attribute_group rtrs_srv_path_attr_group = {
+	.attrs = rtrs_srv_path_attrs,
 };
 
 STAT_ATTR(struct rtrs_srv_stats, rdma,
@@ -151,9 +152,9 @@ static const struct attribute_group rtrs_srv_stats_attr_group = {
 	.attrs = rtrs_srv_stats_attrs,
 };
 
-static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
 {
-	struct rtrs_srv *srv = sess->srv;
+	struct rtrs_srv_sess *srv = srv_path->srv;
 	int err = 0;
 
 	mutex_lock(&srv->paths_mutex);
@@ -164,7 +165,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
 		goto unlock;
 	}
 	srv->dev.class = rtrs_dev_class;
-	err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+	err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
 	if (err)
 		goto unlock;
@@ -196,9 +197,9 @@ unlock:
 }
 
 static void
-rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
 {
-	struct rtrs_srv *srv = sess->srv;
+	struct rtrs_srv_sess *srv = srv_path->srv;
 
 	mutex_lock(&srv->paths_mutex);
 	if (!--srv->dev_ref) {
@@ -213,7 +214,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
 	}
 }
 
-static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+static void rtrs_srv_path_stats_release(struct kobject *kobj)
 {
 	struct rtrs_srv_stats *stats;
 
@@ -224,22 +225,22 @@ static void rtrs_srv_sess_stats_release(struct kobject *kobj)
 
 static struct kobj_type ktype_stats = {
 	.sysfs_ops = &kobj_sysfs_ops,
-	.release = rtrs_srv_sess_stats_release,
+	.release = rtrs_srv_path_stats_release,
 };
 
-static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path)
 {
 	int err;
-	struct rtrs_sess *s = &sess->s;
+	struct rtrs_path *s = &srv_path->s;
 
-	err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
-				   &sess->kobj, "stats");
+	err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats,
+				   &srv_path->kobj, "stats");
 	if (err) {
 		rtrs_err(s, "kobject_init_and_add(): %d\n", err);
-		kobject_put(&sess->stats->kobj_stats);
+		kobject_put(&srv_path->stats->kobj_stats);
 		return err;
 	}
-	err = sysfs_create_group(&sess->stats->kobj_stats,
+	err = sysfs_create_group(&srv_path->stats->kobj_stats,
 				 &rtrs_srv_stats_attr_group);
 	if (err) {
 		rtrs_err(s, "sysfs_create_group(): %d\n", err);
@@ -249,64 +250,64 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
 	return 0;
 
 err:
-	kobject_del(&sess->stats->kobj_stats);
-	kobject_put(&sess->stats->kobj_stats);
+	kobject_del(&srv_path->stats->kobj_stats);
+	kobject_put(&srv_path->stats->kobj_stats);
 
 	return err;
 }
 
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path)
 {
-	struct rtrs_srv *srv = sess->srv;
-	struct rtrs_sess *s = &sess->s;
+	struct rtrs_srv_sess *srv = srv_path->srv;
+	struct rtrs_path *s = &srv_path->s;
 	char str[NAME_MAX];
 	int err;
 	struct rtrs_addr path = {
-		.src = &sess->s.dst_addr,
-		.dst = &sess->s.src_addr,
+		.src = &srv_path->s.dst_addr,
+		.dst = &srv_path->s.src_addr,
 	};
 
 	rtrs_addr_to_str(&path, str, sizeof(str));
-	err = rtrs_srv_create_once_sysfs_root_folders(sess);
+	err = rtrs_srv_create_once_sysfs_root_folders(srv_path);
 	if (err)
 		return err;
 
-	err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+	err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths,
 				   "%s", str);
 	if (err) {
 		rtrs_err(s, "kobject_init_and_add(): %d\n", err);
 		goto destroy_root;
 	}
-	err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+	err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
 	if (err) {
 		rtrs_err(s, "sysfs_create_group(): %d\n", err);
 		goto put_kobj;
 	}
-	err = rtrs_srv_create_stats_files(sess);
+	err = rtrs_srv_create_stats_files(srv_path);
 	if (err)
 		goto remove_group;
 
 	return 0;
 
 remove_group:
-	sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+	sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
 put_kobj:
-	kobject_del(&sess->kobj);
+	kobject_del(&srv_path->kobj);
 destroy_root:
-	kobject_put(&sess->kobj);
-	rtrs_srv_destroy_once_sysfs_root_folders(sess);
+	kobject_put(&srv_path->kobj);
+	rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
 
 	return err;
 }
 
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
 {
-	if (sess->kobj.state_in_sysfs) {
-		kobject_del(&sess->stats->kobj_stats);
-		kobject_put(&sess->stats->kobj_stats);
-		sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
-		kobject_put(&sess->kobj);
+	if (srv_path->kobj.state_in_sysfs) {
+		kobject_del(&srv_path->stats->kobj_stats);
+		kobject_put(&srv_path->stats->kobj_stats);
+		sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+		kobject_put(&srv_path->kobj);
 
-		rtrs_srv_destroy_once_sysfs_root_folders(sess);
+		rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
 	}
 }
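rtrs_srv_create_path_files() above is a textbook goto-unwind ladder: each failing step jumps to a label that undoes exactly the steps that already succeeded, in reverse order. A self-contained user-space sketch of the same control flow, with invented step names:

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
        printf("%s: %s\n", name, fail ? "failed" : "ok");
        return fail ? -1 : 0;
    }

    /* fail_at selects which step fails, to exercise the unwind paths */
    static int create_files(int fail_at)
    {
        int err;

        err = step("create root", fail_at == 1);
        if (err)
            return err;             /* nothing to undo yet */
        err = step("add kobject", fail_at == 2);
        if (err)
            goto destroy_root;
        err = step("create group", fail_at == 3);
        if (err)
            goto del_kobj;
        return 0;

    del_kobj:
        step("del kobject (undo)", 0);
    destroy_root:
        step("destroy root (undo)", 0);
        return err;
    }

    int main(void)
    {
        return create_files(3) ? 1 : 0;
    }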
[File diff suppressed because it is too large]
@@ -37,7 +37,7 @@ struct rtrs_srv_stats_rdma_stats {
 struct rtrs_srv_stats {
 	struct kobject				kobj_stats;
 	struct rtrs_srv_stats_rdma_stats	rdma_stats;
-	struct rtrs_srv_sess			*sess;
+	struct rtrs_srv_path			*srv_path;
 };
 
 struct rtrs_srv_con {
@@ -71,9 +71,9 @@ struct rtrs_srv_mr {
 	struct rtrs_iu	*iu;		/* send buffer for new rkey msg */
 };
 
-struct rtrs_srv_sess {
-	struct rtrs_sess	s;
-	struct rtrs_srv		*srv;
+struct rtrs_srv_path {
+	struct rtrs_path	s;
+	struct rtrs_srv_sess	*srv;
 	struct work_struct	close_work;
 	enum rtrs_srv_state	state;
 	spinlock_t		state_lock;
@@ -90,7 +90,7 @@ struct rtrs_srv_sess {
 	struct rtrs_srv_stats	*stats;
 };
 
-struct rtrs_srv {
+struct rtrs_srv_sess {
 	struct list_head	paths_list;
 	int			paths_up;
 	struct mutex		paths_ev_mutex;
@@ -125,7 +125,7 @@ struct rtrs_srv_ib_ctx {
 
 extern struct class *rtrs_dev_class;
 
-void close_sess(struct rtrs_srv_sess *sess);
+void close_path(struct rtrs_srv_path *srv_path);
 
 static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
 					      size_t size, int d)
@@ -142,7 +142,7 @@ ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
 				char *page, size_t len);
 
 /* functions which are implemented in rtrs-srv-sysfs.c */
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path);
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path);
 
 #endif /* RTRS_SRV_H */
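The rtrs-srv.h hunks show why the rename pays off: the transport-generic struct rtrs_path is embedded as the first member of the server-private struct rtrs_srv_path, so generic code can operate on the inner object while private code recovers the outer one. A user-space sketch of that embedding pattern, with illustrative fields only:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rtrs_path { char sessname[64]; };        /* generic layer */

    struct rtrs_srv_path {                          /* server layer */
        struct rtrs_path s;                         /* embedded */
        int state;
    };

    static void generic_log(struct rtrs_path *s)
    {
        /* generic code sees only the embedded member... */
        printf("path %s\n", s->sessname);
    }

    int main(void)
    {
        struct rtrs_srv_path srv_path = {
            .s = { .sessname = "clt@srv" },
            .state = 1,
        };
        struct rtrs_path *s = &srv_path.s;

        generic_log(s);
        /* ...server code recovers its own type when called back */
        printf("state %d\n",
               container_of(s, struct rtrs_srv_path, s)->state);
        return 0;
    }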
@@ -69,16 +69,16 @@ EXPORT_SYMBOL_GPL(rtrs_iu_free);
 
 int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
 {
-	struct rtrs_sess *sess = con->sess;
+	struct rtrs_path *path = con->path;
 	struct ib_recv_wr wr;
 	struct ib_sge list;
 
 	list.addr = iu->dma_addr;
 	list.length = iu->size;
-	list.lkey = sess->dev->ib_pd->local_dma_lkey;
+	list.lkey = path->dev->ib_pd->local_dma_lkey;
 
 	if (list.length == 0) {
-		rtrs_wrn(con->sess,
+		rtrs_wrn(con->path,
 			 "Posting receive work request failed, sg list is empty\n");
 		return -EINVAL;
 	}
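Stripped of the rtrs wrapping, rtrs_iu_post_recv() fills one ib_sge from the iu's DMA-mapped buffer, points an ib_recv_wr at it, and posts it. A minimal sketch against the core verbs API, with a hypothetical wrapper name and the error handling trimmed:

    #include <rdma/ib_verbs.h>

    static int hypothetical_post_recv(struct ib_qp *qp, u64 dma_addr,
                                      u32 size, u32 lkey, struct ib_cqe *cqe)
    {
        struct ib_sge list = {
            .addr   = dma_addr,     /* DMA-mapped buffer */
            .length = size,
            .lkey   = lkey,         /* pd->local_dma_lkey */
        };
        struct ib_recv_wr wr = {
            .wr_cqe  = cqe,         /* completion routed via this cqe */
            .sg_list = &list,
            .num_sge = 1,
        };

        return ib_post_recv(qp, &wr, NULL);
    }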
@@ -126,7 +126,7 @@ static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
 int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
 		      struct ib_send_wr *head)
 {
-	struct rtrs_sess *sess = con->sess;
+	struct rtrs_path *path = con->path;
 	struct ib_send_wr wr;
 	struct ib_sge list;
 
@@ -135,7 +135,7 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
 
 	list.addr = iu->dma_addr;
 	list.length = size;
-	list.lkey = sess->dev->ib_pd->local_dma_lkey;
+	list.lkey = path->dev->ib_pd->local_dma_lkey;
 
 	wr = (struct ib_send_wr) {
 		.wr_cqe	    = &iu->cqe,
@@ -188,11 +188,11 @@ static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
 					  struct ib_send_wr *head)
 {
 	struct ib_rdma_wr wr;
-	struct rtrs_sess *sess = con->sess;
+	struct rtrs_path *path = con->path;
 	enum ib_send_flags sflags;
 
 	atomic_dec_if_positive(&con->sq_wr_avail);
-	sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
+	sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
 		0 : IB_SEND_SIGNALED;
 
 	wr = (struct ib_rdma_wr) {
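The sflags computation above is selective signaling: only every signal_interval-th work request asks for a completion (IB_SEND_SIGNALED), which keeps the CQ from being flooded while still draining the send queue periodically. Factored out as a sketch, with a hypothetical helper name:

    #include <linux/atomic.h>
    #include <rdma/ib_verbs.h>

    static enum ib_send_flags pick_send_flags(atomic_t *wr_cnt,
                                              int signal_interval)
    {
        /* every Nth WR gets IB_SEND_SIGNALED; the rest complete silently */
        return (atomic_inc_return(wr_cnt) % signal_interval) ?
                0 : IB_SEND_SIGNALED;
    }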
@@ -211,12 +211,12 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
 
 	switch (ev->event) {
 	case IB_EVENT_COMM_EST:
-		rtrs_info(con->sess, "QP event %s (%d) received\n",
+		rtrs_info(con->path, "QP event %s (%d) received\n",
 			  ib_event_msg(ev->event), ev->event);
 		rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
 		break;
 	default:
-		rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+		rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
 			  ib_event_msg(ev->event), ev->event);
 		break;
 	}
@@ -224,7 +224,7 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
 
 static bool is_pollqueue(struct rtrs_con *con)
 {
-	return con->cid >= con->sess->irq_con_num;
+	return con->cid >= con->path->irq_con_num;
 }
 
 static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
@@ -240,7 +240,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
 	cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
 
 	if (IS_ERR(cq)) {
-		rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+		rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
 			 PTR_ERR(cq));
 		return PTR_ERR(cq);
 	}
@@ -271,7 +271,7 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
 
 	ret = rdma_create_qp(cm_id, pd, &init_attr);
 	if (ret) {
-		rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+		rtrs_err(con->path, "Creating QP failed, err: %d\n", ret);
 		return ret;
 	}
 	con->qp = cm_id->qp;
@@ -290,7 +290,7 @@ static void destroy_cq(struct rtrs_con *con)
 	con->cq = NULL;
 }
 
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
 		      u32 max_send_sge, int cq_vector, int nr_cqe,
 		      u32 max_send_wr, u32 max_recv_wr,
 		      enum ib_poll_context poll_ctx)
@@ -301,13 +301,13 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
 	if (err)
 		return err;
 
-	err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
+	err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
 			max_send_sge);
 	if (err) {
 		destroy_cq(con);
 		return err;
 	}
-	con->sess = sess;
+	con->path = path;
 
 	return 0;
 }
@@ -323,24 +323,24 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
 }
 EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
 
-static void schedule_hb(struct rtrs_sess *sess)
+static void schedule_hb(struct rtrs_path *path)
 {
-	queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
-			   msecs_to_jiffies(sess->hb_interval_ms));
+	queue_delayed_work(path->hb_wq, &path->hb_dwork,
+			   msecs_to_jiffies(path->hb_interval_ms));
 }
 
-void rtrs_send_hb_ack(struct rtrs_sess *sess)
+void rtrs_send_hb_ack(struct rtrs_path *path)
 {
-	struct rtrs_con *usr_con = sess->con[0];
+	struct rtrs_con *usr_con = path->con[0];
 	u32 imm;
 	int err;
 
 	imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
-	err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+	err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
 					     NULL);
 	if (err) {
-		rtrs_err(sess, "send HB ACK failed, errno: %d\n", err);
-		sess->hb_err_handler(usr_con);
+		rtrs_err(path, "send HB ACK failed, errno: %d\n", err);
+		path->hb_err_handler(usr_con);
 		return;
 	}
 }
@@ -349,63 +349,63 @@ EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
 static void hb_work(struct work_struct *work)
 {
 	struct rtrs_con *usr_con;
-	struct rtrs_sess *sess;
+	struct rtrs_path *path;
 	u32 imm;
 	int err;
 
-	sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
-	usr_con = sess->con[0];
+	path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
+	usr_con = path->con[0];
 
-	if (sess->hb_missed_cnt > sess->hb_missed_max) {
-		rtrs_err(sess, "HB missed max reached.\n");
-		sess->hb_err_handler(usr_con);
+	if (path->hb_missed_cnt > path->hb_missed_max) {
+		rtrs_err(path, "HB missed max reached.\n");
+		path->hb_err_handler(usr_con);
 		return;
 	}
-	if (sess->hb_missed_cnt++) {
+	if (path->hb_missed_cnt++) {
 		/* Reschedule work without sending hb */
-		schedule_hb(sess);
+		schedule_hb(path);
 		return;
 	}
 
-	sess->hb_last_sent = ktime_get();
+	path->hb_last_sent = ktime_get();
 
 	imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
-	err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+	err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
 					     NULL);
 	if (err) {
-		rtrs_err(sess, "HB send failed, errno: %d\n", err);
-		sess->hb_err_handler(usr_con);
+		rtrs_err(path, "HB send failed, errno: %d\n", err);
+		path->hb_err_handler(usr_con);
 		return;
 	}
 
-	schedule_hb(sess);
+	schedule_hb(path);
 }
 
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
 		  unsigned int interval_ms, unsigned int missed_max,
 		  void (*err_handler)(struct rtrs_con *con),
 		  struct workqueue_struct *wq)
 {
-	sess->hb_cqe = cqe;
-	sess->hb_interval_ms = interval_ms;
-	sess->hb_err_handler = err_handler;
-	sess->hb_wq = wq;
-	sess->hb_missed_max = missed_max;
-	sess->hb_missed_cnt = 0;
-	INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+	path->hb_cqe = cqe;
+	path->hb_interval_ms = interval_ms;
+	path->hb_err_handler = err_handler;
+	path->hb_wq = wq;
+	path->hb_missed_max = missed_max;
+	path->hb_missed_cnt = 0;
+	INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
 }
 EXPORT_SYMBOL_GPL(rtrs_init_hb);
 
-void rtrs_start_hb(struct rtrs_sess *sess)
+void rtrs_start_hb(struct rtrs_path *path)
 {
-	schedule_hb(sess);
+	schedule_hb(path);
 }
 EXPORT_SYMBOL_GPL(rtrs_start_hb);
 
-void rtrs_stop_hb(struct rtrs_sess *sess)
+void rtrs_stop_hb(struct rtrs_path *path)
 {
-	cancel_delayed_work_sync(&sess->hb_dwork);
-	sess->hb_missed_cnt = 0;
+	cancel_delayed_work_sync(&path->hb_dwork);
+	path->hb_missed_cnt = 0;
 }
 EXPORT_SYMBOL_GPL(rtrs_stop_hb);
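hb_work() above implements a small per-path state machine: a tick sends a fresh heartbeat only when hb_missed_cnt is zero (the ack handler elsewhere resets it), reschedules silently while an ack is still outstanding, and invokes the error handler once hb_missed_max is exceeded. A user-space simulation of that logic, with an invented ack schedule:

    #include <stdio.h>

    struct hb { int missed_cnt, missed_max; };

    static const char *hb_tick(struct hb *hb, int ack_arrived)
    {
        if (ack_arrived)
            hb->missed_cnt = 0;     /* ack handler resets the counter */
        if (hb->missed_cnt > hb->missed_max)
            return "error: HB missed max reached";
        if (hb->missed_cnt++)
            return "reschedule only (still waiting for ack)";
        return "send HB and reschedule";
    }

    int main(void)
    {
        struct hb hb = { .missed_cnt = 0, .missed_max = 2 };
        int acks[] = { 0, 1, 0, 0, 0, 0 };

        for (int i = 0; i < 6; i++)
            printf("tick %d: %s\n", i, hb_tick(&hb, acks[i]));
        return 0;
    }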
@@ -13,9 +13,9 @@
 #include <linux/scatterlist.h>
 
 struct rtrs_permit;
-struct rtrs_clt;
+struct rtrs_clt_sess;
 struct rtrs_srv_ctx;
-struct rtrs_srv;
+struct rtrs_srv_sess;
 struct rtrs_srv_op;
 
 /*
@@ -52,14 +52,14 @@ struct rtrs_clt_ops {
 	void	(*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
 };
 
-struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
-			       const char *sessname,
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+				    const char *pathname,
 				 const struct rtrs_addr *paths,
 				 size_t path_cnt, u16 port,
 				 size_t pdu_sz, u8 reconnect_delay_sec,
 				 s16 max_reconnect_attempts, u32 nr_poll_queues);
 
-void rtrs_clt_close(struct rtrs_clt *sess);
+void rtrs_clt_close(struct rtrs_clt_sess *clt);
 
 enum wait_type {
 	RTRS_PERMIT_NOWAIT = 0,
@@ -77,11 +77,12 @@ enum rtrs_clt_con_type {
 	RTRS_IO_CON
 };
 
-struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
-					enum rtrs_clt_con_type con_type,
-					enum wait_type wait);
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *sess,
+					enum rtrs_clt_con_type con_type,
+					enum wait_type wait);
 
-void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
+void rtrs_clt_put_permit(struct rtrs_clt_sess *sess,
+			 struct rtrs_permit *permit);
 
 /**
  * rtrs_clt_req_ops - it holds the request confirmation callback
@@ -98,10 +99,10 @@ struct rtrs_clt_req_ops {
 };
 
 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
-		     struct rtrs_clt *sess, struct rtrs_permit *permit,
+		     struct rtrs_clt_sess *sess, struct rtrs_permit *permit,
 		     const struct kvec *vec, size_t nr, size_t len,
 		     struct scatterlist *sg, unsigned int sg_cnt);
-int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index);
 
 /**
  * rtrs_attrs - RTRS session attributes
@@ -112,7 +113,7 @@ struct rtrs_attrs {
 	u32	max_segments;
 };
 
-int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr);
 
 /*
  * Here goes RTRS server API
@@ -163,7 +164,7 @@ struct rtrs_srv_ops {
 	 * @priv:	Private data from user if previously set with
 	 *		rtrs_srv_set_sess_priv()
 	 */
-	int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+	int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev,
 		       void *priv);
 };
 
@@ -173,11 +174,12 @@ void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
 
 bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
 
-void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv);
 
-int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname,
+			   size_t len);
 
-int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess);
 
 int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
 			  struct rtrs_addr *addr);
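For a ULP, the renamed public API in rtrs.h reads as: open a session, reserve a permit on a connection, issue the request, and return the permit on failure (on success the confirmation callback runs first). A hedged outline under those declarations; the helper name is hypothetical, the callback and scatterlist setup are elided, so this is a shape, not a working ULP:

    #include "rtrs.h"    /* in-tree header declaring the client API above */

    static int hypothetical_send(struct rtrs_clt_sess *clt,
                                 struct rtrs_clt_req_ops *req_ops,
                                 struct kvec *vec, size_t nr, size_t len,
                                 struct scatterlist *sg, unsigned int sg_cnt)
    {
        struct rtrs_permit *permit;
        int err;

        /* reserve a slot on an IO connection, waiting until one frees up */
        permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
        if (!permit)
            return -ENOMEM;

        err = rtrs_clt_request(READ /* direction constant */, req_ops, clt,
                               permit, vec, nr, len, sg, sg_cnt);
        if (err)
            rtrs_clt_put_permit(clt, permit);
        return err;
    }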