v6.4 merge window RDMA pull request

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Usual wide collection of unrelated items in drivers:

  - Driver bug fixes and treewide cleanups in hfi1, siw, qib, mlx5, rxe,
    usnic, bnxt_re, ocrdma, iser:
      - remove unnecessary NULL checks
      - kmap obsolescence
      - pci_enable_pcie_error_reporting() obsolescence
      - unused variables and macros
      - trace event related warnings
      - casting warnings

  - Code cleanups for irdma and erdma

  - EFA reporting of 128 byte PCIe TLP support

  - mlx5 more aggressively uses the out of order HW feature

  - Big rework of how state machines and tasks work in rxe

  - Fix a syzkaller-found netdev refcount leak crash in siw

  - bnxt_re revises their HW description header

  - Congestion control for bnxt_re

  - Use mmu_notifiers more safely in hfi1

  - mlx5 gets better support for PCIe relaxed ordering inside VMs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (81 commits)
  RDMA/efa: Add rdma write capability to device caps
  RDMA/mlx5: Use correct device num_ports when modify DC
  RDMA/irdma: Drop spurious WQ_UNBOUND from alloc_ordered_workqueue() call
  RDMA/rxe: Fix spinlock recursion deadlock on requester
  RDMA/mlx5: Fix flow counter query via DEVX
  RDMA/rxe: Protect QP state with qp->state_lock
  RDMA/rxe: Move code to check if drained to subroutine
  RDMA/rxe: Remove qp->req.state
  RDMA/rxe: Remove qp->comp.state
  RDMA/rxe: Remove qp->resp.state
  RDMA/mlx5: Allow relaxed ordering read in VFs and VMs
  net/mlx5: Update relaxed ordering read HCA capabilities
  RDMA/mlx5: Check pcie_relaxed_ordering_enabled() in UMR
  RDMA/mlx5: Remove pcie_relaxed_ordering_enabled() check for RO write
  RDMA: Add ib_virt_dma_to_page()
  RDMA/rxe: Fix the error "trying to register non-static key in rxe_cleanup_task"
  RDMA/irdma: Slightly optimize irdma_form_ah_cm_frame()
  RDMA/rxe: Fix incorrect TASKLET_STATE_SCHED check in rxe_task.c
  IB/hfi1: Place struct mmu_rb_handler on cache line start
  IB/hfi1: Fix bugs with non-PAGE_SIZE-end multi-iovec user SDMA requests
  ...
commit af3877265d
@@ -2912,6 +2912,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
            (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
                return -EINVAL;

        trace_icm_send_rej(&cm_id_priv->id, reason);

        switch (state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:

@@ -2942,7 +2944,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
                return -EINVAL;
        }

        trace_icm_send_rej(&cm_id_priv->id, reason);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                cm_free_msg(msg);
@@ -709,8 +709,7 @@ cma_validate_port(struct ib_device *device, u32 port,
        }

        sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
        if (ndev)
                dev_put(ndev);
        dev_put(ndev);
        return sgid_attr;
}

@@ -2429,8 +2428,7 @@ err_unlock:
        mutex_unlock(&listen_id->handler_mutex);

net_dev_put:
        if (net_dev)
                dev_put(net_dev);
        dev_put(net_dev);

        return ret;
}
@@ -131,6 +131,11 @@ struct ib_umad_packet {
        struct ib_user_mad mad;
};

struct ib_rmpp_mad_hdr {
        struct ib_mad_hdr mad_hdr;
        struct ib_rmpp_hdr rmpp_hdr;
} __packed;

#define CREATE_TRACE_POINTS
#include <trace/events/ib_umad.h>

@@ -494,11 +499,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
        struct ib_umad_packet *packet;
        struct ib_mad_agent *agent;
        struct rdma_ah_attr ah_attr;
        struct ib_ah *ah;
        struct ib_rmpp_mad *rmpp_mad;
        __be64 *tid;
        int ret, data_len, hdr_len, copy_offset, rmpp_active;
        u8 base_version;

@@ -506,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
        if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
                return -EINVAL;

        packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
        packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
        if (!packet)
                return -ENOMEM;

@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                goto err_up;
        }

        rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
        hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
        rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
        hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);

        if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
        if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
            && ib_mad_kernel_rmpp_agent(agent)) {
                copy_offset = IB_MGMT_RMPP_HDR;
                rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
                              IB_MGMT_RMPP_FLAG_ACTIVE;
        } else {
                copy_offset = IB_MGMT_MAD_HDR;

@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
                *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
                                   (be64_to_cpup(tid) & 0xffffffff));
                rmpp_mad->mad_hdr.tid = *tid;
                rmpp_mad_hdr->mad_hdr.tid = *tid;
        }

        if (!ib_mad_kernel_rmpp_agent(agent)
            && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
            && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
            && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
            && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
                spin_lock_irq(&file->send_lock);
                list_add_tail(&packet->list, &file->send_list);
                spin_unlock_irq(&file->send_lock);
@@ -2912,6 +2912,106 @@ fail:
        return rc;
}

static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
{
        struct bnxt_re_dev *rdev = cq->rdev;

        bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);

        cq->qplib_cq.max_wqe = cq->resize_cqe;
        if (cq->resize_umem) {
                ib_umem_release(cq->umem);
                cq->umem = cq->resize_umem;
                cq->resize_umem = NULL;
                cq->resize_cqe = 0;
        }
}

int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct bnxt_qplib_sg_info sg_info = {};
        struct bnxt_qplib_dpi *orig_dpi = NULL;
        struct bnxt_qplib_dev_attr *dev_attr;
        struct bnxt_re_ucontext *uctx = NULL;
        struct bnxt_re_resize_cq_req req;
        struct bnxt_re_dev *rdev;
        struct bnxt_re_cq *cq;
        int rc, entries;

        cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
        rdev = cq->rdev;
        dev_attr = &rdev->dev_attr;
        if (!ibcq->uobject) {
                ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
                return -EOPNOTSUPP;
        }

        if (cq->resize_umem) {
                ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
                          cq->qplib_cq.id);
                return -EBUSY;
        }

        /* Check the requested cq depth out of supported depth */
        if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
                ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
                          cq->qplib_cq.id, cqe);
                return -EINVAL;
        }

        entries = roundup_pow_of_two(cqe + 1);
        if (entries > dev_attr->max_cq_wqes + 1)
                entries = dev_attr->max_cq_wqes + 1;

        uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
                                         ib_uctx);
        /* uverbs consumer */
        if (ib_copy_from_udata(&req, udata, sizeof(req))) {
                rc = -EFAULT;
                goto fail;
        }

        cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
                                      entries * sizeof(struct cq_base),
                                      IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(cq->resize_umem)) {
                rc = PTR_ERR(cq->resize_umem);
                cq->resize_umem = NULL;
                ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
                          __func__, rc);
                goto fail;
        }
        cq->resize_cqe = entries;
        memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
        orig_dpi = cq->qplib_cq.dpi;

        cq->qplib_cq.sg_info.umem = cq->resize_umem;
        cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
        cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
        cq->qplib_cq.dpi = &uctx->dpi;

        rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
                          cq->qplib_cq.id);
                goto fail;
        }

        cq->ib_cq.cqe = cq->resize_cqe;

        return 0;

fail:
        if (cq->resize_umem) {
                ib_umem_release(cq->resize_umem);
                cq->resize_umem = NULL;
                cq->resize_cqe = 0;
                memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
                cq->qplib_cq.dpi = orig_dpi;
        }
        return rc;
}

static u8 __req_to_ib_wc_status(u8 qstatus)
{
        switch (qstatus) {

@@ -3425,6 +3525,15 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
        struct bnxt_re_sqp_entries *sqp_entry = NULL;
        unsigned long flags;

        /* User CQ; the only processing we do is to
         * complete any pending CQ resize operation.
         */
        if (cq->umem) {
                if (cq->resize_umem)
                        bnxt_re_resize_cq_complete(cq);
                return 0;
        }

        spin_lock_irqsave(&cq->cq_lock, flags);
        budget = min_t(u32, num_entries, cq->max_cql);
        num_entries = budget;
@@ -104,6 +104,8 @@ struct bnxt_re_cq {
#define MAX_CQL_PER_POLL 1024
        u32 max_cql;
        struct ib_umem *umem;
        struct ib_umem *resize_umem;
        int resize_cqe;
};

struct bnxt_re_mr {

@@ -191,6 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
                      const struct ib_recv_wr **bad_recv_wr);
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct ib_udata *udata);
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
@@ -553,6 +553,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
        .query_srq = bnxt_re_query_srq,
        .reg_user_mr = bnxt_re_reg_user_mr,
        .req_notify_cq = bnxt_re_req_notify_cq,
        .resize_cq = bnxt_re_resize_cq,
        INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
        INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
        INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),

@@ -584,6 +585,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
                return ret;

        dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
        ibdev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ);
        return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

@@ -919,49 +921,6 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
        }
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
                                      u64 *cid_map)
{
        struct hwrm_queue_pri2cos_qcfg_input req = {0};
        struct hwrm_queue_pri2cos_qcfg_output resp;
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct bnxt_fw_msg fw_msg;
        u32 flags = 0;
        u8 *qcfgmap, *tmp_map;
        int rc = 0, i;

        if (!cid_map)
                return -EINVAL;

        memset(&fw_msg, 0, sizeof(fw_msg));
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
                              HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
        flags |= (dir & 0x01);
        flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
        req.flags = cpu_to_le32(flags);
        req.port_id = en_dev->pf_port_id;

        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
        rc = bnxt_send_msg(en_dev, &fw_msg);
        if (rc)
                return rc;

        if (resp.queue_cfg_info) {
                ibdev_warn(&rdev->ibdev,
                           "Asymmetric cos queue configuration detected");
                ibdev_warn(&rdev->ibdev,
                           " on device, QoS may not be fully functional\n");
        }
        qcfgmap = &resp.pri0_cos_queue_id;
        tmp_map = (u8 *)cid_map;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                tmp_map[i] = qcfgmap[i];

        return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
                                        struct bnxt_re_qp *qp)
{

@@ -1054,26 +1013,9 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
        return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
        u16 prio;
        u8 id;

        for (prio = 0, id = 0; prio < 8; prio++) {
                if (prio_map & (1 << prio)) {
                        cosq[id] = cid_map[prio];
                        id++;
                        if (id == 2) /* Max 2 tcs supported */
                                break;
                }
        }
}

static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
        u8 prio_map = 0;
        u64 cid_map;
        int rc;

        /* Get priority for roce */
        prio_map = bnxt_re_get_priority_mask(rdev);

@@ -1081,23 +1023,6 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
        if (prio_map == rdev->cur_prio_map)
                return 0;
        rdev->cur_prio_map = prio_map;
        /* Get cosq id for this priority */
        rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
        if (rc) {
                ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
                return rc;
        }
        /* Parse CoS IDs for app priority */
        bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

        /* Config BONO. */
        rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
        if (rc) {
                ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
                           rdev->cosq[0], rdev->cosq[1]);
                return rc;
        }

        /* Actual priorities are not programmed as they are already
         * done by L2 driver; just enable or disable priority vlan tagging
         */

@@ -1407,6 +1332,27 @@ exit:
        return rc;
}

static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
{
        struct bnxt_qplib_cc_param cc_param = {};

        /* Currently enabling only for GenP5 adapters */
        if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
                return;

        if (enable) {
                cc_param.enable = 1;
                cc_param.cc_mode = CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE;
        }

        cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE |
                         CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
                         CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);

        if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
                ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".

@@ -1475,7 +1421,7 @@ static void bnxt_re_remove(struct auxiliary_device *adev)
                 */
                goto skip_remove;
        }

        bnxt_re_setup_cc(rdev, false);
        ib_unregister_device(&rdev->ibdev);
        ib_dealloc_device(&rdev->ibdev);
        bnxt_re_dev_uninit(rdev);

@@ -1507,6 +1453,7 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
                goto err;
        }

        bnxt_re_setup_cc(rdev, true);
        mutex_unlock(&bnxt_re_mutex);
        return 0;
@ -300,8 +300,6 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
|
|||
{
|
||||
struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
|
||||
struct bnxt_qplib_hwq *hwq = &nq->hwq;
|
||||
int num_srqne_processed = 0;
|
||||
int num_cqne_processed = 0;
|
||||
struct bnxt_qplib_cq *cq;
|
||||
int budget = nq->budget;
|
||||
u32 sw_cons, raw_cons;
|
||||
|
@ -340,9 +338,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
|
|||
DBC_DBC_TYPE_CQ_ARMENA);
|
||||
spin_lock_bh(&cq->compl_lock);
|
||||
atomic_set(&cq->arm_state, 0);
|
||||
if (!nq->cqn_handler(nq, (cq)))
|
||||
num_cqne_processed++;
|
||||
else
|
||||
if (nq->cqn_handler(nq, (cq)))
|
||||
dev_warn(&nq->pdev->dev,
|
||||
"cqn - type 0x%x not handled\n", type);
|
||||
cq->cnq_events++;
|
||||
|
@ -361,11 +357,9 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
|
|||
srq = (struct bnxt_qplib_srq *)q_handle;
|
||||
bnxt_qplib_armen_db(&srq->dbinfo,
|
||||
DBC_DBC_TYPE_SRQ_ARMENA);
|
||||
if (!nq->srqn_handler(nq,
|
||||
(struct bnxt_qplib_srq *)q_handle,
|
||||
nqsrqe->event))
|
||||
num_srqne_processed++;
|
||||
else
|
||||
if (nq->srqn_handler(nq,
|
||||
(struct bnxt_qplib_srq *)q_handle,
|
||||
nqsrqe->event))
|
||||
dev_warn(&nq->pdev->dev,
|
||||
"SRQ event 0x%x not handled\n",
|
||||
nqsrqe->event);
|
||||
|
@ -581,18 +575,20 @@ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
|
|||
struct bnxt_qplib_srq *srq)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_destroy_srq req;
|
||||
struct creq_destroy_srq_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_destroy_srq_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_destroy_srq req = {};
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DESTROY_SRQ,
|
||||
sizeof(req));
|
||||
|
||||
/* Configure the request */
|
||||
req.srq_cid = cpu_to_le32(srq->id);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
|
||||
(struct creq_base *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
kfree(srq->swq);
|
||||
if (rc)
|
||||
return;
|
||||
|
@ -604,10 +600,10 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
|
|||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct bnxt_qplib_hwq_attr hwq_attr = {};
|
||||
struct creq_create_srq_resp resp;
|
||||
struct cmdq_create_srq req;
|
||||
struct creq_create_srq_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_create_srq req = {};
|
||||
struct bnxt_qplib_pbl *pbl;
|
||||
u16 cmd_flags = 0;
|
||||
u16 pg_sz_lvl;
|
||||
int rc, idx;
|
||||
|
||||
|
@ -627,7 +623,9 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_CREATE_SRQ,
|
||||
sizeof(req));
|
||||
|
||||
/* Configure the request */
|
||||
req.dpi = cpu_to_le32(srq->dpi->dpi);
|
||||
|
@ -644,8 +642,8 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
|
|||
req.pd_id = cpu_to_le32(srq->pd->id);
|
||||
req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
|
@ -700,14 +698,16 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
|
|||
struct bnxt_qplib_srq *srq)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_query_srq req;
|
||||
struct creq_query_srq_resp resp;
|
||||
struct creq_query_srq_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf;
|
||||
struct creq_query_srq_resp_sb *sb;
|
||||
u16 cmd_flags = 0;
|
||||
struct cmdq_query_srq req = {};
|
||||
int rc = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_QUERY_SRQ,
|
||||
sizeof(req));
|
||||
|
||||
/* Configure the request */
|
||||
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
|
||||
|
@ -716,8 +716,9 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
|
|||
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
|
||||
req.srq_cid = cpu_to_le32(srq->id);
|
||||
sb = sbuf->sb;
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
(void *)sbuf, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
srq->threshold = le16_to_cpu(sb->srq_limit);
|
||||
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
|
||||
|
||||
|
@ -811,19 +812,20 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
{
|
||||
struct bnxt_qplib_hwq_attr hwq_attr = {};
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct creq_create_qp1_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct bnxt_qplib_q *sq = &qp->sq;
|
||||
struct bnxt_qplib_q *rq = &qp->rq;
|
||||
struct creq_create_qp1_resp resp;
|
||||
struct cmdq_create_qp1 req;
|
||||
struct cmdq_create_qp1 req = {};
|
||||
struct bnxt_qplib_pbl *pbl;
|
||||
u16 cmd_flags = 0;
|
||||
u32 qp_flags = 0;
|
||||
u8 pg_sz_lvl;
|
||||
u32 tbl_indx;
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
|
||||
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_CREATE_QP1,
|
||||
sizeof(req));
|
||||
/* General */
|
||||
req.type = qp->type;
|
||||
req.dpi = cpu_to_le32(qp->dpi->dpi);
|
||||
|
@ -891,8 +893,8 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
req.qp_flags = cpu_to_le32(qp_flags);
|
||||
req.pd_id = cpu_to_le32(qp->pd->id);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
|
@ -952,20 +954,22 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct bnxt_qplib_hwq_attr hwq_attr = {};
|
||||
struct bnxt_qplib_sg_info sginfo = {};
|
||||
struct creq_create_qp_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct bnxt_qplib_q *sq = &qp->sq;
|
||||
struct bnxt_qplib_q *rq = &qp->rq;
|
||||
struct creq_create_qp_resp resp;
|
||||
struct cmdq_create_qp req = {};
|
||||
int rc, req_size, psn_sz = 0;
|
||||
struct bnxt_qplib_hwq *xrrq;
|
||||
struct bnxt_qplib_pbl *pbl;
|
||||
struct cmdq_create_qp req;
|
||||
u16 cmd_flags = 0;
|
||||
u32 qp_flags = 0;
|
||||
u8 pg_sz_lvl;
|
||||
u32 tbl_indx;
|
||||
u16 nsge;
|
||||
|
||||
RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_CREATE_QP,
|
||||
sizeof(req));
|
||||
|
||||
/* General */
|
||||
req.type = qp->type;
|
||||
|
@ -1098,8 +1102,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
}
|
||||
req.pd_id = cpu_to_le32(qp->pd->id);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
|
@ -1231,14 +1236,16 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
|
|||
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_modify_qp req;
|
||||
struct creq_modify_qp_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_modify_qp_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_modify_qp req = {};
|
||||
u32 temp32[4];
|
||||
u32 bmask;
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_MODIFY_QP,
|
||||
sizeof(req));
|
||||
|
||||
/* Filter out the qp_attr_mask based on the state->new transition */
|
||||
__filter_modify_flags(qp);
|
||||
|
@ -1286,7 +1293,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
memcpy(req.dest_mac, qp->ah.dmac, 6);
|
||||
|
||||
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
|
||||
req.path_mtu = qp->path_mtu;
|
||||
req.path_mtu_pingpong_push_enable |= qp->path_mtu;
|
||||
|
||||
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
|
||||
req.timeout = qp->timeout;
|
||||
|
@ -1324,8 +1331,8 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
|
||||
req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
qp->cur_qp_state = qp->state;
|
||||
|
@ -1335,15 +1342,17 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_query_qp req;
|
||||
struct creq_query_qp_resp resp;
|
||||
struct creq_query_qp_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf;
|
||||
struct creq_query_qp_resp_sb *sb;
|
||||
u16 cmd_flags = 0;
|
||||
struct cmdq_query_qp req = {};
|
||||
u32 temp32[4];
|
||||
int i, rc = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_QUERY_QP,
|
||||
sizeof(req));
|
||||
|
||||
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
|
||||
if (!sbuf)
|
||||
|
@ -1352,8 +1361,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
|||
|
||||
req.qp_cid = cpu_to_le32(qp->id);
|
||||
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
(void *)sbuf, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto bail;
|
||||
/* Extract the context from the side buffer */
|
||||
|
@ -1460,9 +1470,9 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
|
|||
struct bnxt_qplib_qp *qp)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_destroy_qp req;
|
||||
struct creq_destroy_qp_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_destroy_qp_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_destroy_qp req = {};
|
||||
u32 tbl_indx;
|
||||
int rc;
|
||||
|
||||
|
@ -1470,11 +1480,14 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
|
|||
rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
|
||||
rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
|
||||
|
||||
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DESTROY_QP,
|
||||
sizeof(req));
|
||||
|
||||
req.qp_cid = cpu_to_le32(qp->id);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc) {
|
||||
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
|
||||
rcfw->qp_tbl[tbl_indx].qp_handle = qp;
|
||||
|
@ -2036,10 +2049,10 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
|
|||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct bnxt_qplib_hwq_attr hwq_attr = {};
|
||||
struct creq_create_cq_resp resp;
|
||||
struct creq_create_cq_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_create_cq req = {};
|
||||
struct bnxt_qplib_pbl *pbl;
|
||||
struct cmdq_create_cq req;
|
||||
u16 cmd_flags = 0;
|
||||
u32 pg_sz_lvl;
|
||||
int rc;
|
||||
|
||||
|
@ -2052,7 +2065,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
|
|||
if (rc)
|
||||
goto exit;
|
||||
|
||||
RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_CREATE_CQ,
|
||||
sizeof(req));
|
||||
|
||||
if (!cq->dpi) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
|
@ -2071,9 +2086,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
|
|||
req.cq_fco_cnq_id = cpu_to_le32(
|
||||
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
|
||||
CMDQ_CREATE_CQ_CNQ_ID_SFT);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
|
@ -2100,20 +2115,70 @@ exit:
|
|||
return rc;
|
||||
}
|
||||
|
||||
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_cq *cq)
|
||||
{
|
||||
bnxt_qplib_free_hwq(res, &cq->hwq);
|
||||
memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
|
||||
}
|
||||
|
||||
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
|
||||
int new_cqes)
|
||||
{
|
||||
struct bnxt_qplib_hwq_attr hwq_attr = {};
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct creq_resize_cq_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_resize_cq req = {};
|
||||
struct bnxt_qplib_pbl *pbl;
|
||||
u32 pg_sz, lvl, new_sz;
|
||||
int rc;
|
||||
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_RESIZE_CQ,
|
||||
sizeof(req));
|
||||
hwq_attr.sginfo = &cq->sg_info;
|
||||
hwq_attr.res = res;
|
||||
hwq_attr.depth = new_cqes;
|
||||
hwq_attr.stride = sizeof(struct cq_base);
|
||||
hwq_attr.type = HWQ_TYPE_QUEUE;
|
||||
rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
req.cq_cid = cpu_to_le32(cq->id);
|
||||
pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
|
||||
pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
|
||||
lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
|
||||
CMDQ_RESIZE_CQ_LVL_MASK;
|
||||
new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
|
||||
CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
|
||||
req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
|
||||
req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
|
||||
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_destroy_cq req;
|
||||
struct creq_destroy_cq_resp resp;
|
||||
struct creq_destroy_cq_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_destroy_cq req = {};
|
||||
u16 total_cnq_events;
|
||||
u16 cmd_flags = 0;
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DESTROY_CQ,
|
||||
sizeof(req));
|
||||
|
||||
req.cq_cid = cpu_to_le32(cq->id);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
total_cnq_events = le16_to_cpu(resp.total_cnq_events);
|
||||
|
|
|
@ -400,6 +400,7 @@ struct bnxt_qplib_cq {
|
|||
u16 count;
|
||||
u16 period;
|
||||
struct bnxt_qplib_hwq hwq;
|
||||
struct bnxt_qplib_hwq resize_hwq;
|
||||
u32 cnq_hw_ring_id;
|
||||
struct bnxt_qplib_nq *nq;
|
||||
bool resize_in_progress;
|
||||
|
@ -532,6 +533,10 @@ void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
|
|||
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
|
||||
struct bnxt_qplib_swqe *wqe);
|
||||
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
|
||||
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
|
||||
int new_cqes);
|
||||
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_cq *cq);
|
||||
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
|
||||
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
|
||||
int num, struct bnxt_qplib_qp **qp);
|
||||
|
|
|
@ -49,6 +49,7 @@
|
|||
#include "qplib_rcfw.h"
|
||||
#include "qplib_sp.h"
|
||||
#include "qplib_fp.h"
|
||||
#include "qplib_tlv.h"
|
||||
|
||||
static void bnxt_qplib_service_creq(struct tasklet_struct *t);
|
||||
|
||||
|
@ -85,8 +86,8 @@ done:
|
|||
return count ? 0 : -ETIMEDOUT;
|
||||
};
|
||||
|
||||
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
||||
struct creq_base *resp, void *sb, u8 is_block)
|
||||
static int __send_message(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_cmdqmsg *msg)
|
||||
{
|
||||
struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
|
||||
struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
|
||||
|
@ -95,13 +96,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
|||
u32 sw_prod, cmdq_prod;
|
||||
struct pci_dev *pdev;
|
||||
unsigned long flags;
|
||||
u32 size, opcode;
|
||||
u32 bsize, opcode;
|
||||
u16 cookie, cbit;
|
||||
u8 *preq;
|
||||
|
||||
pdev = rcfw->pdev;
|
||||
|
||||
opcode = req->opcode;
|
||||
opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
|
||||
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
|
||||
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
|
||||
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
|
||||
|
@ -124,7 +125,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
|||
* cmdqe
|
||||
*/
|
||||
spin_lock_irqsave(&hwq->lock, flags);
|
||||
if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
|
||||
if (msg->req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
|
||||
dev_err(&pdev->dev, "RCFW: CMDQ is full!\n");
|
||||
spin_unlock_irqrestore(&hwq->lock, flags);
|
||||
return -EAGAIN;
|
||||
|
@ -133,36 +134,34 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
|||
|
||||
cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
|
||||
cbit = cookie % rcfw->cmdq_depth;
|
||||
if (is_block)
|
||||
if (msg->block)
|
||||
cookie |= RCFW_CMD_IS_BLOCKING;
|
||||
|
||||
set_bit(cbit, cmdq->cmdq_bitmap);
|
||||
req->cookie = cpu_to_le16(cookie);
|
||||
__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
|
||||
crsqe = &rcfw->crsqe_tbl[cbit];
|
||||
if (crsqe->resp) {
|
||||
spin_unlock_irqrestore(&hwq->lock, flags);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
size = req->cmd_size;
|
||||
/* change the cmd_size to the number of 16byte cmdq unit.
|
||||
* req->cmd_size is modified here
|
||||
*/
|
||||
bnxt_qplib_set_cmd_slots(req);
|
||||
bsize = bnxt_qplib_set_cmd_slots(msg->req);
|
||||
|
||||
memset(resp, 0, sizeof(*resp));
|
||||
crsqe->resp = (struct creq_qp_event *)resp;
|
||||
crsqe->resp->cookie = req->cookie;
|
||||
crsqe->req_size = req->cmd_size;
|
||||
if (req->resp_size && sb) {
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
|
||||
|
||||
req->resp_addr = cpu_to_le64(sbuf->dma_addr);
|
||||
req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
|
||||
BNXT_QPLIB_CMDQE_UNITS;
|
||||
memset(msg->resp, 0, sizeof(*msg->resp));
|
||||
crsqe->resp = (struct creq_qp_event *)msg->resp;
|
||||
crsqe->resp->cookie = cpu_to_le16(cookie);
|
||||
crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
|
||||
if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb;
|
||||
__set_cmdq_base_resp_addr(msg->req, msg->req_sz, cpu_to_le64(sbuf->dma_addr));
|
||||
__set_cmdq_base_resp_size(msg->req, msg->req_sz,
|
||||
ALIGN(sbuf->size, BNXT_QPLIB_CMDQE_UNITS));
|
||||
}
|
||||
|
||||
preq = (u8 *)req;
|
||||
preq = (u8 *)msg->req;
|
||||
do {
|
||||
/* Locate the next cmdq slot */
|
||||
sw_prod = HWQ_CMP(hwq->prod, hwq);
|
||||
|
@ -174,11 +173,11 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
|||
}
|
||||
/* Copy a segment of the req cmd to the cmdq */
|
||||
memset(cmdqe, 0, sizeof(*cmdqe));
|
||||
memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
|
||||
preq += min_t(u32, size, sizeof(*cmdqe));
|
||||
size -= min_t(u32, size, sizeof(*cmdqe));
|
||||
memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
|
||||
preq += min_t(u32, bsize, sizeof(*cmdqe));
|
||||
bsize -= min_t(u32, bsize, sizeof(*cmdqe));
|
||||
hwq->prod++;
|
||||
} while (size > 0);
|
||||
} while (bsize > 0);
|
||||
cmdq->seq_num++;
|
||||
|
||||
cmdq_prod = hwq->prod;
|
||||
|
@ -191,7 +190,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
|||
cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
|
||||
clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
|
||||
}
|
||||
|
||||
/* ring CMDQ DB */
|
||||
wmb();
|
||||
writel(cmdq_prod, cmdq->cmdq_mbox.prod);
|
||||
|
@ -203,11 +201,9 @@ done:
|
|||
}
|
||||
|
||||
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct cmdq_base *req,
|
||||
struct creq_base *resp,
|
||||
void *sb, u8 is_block)
|
||||
struct bnxt_qplib_cmdqmsg *msg)
|
||||
{
|
||||
struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
|
||||
struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
|
||||
u16 cookie;
|
||||
u8 opcode, retry_cnt = 0xFF;
|
||||
int rc = 0;
|
||||
|
@ -217,23 +213,23 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
|
|||
return 0;
|
||||
|
||||
do {
|
||||
opcode = req->opcode;
|
||||
rc = __send_message(rcfw, req, resp, sb, is_block);
|
||||
cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
|
||||
opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
|
||||
rc = __send_message(rcfw, msg);
|
||||
cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz)) &
|
||||
RCFW_MAX_COOKIE_VALUE;
|
||||
if (!rc)
|
||||
break;
|
||||
|
||||
if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
|
||||
/* send failed */
|
||||
dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
|
||||
cookie, opcode);
|
||||
return rc;
|
||||
}
|
||||
is_block ? mdelay(1) : usleep_range(500, 1000);
|
||||
msg->block ? mdelay(1) : usleep_range(500, 1000);
|
||||
|
||||
} while (retry_cnt--);
|
||||
|
||||
if (is_block)
|
||||
if (msg->block)
|
||||
rc = __block_for_resp(rcfw, cookie);
|
||||
else
|
||||
rc = __wait_for_resp(rcfw, cookie);
|
||||
|
@ -452,14 +448,17 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
|
|||
/* RCFW */
|
||||
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
|
||||
{
|
||||
struct cmdq_deinitialize_fw req;
|
||||
struct creq_deinitialize_fw_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_deinitialize_fw_resp resp = {};
|
||||
struct cmdq_deinitialize_fw req = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
NULL, 0);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
|
||||
sizeof(req));
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,
|
||||
sizeof(req), sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -470,13 +469,15 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
|
|||
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_ctx *ctx, int is_virtfn)
|
||||
{
|
||||
struct creq_initialize_fw_resp resp;
|
||||
struct cmdq_initialize_fw req;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_initialize_fw_resp resp = {};
|
||||
struct cmdq_initialize_fw req = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
u8 pgsz, lvl;
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_INITIALIZE_FW,
|
||||
sizeof(req));
|
||||
/* Supply (log-base-2-of-host-page-size - base-page-shift)
|
||||
* to bono to adjust the doorbell page sizes.
|
||||
*/
|
||||
|
@ -545,8 +546,8 @@ config_vf_res:
|
|||
|
||||
skip_ctx_setup:
|
||||
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
|
||||
|
|
|
@ -39,6 +39,8 @@
|
|||
#ifndef __BNXT_QPLIB_RCFW_H__
|
||||
#define __BNXT_QPLIB_RCFW_H__
|
||||
|
||||
#include "qplib_tlv.h"
|
||||
|
||||
#define RCFW_CMDQ_TRIG_VAL 1
|
||||
#define RCFW_COMM_PCI_BAR_REGION 0
|
||||
#define RCFW_COMM_CONS_PCI_BAR_REGION 2
|
||||
|
@ -51,25 +53,25 @@
|
|||
#define RCFW_DBR_PCI_BAR_REGION 2
|
||||
#define RCFW_DBR_BASE_PAGE_SHIFT 12
|
||||
|
||||
#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
|
||||
do { \
|
||||
memset(&(req), 0, sizeof((req))); \
|
||||
(req).opcode = CMDQ_BASE_OPCODE_##CMD; \
|
||||
(req).cmd_size = sizeof((req)); \
|
||||
(req).flags = cpu_to_le16(cmd_flags); \
|
||||
} while (0)
|
||||
|
||||
#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
|
||||
|
||||
/* Cmdq contains a fix number of a 16-Byte slots */
|
||||
struct bnxt_qplib_cmdqe {
|
||||
u8 data[16];
|
||||
};
|
||||
|
||||
#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
|
||||
|
||||
static inline void bnxt_qplib_rcfw_cmd_prep(struct cmdq_base *req,
|
||||
u8 opcode, u8 cmd_size)
|
||||
{
|
||||
req->opcode = opcode;
|
||||
req->cmd_size = cmd_size;
|
||||
}
|
||||
|
||||
#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
|
||||
|
||||
/* CMDQ elements */
|
||||
#define BNXT_QPLIB_CMDQE_MAX_CNT_256 256
|
||||
#define BNXT_QPLIB_CMDQE_MAX_CNT_8192 8192
|
||||
#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
|
||||
#define BNXT_QPLIB_CMDQE_BYTES(depth) ((depth) * BNXT_QPLIB_CMDQE_UNITS)
|
||||
|
||||
static inline u32 bnxt_qplib_cmdqe_npages(u32 depth)
|
||||
|
@ -87,11 +89,21 @@ static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
|
|||
return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
|
||||
}
|
||||
|
||||
/* Set the cmd_size to a factor of CMDQE unit */
|
||||
static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
|
||||
static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
|
||||
{
|
||||
req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
|
||||
BNXT_QPLIB_CMDQE_UNITS;
|
||||
u32 cmd_byte = 0;
|
||||
|
||||
if (HAS_TLV_HEADER(req)) {
|
||||
struct roce_tlv *tlv_req = (struct roce_tlv *)req;
|
||||
|
||||
cmd_byte = tlv_req->total_size * BNXT_QPLIB_CMDQE_UNITS;
|
||||
} else {
|
||||
cmd_byte = req->cmd_size;
|
||||
req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
|
||||
BNXT_QPLIB_CMDQE_UNITS;
|
||||
}
|
||||
|
||||
return cmd_byte;
|
||||
}
|
||||
|
||||
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
|
||||
|
@ -190,6 +202,27 @@ struct bnxt_qplib_rcfw {
|
|||
u32 cmdq_depth;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_cmdqmsg {
|
||||
struct cmdq_base *req;
|
||||
struct creq_base *resp;
|
||||
void *sb;
|
||||
u32 req_sz;
|
||||
u32 res_sz;
|
||||
u8 block;
|
||||
};
|
||||
|
||||
static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
|
||||
void *req, void *resp, void *sb,
|
||||
u32 req_sz, u32 res_sz, u8 block)
|
||||
{
|
||||
msg->req = req;
|
||||
msg->resp = resp;
|
||||
msg->sb = sb;
|
||||
msg->req_sz = req_sz;
|
||||
msg->res_sz = res_sz;
|
||||
msg->block = block;
|
||||
}
|
||||
|
||||
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
|
||||
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_rcfw *rcfw,
|
||||
|
@ -210,8 +243,7 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
|
|||
void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf);
|
||||
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct cmdq_base *req, struct creq_base *resp,
|
||||
void *sbuf, u8 is_block);
|
||||
struct bnxt_qplib_cmdqmsg *msg);
|
||||
|
||||
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
|
||||
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
#include "qplib_res.h"
|
||||
#include "qplib_rcfw.h"
|
||||
#include "qplib_sp.h"
|
||||
#include "qplib_tlv.h"
|
||||
|
||||
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0 } };
|
||||
|
@ -68,15 +69,17 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
|
|||
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
|
||||
char *fw_ver)
|
||||
{
|
||||
struct cmdq_query_version req;
|
||||
struct creq_query_version_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_query_version_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_query_version req = {};
|
||||
int rc = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_QUERY_VERSION,
|
||||
sizeof(req));
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return;
|
||||
fw_ver[0] = resp.fw_maj;
|
||||
|
@ -88,16 +91,18 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
|
|||
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_dev_attr *attr, bool vf)
|
||||
{
|
||||
struct cmdq_query_func req;
|
||||
struct creq_query_func_resp resp;
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf;
|
||||
struct creq_query_func_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct creq_query_func_resp_sb *sb;
|
||||
u16 cmd_flags = 0;
|
||||
u32 temp;
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf;
|
||||
struct cmdq_query_func req = {};
|
||||
u8 *tqm_alloc;
|
||||
int i, rc = 0;
|
||||
u32 temp;
|
||||
|
||||
RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_QUERY_FUNC,
|
||||
sizeof(req));
|
||||
|
||||
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
|
||||
if (!sbuf) {
|
||||
|
@ -108,8 +113,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
|
|||
|
||||
sb = sbuf->sb;
|
||||
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
(void *)sbuf, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto bail;
|
||||
|
||||
|
@ -174,12 +180,14 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
|
|||
struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_ctx *ctx)
|
||||
{
|
||||
struct cmdq_set_func_resources req;
|
||||
struct creq_set_func_resources_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_set_func_resources_resp resp = {};
|
||||
struct cmdq_set_func_resources req = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
int rc = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
|
||||
sizeof(req));
|
||||
|
||||
req.number_of_qp = cpu_to_le32(ctx->qpc_count);
|
||||
req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
|
||||
|
@ -192,9 +200,9 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
|
|||
req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
|
||||
req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp,
|
||||
NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc) {
|
||||
dev_err(&res->pdev->dev, "Failed to set function resources\n");
|
||||
}
|
||||
|
@ -245,20 +253,23 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
|||
}
|
||||
/* Remove GID from the SGID table */
|
||||
if (update) {
|
||||
struct cmdq_delete_gid req;
|
||||
struct creq_delete_gid_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_delete_gid_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_delete_gid req = {};
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DELETE_GID,
|
||||
sizeof(req));
|
||||
if (sgid_tbl->hw_id[index] == 0xFFFF) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"GID entry contains an invalid HW id\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
@ -315,12 +326,14 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
|||
return -ENOMEM;
|
||||
}
|
||||
if (update) {
|
||||
struct cmdq_add_gid req;
|
||||
struct creq_add_gid_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_add_gid_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_add_gid req = {};
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_ADD_GID,
|
||||
sizeof(req));
|
||||
|
||||
req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
|
||||
req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
|
||||
|
@ -345,8 +358,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
|||
req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
|
||||
req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
|
||||
|
@ -375,12 +389,14 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
|||
struct bnxt_qplib_res,
|
||||
sgid_tbl);
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct creq_modify_gid_resp resp;
|
||||
struct cmdq_modify_gid req;
|
||||
struct creq_modify_gid_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_modify_gid req = {};
|
||||
int rc;
|
||||
u16 cmd_flags = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_MODIFY_GID,
|
||||
sizeof(req));
|
||||
|
||||
req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
|
||||
req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
|
||||
|
@ -399,8 +415,9 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
|||
|
||||
req.gid_index = cpu_to_le16(gid_idx);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -409,14 +426,16 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
|
|||
bool block)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_create_ah req;
|
||||
struct creq_create_ah_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_create_ah_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_create_ah req = {};
|
||||
u32 temp32[4];
|
||||
u16 temp16[3];
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_CREATE_AH,
|
||||
sizeof(req));
|
||||
|
||||
memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
|
||||
req.dgid[0] = cpu_to_le32(temp32[0]);
|
||||
|
@ -439,8 +458,9 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
|
|||
req.dest_mac[1] = cpu_to_le16(temp16[1]);
|
||||
req.dest_mac[2] = cpu_to_le16(temp16[2]);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
NULL, block);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), block);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -452,26 +472,29 @@ void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
|
|||
bool block)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_destroy_ah req;
|
||||
struct creq_destroy_ah_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_destroy_ah_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_destroy_ah req = {};
|
||||
|
||||
/* Clean up the AH table in the device */
|
||||
RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DESTROY_AH,
|
||||
sizeof(req));
|
||||
|
||||
req.ah_cid = cpu_to_le32(ah->id);
|
||||
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
|
||||
block);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), block);
|
||||
bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
}
|
||||
|
||||
/* MRW */
|
||||
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
||||
{
|
||||
struct creq_deallocate_key_resp resp = {};
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_deallocate_key req;
|
||||
struct creq_deallocate_key_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct cmdq_deallocate_key req = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
int rc;
|
||||
|
||||
if (mrw->lkey == 0xFFFFFFFF) {
|
||||
|
@ -479,7 +502,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
|||
return 0;
|
||||
}
|
||||
|
||||
RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
|
||||
sizeof(req));
|
||||
|
||||
req.mrw_flags = mrw->type;
|
||||
|
||||
|
@ -490,8 +515,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
|||
else
|
||||
req.key = cpu_to_le32(mrw->lkey);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -505,13 +531,15 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
|||
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_allocate_mrw req;
|
||||
struct creq_allocate_mrw_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_allocate_mrw_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_allocate_mrw req = {};
|
||||
unsigned long tmp;
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_ALLOCATE_MRW,
|
||||
sizeof(req));
|
||||
|
||||
req.pd_id = cpu_to_le32(mrw->pd->id);
|
||||
req.mrw_flags = mrw->type;
|
||||
|
@ -523,8 +551,9 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
|||
tmp = (unsigned long)mrw;
|
||||
req.mrw_handle = cpu_to_le64(tmp);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -541,16 +570,19 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
|
|||
bool block)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_deregister_mr req;
|
||||
struct creq_deregister_mr_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
struct creq_deregister_mr_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_deregister_mr req = {};
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_DEREGISTER_MR,
|
||||
sizeof(req));
|
||||
|
||||
req.lkey = cpu_to_le32(mrw->lkey);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, block);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), block);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -570,11 +602,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
|
|||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct bnxt_qplib_hwq_attr hwq_attr = {};
|
||||
struct bnxt_qplib_sg_info sginfo = {};
|
||||
struct creq_register_mr_resp resp;
|
||||
struct cmdq_register_mr req;
|
||||
u16 cmd_flags = 0, level;
|
||||
struct creq_register_mr_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_register_mr req = {};
|
||||
int pages, rc;
|
||||
u32 pg_size;
|
||||
u16 level;
|
||||
|
||||
if (num_pbls) {
|
||||
pages = roundup_pow_of_two(num_pbls);
|
||||
|
@ -602,7 +635,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
|
|||
}
|
||||
}
|
||||
|
||||
RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_REGISTER_MR,
|
||||
sizeof(req));
|
||||
|
||||
/* Configure the request */
|
||||
if (mr->hwq.level == PBL_LVL_MAX) {
|
||||
|
@ -627,8 +662,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
|
|||
req.key = cpu_to_le32(mr->lkey);
|
||||
req.mr_size = cpu_to_le64(mr->total_size);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, false);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
|
@ -679,32 +715,19 @@ int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_map_tc_to_cos req;
|
||||
struct creq_map_tc_to_cos_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
|
||||
req.cos0 = cpu_to_le16(cids[0]);
|
||||
req.cos1 = cpu_to_le16(cids[1]);
|
||||
|
||||
return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
NULL, 0);
|
||||
}
|
||||
|
||||
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_roce_stats *stats)
|
||||
{
|
||||
struct cmdq_query_roce_stats req;
|
||||
struct creq_query_roce_stats_resp resp;
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf;
|
||||
struct creq_query_roce_stats_resp resp = {};
|
||||
struct creq_query_roce_stats_resp_sb *sb;
|
||||
u16 cmd_flags = 0;
|
||||
struct cmdq_query_roce_stats req = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf;
|
||||
int rc = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
|
||||
sizeof(req));
|
||||
|
||||
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
|
||||
if (!sbuf) {
|
||||
|
@ -715,8 +738,9 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
|
|||
|
||||
sb = sbuf->sb;
|
||||
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
|
||||
(void *)sbuf, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto bail;
|
||||
/* Extract the context from the side buffer */
|
||||
|
@ -780,8 +804,8 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
|
|||
struct creq_query_roce_stats_ext_resp resp = {};
|
||||
struct creq_query_roce_stats_ext_resp_sb *sb;
|
||||
struct cmdq_query_roce_stats_ext req = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct bnxt_qplib_rcfw_sbuf *sbuf;
|
||||
u16 cmd_flags = 0;
|
||||
int rc;
|
||||
|
||||
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
|
||||
|
@ -791,15 +815,18 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
RCFW_CMD_PREP(req, QUERY_ROCE_STATS_EXT, cmd_flags);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
|
||||
sizeof(req));
|
||||
|
||||
req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
|
||||
req.resp_addr = cpu_to_le64(sbuf->dma_addr);
|
||||
req.function_id = cpu_to_le32(fid);
|
||||
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, (void *)sbuf, 0);
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
|
||||
if (rc)
|
||||
goto bail;
|
||||
|
||||
|
@ -823,3 +850,111 @@ bail:
|
|||
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
|
||||
struct bnxt_qplib_cc_param_ext *cc_ext)
|
||||
{
|
||||
ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
|
||||
cc_ext->ext_mask = 0;
|
||||
ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
|
||||
ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
|
||||
ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
|
||||
ext_req->tr_update_mode = cc_ext->tr_update_mode;
|
||||
ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
|
||||
ext_req->fr_num_rtts = cc_ext->fr_rtt;
|
||||
ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
|
||||
ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
|
||||
ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
|
||||
ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
|
||||
ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
|
||||
ext_req->actual_cr_factor = cc_ext->cr_factor;
|
||||
ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
|
||||
ext_req->cp_bias_en = cc_ext->cp_bias_en;
|
||||
ext_req->cp_bias = cc_ext->cp_bias;
|
||||
ext_req->cnp_ecn = cc_ext->cnp_ecn;
|
||||
ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
|
||||
ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
|
||||
ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
|
||||
ext_req->cr_width = cc_ext->cr_width;
|
||||
ext_req->quota_period_min = cc_ext->min_quota;
|
||||
ext_req->quota_period_max = cc_ext->max_quota;
|
||||
ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
|
||||
ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
|
||||
ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
|
||||
ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
|
||||
ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
|
||||
ext_req->red_div = cc_ext->red_div;
|
||||
ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
|
||||
ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
|
||||
ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
|
||||
ext_req->use_rate_table = cc_ext->low_rate_en;
|
||||
ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
|
||||
ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
|
||||
ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
|
||||
ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
|
||||
ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
|
||||
ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
|
||||
ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
|
||||
ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
|
||||
}
|
||||
|
||||
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_cc_param *cc_param)
|
||||
{
|
||||
struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
|
||||
struct creq_modify_roce_cc_resp resp = {};
|
||||
struct bnxt_qplib_cmdqmsg msg = {};
|
||||
struct cmdq_modify_roce_cc *req;
|
||||
int req_size;
|
||||
void *cmd;
|
||||
int rc;
|
||||
|
||||
/* Prepare the older base command */
|
||||
req = &tlv_req.base_req;
|
||||
cmd = req;
|
||||
req_size = sizeof(*req);
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
|
||||
sizeof(*req));
|
||||
req->modify_mask = cpu_to_le32(cc_param->mask);
|
||||
req->enable_cc = cc_param->enable;
|
||||
req->g = cc_param->g;
|
||||
req->num_phases_per_state = cc_param->nph_per_state;
|
||||
req->time_per_phase = cc_param->time_pph;
|
||||
req->pkts_per_phase = cc_param->pkts_pph;
|
||||
req->init_cr = cpu_to_le16(cc_param->init_cr);
|
||||
req->init_tr = cpu_to_le16(cc_param->init_tr);
|
||||
req->tos_dscp_tos_ecn = (cc_param->tos_dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
|
||||
(cc_param->tos_ecn & CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
|
||||
req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
|
||||
req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
|
||||
req->rtt = cpu_to_le16(cc_param->rtt);
|
||||
req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
|
||||
req->cc_mode = cc_param->cc_mode;
|
||||
req->inactivity_th = cpu_to_le16(cc_param->inact_th);
|
||||
|
||||
/* For chip gen P5 onwards fill extended cmd and header */
|
||||
if (bnxt_qplib_is_chip_gen_p5(res->cctx)) {
|
||||
struct roce_tlv *hdr;
|
||||
u32 payload;
|
||||
u32 chunks;
|
||||
|
||||
cmd = &tlv_req;
|
||||
req_size = sizeof(tlv_req);
|
||||
/* Prepare primary tlv header */
|
||||
hdr = &tlv_req.tlv_hdr;
|
||||
chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
|
||||
payload = sizeof(struct cmdq_modify_roce_cc);
|
||||
__roce_1st_tlv_prep(hdr, chunks, payload, true);
|
||||
/* Prepare secondary tlv header */
|
||||
hdr = (struct roce_tlv *)&tlv_req.ext_req;
|
||||
payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
|
||||
sizeof(struct roce_tlv);
|
||||
__roce_ext_tlv_prep(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, false, true);
|
||||
bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
|
||||
}
|
||||
|
||||
bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
|
||||
sizeof(resp), 0);
|
||||
rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
|
||||
return rc;
|
||||
}
|
||||
|
|
|
@ -244,6 +244,71 @@ struct bnxt_qplib_ext_stat {
|
|||
u64 rx_ecn_marked;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_cc_param_ext {
|
||||
u64 ext_mask;
|
||||
u16 inact_th_hi;
|
||||
u16 min_delta_cnp;
|
||||
u16 init_cp;
|
||||
u8 tr_update_mode;
|
||||
u8 tr_update_cyls;
|
||||
u8 fr_rtt;
|
||||
u8 ai_rate_incr;
|
||||
u16 rr_rtt_th;
|
||||
u16 ar_cr_th;
|
||||
u16 cr_min_th;
|
||||
u8 bw_avg_weight;
|
||||
u8 cr_factor;
|
||||
u16 cr_th_max_cp;
|
||||
u8 cp_bias_en;
|
||||
u8 cp_bias;
|
||||
u8 cnp_ecn;
|
||||
u8 rtt_jitter_en;
|
||||
u16 bytes_per_usec;
|
||||
u16 cc_cr_reset_th;
|
||||
u8 cr_width;
|
||||
u8 min_quota;
|
||||
u8 max_quota;
|
||||
u8 abs_max_quota;
|
||||
u16 tr_lb;
|
||||
u8 cr_prob_fac;
|
||||
u8 tr_prob_fac;
|
||||
u16 fair_cr_th;
|
||||
u8 red_div;
|
||||
u8 cnp_ratio_th;
|
||||
u16 ai_ext_rtt;
|
||||
u8 exp_crcp_ratio;
|
||||
u8 low_rate_en;
|
||||
u16 cpcr_update_th;
|
||||
u16 ai_rtt_th1;
|
||||
u16 ai_rtt_th2;
|
||||
u16 cf_rtt_th;
|
||||
u16 sc_cr_th1; /* severe congestion cr threshold 1 */
|
||||
u16 sc_cr_th2; /* severe congestion cr threshold 2 */
|
||||
u32 l64B_per_rtt;
|
||||
u8 cc_ack_bytes;
|
||||
u16 reduce_cf_rtt_th;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_cc_param {
|
||||
u8 alt_vlan_pcp;
|
||||
u16 alt_tos_dscp;
|
||||
u8 cc_mode;
|
||||
u8 enable;
|
||||
u16 inact_th;
|
||||
u16 init_cr;
|
||||
u16 init_tr;
|
||||
u16 rtt;
|
||||
u8 g;
|
||||
u8 nph_per_state;
|
||||
u8 time_pph;
|
||||
u8 pkts_pph;
|
||||
u8 tos_ecn;
|
||||
u8 tos_dscp;
|
||||
u16 tcp_cp;
|
||||
struct bnxt_qplib_cc_param_ext cc_ext;
|
||||
u32 mask;
|
||||
};
|
||||
|
||||
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
|
||||
struct bnxt_qplib_gid *gid);
|
||||
|
@ -277,10 +342,11 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
|
|||
struct bnxt_qplib_frpl *frpl, int max);
|
||||
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_frpl *frpl);
|
||||
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
|
||||
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_roce_stats *stats);
|
||||
int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
|
||||
struct bnxt_qplib_ext_stat *estat);
|
||||
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_cc_param *cc_param);
|
||||
|
||||
#endif /* __BNXT_QPLIB_SP_H__*/
|
||||
|
|
|
@ -0,0 +1,162 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
|
||||
|
||||
#ifndef __QPLIB_TLV_H__
|
||||
#define __QPLIB_TLV_H__
|
||||
|
||||
struct roce_tlv {
|
||||
struct tlv tlv;
|
||||
u8 total_size; // in units of 16 byte chunks
|
||||
u8 unused[7]; // for 16 byte alignment
|
||||
};
|
||||
|
||||
#define CHUNK_SIZE 16
|
||||
#define CHUNKS(x) (((x) + CHUNK_SIZE - 1) / CHUNK_SIZE)
|
||||
|
||||
static inline void __roce_1st_tlv_prep(struct roce_tlv *rtlv, u8 tot_chunks,
|
||||
u16 content_bytes, u8 flags)
|
||||
{
|
||||
rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP);
|
||||
rtlv->tlv.tlv_type = cpu_to_le16(TLV_TYPE_ROCE_SP_COMMAND);
|
||||
rtlv->tlv.length = cpu_to_le16(content_bytes);
|
||||
rtlv->tlv.flags = TLV_FLAGS_REQUIRED;
|
||||
rtlv->tlv.flags |= flags ? TLV_FLAGS_MORE : 0;
|
||||
rtlv->total_size = (tot_chunks);
|
||||
}
|
||||
|
||||
static inline void __roce_ext_tlv_prep(struct roce_tlv *rtlv, u16 tlv_type,
|
||||
u16 content_bytes, u8 more, u8 flags)
|
||||
{
|
||||
rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP);
|
||||
rtlv->tlv.tlv_type = cpu_to_le16(tlv_type);
|
||||
rtlv->tlv.length = cpu_to_le16(content_bytes);
|
||||
rtlv->tlv.flags |= more ? TLV_FLAGS_MORE : 0;
|
||||
rtlv->tlv.flags |= flags ? TLV_FLAGS_REQUIRED : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* TLV size in units of 16 byte chunks
|
||||
*/
|
||||
#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16)
|
||||
/*
|
||||
* TLV length in bytes
|
||||
*/
|
||||
#define TLV_BYTES (TLV_SIZE * 16)
|
||||
|
||||
#define HAS_TLV_HEADER(msg) (le16_to_cpu(((struct tlv *)(msg))->cmd_discr) == CMD_DISCR_TLV_ENCAP)
|
||||
#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES])
|
||||
|
||||
static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
|
||||
else
|
||||
return req->opcode;
|
||||
}
|
||||
|
||||
static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
|
||||
u32 size, u8 val)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
|
||||
else
|
||||
req->opcode = val;
|
||||
}
|
||||
|
||||
static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie;
|
||||
else
|
||||
return req->cookie;
|
||||
}
|
||||
|
||||
static inline void __set_cmdq_base_cookie(struct cmdq_base *req,
|
||||
u32 size, __le16 val)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val;
|
||||
else
|
||||
req->cookie = val;
|
||||
}
|
||||
|
||||
static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr;
|
||||
else
|
||||
return req->resp_addr;
|
||||
}
|
||||
|
||||
static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req,
|
||||
u32 size, __le64 val)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val;
|
||||
else
|
||||
req->resp_addr = val;
|
||||
}
|
||||
|
||||
static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size;
|
||||
else
|
||||
return req->resp_size;
|
||||
}
|
||||
|
||||
static inline void __set_cmdq_base_resp_size(struct cmdq_base *req,
|
||||
u32 size, u8 val)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val;
|
||||
else
|
||||
req->resp_size = val;
|
||||
}
|
||||
|
||||
static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
return ((struct roce_tlv *)(req))->total_size;
|
||||
else
|
||||
return req->cmd_size;
|
||||
}
|
||||
|
||||
static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req,
|
||||
u32 size, u8 val)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val;
|
||||
else
|
||||
req->cmd_size = val;
|
||||
}
|
||||
|
||||
static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
return ((struct cmdq_base *)GET_TLV_DATA(req))->flags;
|
||||
else
|
||||
return req->flags;
|
||||
}
|
||||
|
||||
static inline void __set_cmdq_base_flags(struct cmdq_base *req,
|
||||
u32 size, __le16 val)
|
||||
{
|
||||
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
|
||||
((struct cmdq_base *)GET_TLV_DATA(req))->flags = val;
|
||||
else
|
||||
req->flags = val;
|
||||
}
|
||||
|
||||
struct bnxt_qplib_tlv_modify_cc_req {
|
||||
struct roce_tlv tlv_hdr;
|
||||
struct cmdq_modify_roce_cc base_req;
|
||||
__le64 tlvpad;
|
||||
struct cmdq_modify_roce_cc_gen1_tlv ext_req;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_tlv_query_rcc_sb {
|
||||
struct roce_tlv tlv_hdr;
|
||||
struct creq_query_roce_cc_resp_sb base_sb;
|
||||
struct creq_query_roce_cc_gen1_resp_sb_tlv gen1_sb;
|
||||
};
|
||||
#endif /* __QPLIB_TLV_H__ */
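Since qplib_tlv.h is new in this series, a short sketch of how the helpers above compose may help: a TLV-encapsulated command places the real cmdq payload behind a roce_tlv header, and the extended congestion-control request chains a second TLV after it. The sketch below mirrors what bnxt_qplib_modify_cc() does earlier in this diff and uses only declarations shown here; it is illustrative, not driver code.

/* Illustrative sketch, not part of this diff: preparing both TLV headers
 * for a bnxt_qplib_tlv_modify_cc_req. total_size is counted in 16-byte
 * chunks, so CHUNKS() rounds the request size up to whole chunks.
 */
static void example_prep_modify_cc_tlvs(struct bnxt_qplib_tlv_modify_cc_req *tlv_req)
{
	u32 chunks = CHUNKS(sizeof(*tlv_req));
	u32 payload = sizeof(struct cmdq_modify_roce_cc);

	/* Primary header: marks TLV encapsulation, more TLVs follow. */
	__roce_1st_tlv_prep(&tlv_req->tlv_hdr, chunks, payload, true);

	/* Extended header: gen1 congestion-control TLV, last in the chain. */
	payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
		  sizeof(struct roce_tlv);
	__roce_ext_tlv_prep((struct roce_tlv *)&tlv_req->ext_req,
			    TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, false, true);
}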
File diff suppressed because it is too large
@ -1,6 +1,6 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
|
||||
/*
|
||||
* Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
|
||||
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _EFA_ADMIN_CMDS_H_
|
||||
|
@ -376,7 +376,9 @@ struct efa_admin_reg_mr_cmd {
|
|||
* 0 : local_write_enable - Local write permissions:
|
||||
* must be set for RQ buffers and buffers posted for
|
||||
* RDMA Read requests
|
||||
* 1 : reserved1 - MBZ
|
||||
* 1 : remote_write_enable - Remote write
|
||||
* permissions: must be set to enable RDMA write to
|
||||
* the region
|
||||
* 2 : remote_read_enable - Remote read permissions:
|
||||
* must be set to enable RDMA read from the region
|
||||
* 7:3 : reserved2 - MBZ
|
||||
|
@ -618,7 +620,11 @@ struct efa_admin_feature_device_attr_desc {
|
|||
* TX queues
|
||||
* 1 : rnr_retry - If set, RNR retry is supported on
|
||||
* modify QP command
|
||||
* 31:2 : reserved - MBZ
|
||||
* 2 : data_polling_128 - If set, 128 bytes data
|
||||
* polling is supported
|
||||
* 3 : rdma_write - If set, RDMA Write is supported
|
||||
* on TX queues
|
||||
* 31:4 : reserved - MBZ
|
||||
*/
|
||||
u32 device_caps;
|
||||
|
||||
|
@ -672,7 +678,7 @@ struct efa_admin_feature_queue_attr_desc {
|
|||
/* The maximum size of LLQ in bytes */
|
||||
u32 max_llq_size;
|
||||
|
||||
/* Maximum number of SGEs for a single RDMA read WQE */
|
||||
/* Maximum number of SGEs for a single RDMA read/write WQE */
|
||||
u16 max_wr_rdma_sges;
|
||||
|
||||
/*
|
||||
|
@ -977,6 +983,7 @@ struct efa_admin_host_info {
|
|||
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
|
||||
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
|
||||
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
|
||||
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK BIT(1)
|
||||
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
|
||||
|
||||
/* create_cq_cmd */
|
||||
|
@ -991,6 +998,8 @@ struct efa_admin_host_info {
|
|||
/* feature_device_attr_desc */
|
||||
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
|
||||
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
|
||||
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
|
||||
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK BIT(3)
|
||||
|
||||
/* create_eq_cmd */
|
||||
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
|
||||
/*
|
||||
* Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
|
||||
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _EFA_IO_H_
|
||||
|
@ -23,6 +23,8 @@ enum efa_io_send_op_type {
|
|||
EFA_IO_SEND = 0,
|
||||
/* RDMA read */
|
||||
EFA_IO_RDMA_READ = 1,
|
||||
/* RDMA write */
|
||||
EFA_IO_RDMA_WRITE = 2,
|
||||
};
|
||||
|
||||
enum efa_io_comp_status {
|
||||
|
@ -62,8 +64,7 @@ struct efa_io_tx_meta_desc {
|
|||
|
||||
/*
|
||||
* control flags
|
||||
* 3:0 : op_type - operation type: send/rdma/fast mem
|
||||
* ops/etc
|
||||
* 3:0 : op_type - enum efa_io_send_op_type
|
||||
* 4 : has_imm - immediate_data field carries valid
|
||||
* data.
|
||||
* 5 : inline_msg - inline mode - inline message data
|
||||
|
@ -219,21 +220,22 @@ struct efa_io_cdesc_common {
|
|||
* 2:1 : q_type - enum efa_io_queue_type: send/recv
|
||||
* 3 : has_imm - indicates that immediate data is
|
||||
* present - for RX completions only
|
||||
* 7:4 : reserved28 - MBZ
|
||||
* 6:4 : op_type - enum efa_io_send_op_type
|
||||
* 7 : reserved31 - MBZ
|
||||
*/
|
||||
u8 flags;
|
||||
|
||||
/* local QP number */
|
||||
u16 qp_num;
|
||||
|
||||
/* Transferred length */
|
||||
u16 length;
|
||||
};
|
||||
|
||||
/* Tx completion descriptor */
|
||||
struct efa_io_tx_cdesc {
|
||||
/* Common completion info */
|
||||
struct efa_io_cdesc_common common;
|
||||
|
||||
/* MBZ */
|
||||
u16 reserved16;
|
||||
};
|
||||
|
||||
/* Rx Completion Descriptor */
|
||||
|
@ -241,6 +243,9 @@ struct efa_io_rx_cdesc {
|
|||
/* Common completion info */
|
||||
struct efa_io_cdesc_common common;
|
||||
|
||||
/* Transferred length bits[15:0] */
|
||||
u16 length;
|
||||
|
||||
/* Remote Address Handle FW index, 0xFFFF indicates invalid ah */
|
||||
u16 ah;
|
||||
|
||||
|
@ -250,16 +255,26 @@ struct efa_io_rx_cdesc {
|
|||
u32 imm;
|
||||
};
|
||||
|
||||
/* Rx Completion Descriptor RDMA write info */
|
||||
struct efa_io_rx_cdesc_rdma_write {
|
||||
/* Transferred length bits[31:16] */
|
||||
u16 length_hi;
|
||||
};
|
||||
|
||||
/* Extended Rx Completion Descriptor */
|
||||
struct efa_io_rx_cdesc_ex {
|
||||
/* Base RX completion info */
|
||||
struct efa_io_rx_cdesc rx_cdesc_base;
|
||||
struct efa_io_rx_cdesc base;
|
||||
|
||||
/*
|
||||
* Valid only in case of unknown AH (0xFFFF) and CQ set_src_addr is
|
||||
* enabled.
|
||||
*/
|
||||
u8 src_addr[16];
|
||||
union {
|
||||
struct efa_io_rx_cdesc_rdma_write rdma_write;
|
||||
|
||||
/*
|
||||
* Valid only in case of unknown AH (0xFFFF) and CQ
|
||||
* set_src_addr is enabled.
|
||||
*/
|
||||
u8 src_addr[16];
|
||||
} u;
|
||||
};
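For RDMA writes with immediate, the extended RX completion now splits the transferred length across two fields: bits [15:0] in the base descriptor and bits [31:16] in the rdma_write member. A consumer would reassemble it roughly as below; this is an illustrative sketch based only on the layout shown here, not code from the EFA driver.

/* Illustrative sketch, not part of this diff: recovering the 32-bit length. */
static u32 example_rx_cdesc_ex_length(const struct efa_io_rx_cdesc_ex *cdesc)
{
	return ((u32)cdesc->u.rdma_write.length_hi << 16) | cdesc->base.length;
}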
|
||||
|
||||
/* tx_meta_desc */
|
||||
|
@ -285,5 +300,6 @@ struct efa_io_rx_cdesc_ex {
|
|||
#define EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
|
||||
#define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
|
||||
#define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
|
||||
#define EFA_IO_CDESC_COMMON_OP_TYPE_MASK GENMASK(6, 4)
|
||||
|
||||
#endif /* _EFA_IO_H_ */
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
|
||||
/*
|
||||
* Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
|
||||
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/dma-buf.h>
|
||||
|
@ -250,6 +250,12 @@ int efa_query_device(struct ib_device *ibdev,
|
|||
if (EFA_DEV_CAP(dev, RNR_RETRY))
|
||||
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
|
||||
|
||||
if (EFA_DEV_CAP(dev, DATA_POLLING_128))
|
||||
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
|
||||
|
||||
if (EFA_DEV_CAP(dev, RDMA_WRITE))
|
||||
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
|
||||
|
||||
if (dev->neqs)
|
||||
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
|
||||
|
||||
|
@ -1569,7 +1575,8 @@ static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
|
|||
|
||||
supp_access_flags =
|
||||
IB_ACCESS_LOCAL_WRITE |
|
||||
(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);
|
||||
(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
|
||||
(EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
|
||||
|
||||
access_flags &= ~IB_ACCESS_OPTIONAL;
|
||||
if (access_flags & ~supp_access_flags) {
|
||||
|
|
|
@ -32,7 +32,7 @@ struct erdma_eq {
|
|||
atomic64_t event_num;
|
||||
atomic64_t notify_num;
|
||||
|
||||
u64 __iomem *db_addr;
|
||||
void __iomem *db;
|
||||
u64 *db_record;
|
||||
};
|
||||
|
||||
|
|
|
@ -33,11 +33,11 @@ struct mpa_rr_params {
|
|||
* MPA request/response Hdr bits & fields
|
||||
*/
|
||||
enum {
|
||||
MPA_RR_FLAG_MARKERS = __cpu_to_be16(0x8000),
|
||||
MPA_RR_FLAG_CRC = __cpu_to_be16(0x4000),
|
||||
MPA_RR_FLAG_REJECT = __cpu_to_be16(0x2000),
|
||||
MPA_RR_RESERVED = __cpu_to_be16(0x1f00),
|
||||
MPA_RR_MASK_REVISION = __cpu_to_be16(0x00ff)
|
||||
MPA_RR_FLAG_MARKERS = cpu_to_be16(0x8000),
|
||||
MPA_RR_FLAG_CRC = cpu_to_be16(0x4000),
|
||||
MPA_RR_FLAG_REJECT = cpu_to_be16(0x2000),
|
||||
MPA_RR_RESERVED = cpu_to_be16(0x1f00),
|
||||
MPA_RR_MASK_REVISION = cpu_to_be16(0x00ff)
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -166,8 +166,7 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
|
|||
spin_lock_init(&eq->lock);
|
||||
atomic64_set(&eq->event_num, 0);
|
||||
|
||||
eq->db_addr =
|
||||
(u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG);
|
||||
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
|
||||
eq->db_record = (u64 *)(eq->qbuf + buf_size);
|
||||
|
||||
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
|
||||
|
@ -183,9 +182,8 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
|
|||
|
||||
int erdma_cmdq_init(struct erdma_dev *dev)
|
||||
{
|
||||
int err, i;
|
||||
struct erdma_cmdq *cmdq = &dev->cmdq;
|
||||
u32 sts, ctrl;
|
||||
int err;
|
||||
|
||||
cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
|
||||
cmdq->use_event = false;
|
||||
|
@ -208,34 +206,10 @@ int erdma_cmdq_init(struct erdma_dev *dev)
|
|||
if (err)
|
||||
goto err_destroy_cq;
|
||||
|
||||
ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1);
|
||||
erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
|
||||
|
||||
for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
|
||||
sts = erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
|
||||
ERDMA_REG_DEV_ST_INIT_DONE_MASK);
|
||||
if (sts)
|
||||
break;
|
||||
|
||||
msleep(ERDMA_REG_ACCESS_WAIT_MS);
|
||||
}
|
||||
|
||||
if (i == ERDMA_WAIT_DEV_DONE_CNT) {
|
||||
dev_err(&dev->pdev->dev, "wait init done failed.\n");
|
||||
err = -ETIMEDOUT;
|
||||
goto err_destroy_eq;
|
||||
}
|
||||
|
||||
set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
|
||||
|
||||
return 0;
|
||||
|
||||
err_destroy_eq:
|
||||
dma_free_coherent(&dev->pdev->dev,
|
||||
(cmdq->eq.depth << EQE_SHIFT) +
|
||||
ERDMA_EXTRA_BUFFER_SIZE,
|
||||
cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
|
||||
|
||||
err_destroy_cq:
|
||||
dma_free_coherent(&dev->pdev->dev,
|
||||
(cmdq->cq.depth << CQE_SHIFT) +
|
||||
|
@ -283,7 +257,7 @@ static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
|
|||
__be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
|
||||
cmdq->cq.depth, CQE_SHIFT);
|
||||
u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
|
||||
__be32_to_cpu(READ_ONCE(*cqe)));
|
||||
be32_to_cpu(READ_ONCE(*cqe)));
|
||||
|
||||
return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
|
||||
}
|
||||
|
@ -319,7 +293,6 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
|
|||
__be32 *cqe;
|
||||
u16 ctx_id;
|
||||
u64 *sqe;
|
||||
int i;
|
||||
|
||||
cqe = get_next_valid_cmdq_cqe(cmdq);
|
||||
if (!cqe)
|
||||
|
@ -328,8 +301,8 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
|
|||
cmdq->cq.ci++;
|
||||
|
||||
dma_rmb();
|
||||
hdr0 = __be32_to_cpu(*cqe);
|
||||
sqe_idx = __be32_to_cpu(*(cqe + 1));
|
||||
hdr0 = be32_to_cpu(*cqe);
|
||||
sqe_idx = be32_to_cpu(*(cqe + 1));
|
||||
|
||||
sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
|
||||
SQEBB_SHIFT);
|
||||
|
@ -341,9 +314,8 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
|
|||
comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
|
||||
comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
|
||||
cmdq->sq.ci += cmdq->sq.wqebb_cnt;
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
comp_wait->comp_data[i] = __be32_to_cpu(*(cqe + 2 + i));
|
||||
/* Copy 16B comp data after cqe hdr to outer */
|
||||
be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
|
||||
|
||||
if (cmdq->use_event)
|
||||
complete(&comp_wait->wait_event);
|
||||
|
|
|
@ -11,7 +11,7 @@ static void *get_next_valid_cqe(struct erdma_cq *cq)
|
|||
__be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
|
||||
cq->depth, CQE_SHIFT);
|
||||
u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
|
||||
__be32_to_cpu(READ_ONCE(*cqe)));
|
||||
be32_to_cpu(READ_ONCE(*cqe)));
|
||||
|
||||
return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ void notify_eq(struct erdma_eq *eq)
|
|||
FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
|
||||
|
||||
*eq->db_record = db_data;
|
||||
writeq(db_data, eq->db_addr);
|
||||
writeq(db_data, eq->db);
|
||||
|
||||
atomic64_inc(&eq->notify_num);
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ int erdma_aeq_init(struct erdma_dev *dev)
|
|||
atomic64_set(&eq->event_num, 0);
|
||||
atomic64_set(&eq->notify_num, 0);
|
||||
|
||||
eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
|
||||
eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
|
||||
eq->db_record = (u64 *)(eq->qbuf + buf_size);
|
||||
|
||||
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
|
||||
|
@ -243,9 +243,8 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
|
|||
atomic64_set(&eq->notify_num, 0);
|
||||
|
||||
eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
|
||||
eq->db_addr =
|
||||
(u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
|
||||
(ceqn + 1) * ERDMA_DB_SIZE);
|
||||
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
|
||||
(ceqn + 1) * ERDMA_DB_SIZE;
|
||||
eq->db_record = (u64 *)(eq->qbuf + buf_size);
|
||||
eq->ci = 0;
|
||||
dev->ceqs[ceqn].dev = dev;
|
||||
|
|
|
@ -112,6 +112,10 @@
|
|||
|
||||
#define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
|
||||
|
||||
/* Hardware page size definition */
|
||||
#define ERDMA_HW_PAGE_SHIFT 12
|
||||
#define ERDMA_HW_PAGE_SIZE 4096
|
||||
|
||||
/* WQE related. */
|
||||
#define EQE_SIZE 16
|
||||
#define EQE_SHIFT 4
|
||||
|
|
|
@ -211,13 +211,36 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void erdma_device_uninit(struct erdma_dev *dev)
|
||||
static void erdma_hw_reset(struct erdma_dev *dev)
|
||||
{
|
||||
u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);
|
||||
|
||||
erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
|
||||
}
|
||||
|
||||
static int erdma_wait_hw_init_done(struct erdma_dev *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG,
|
||||
FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1));
|
||||
|
||||
for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
|
||||
if (erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
|
||||
ERDMA_REG_DEV_ST_INIT_DONE_MASK))
|
||||
break;
|
||||
|
||||
msleep(ERDMA_REG_ACCESS_WAIT_MS);
|
||||
}
|
||||
|
||||
if (i == ERDMA_WAIT_DEV_DONE_CNT) {
|
||||
dev_err(&dev->pdev->dev, "wait init done failed.\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct pci_device_id erdma_pci_tbl[] = {
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
|
||||
{}
|
||||
|
@ -293,16 +316,22 @@ static int erdma_probe_dev(struct pci_dev *pdev)
|
|||
if (err)
|
||||
goto err_uninit_aeq;
|
||||
|
||||
err = erdma_ceqs_init(dev);
|
||||
err = erdma_wait_hw_init_done(dev);
|
||||
if (err)
|
||||
goto err_uninit_cmdq;
|
||||
|
||||
err = erdma_ceqs_init(dev);
|
||||
if (err)
|
||||
goto err_reset_hw;
|
||||
|
||||
erdma_finish_cmdq_init(dev);
|
||||
|
||||
return 0;
|
||||
|
||||
err_reset_hw:
|
||||
erdma_hw_reset(dev);
|
||||
|
||||
err_uninit_cmdq:
|
||||
erdma_device_uninit(dev);
|
||||
erdma_cmdq_destroy(dev);
|
||||
|
||||
err_uninit_aeq:
|
||||
|
@ -334,9 +363,7 @@ static void erdma_remove_dev(struct pci_dev *pdev)
|
|||
struct erdma_dev *dev = pci_get_drvdata(pdev);
|
||||
|
||||
erdma_ceqs_uninit(dev);
|
||||
|
||||
erdma_device_uninit(dev);
|
||||
|
||||
erdma_hw_reset(dev);
|
||||
erdma_cmdq_destroy(dev);
|
||||
erdma_aeq_destroy(dev);
|
||||
erdma_comm_irq_uninit(dev);
|
||||
|
|
|
@ -38,7 +38,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
|
|||
FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
|
||||
|
||||
if (rdma_is_kernel_res(&qp->ibqp.res)) {
|
||||
u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
|
||||
u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
|
||||
|
||||
req.sq_cqn_mtt_cfg =
|
||||
FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
|
||||
|
@ -66,13 +66,13 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
|
|||
user_qp = &qp->user_qp;
|
||||
req.sq_cqn_mtt_cfg = FIELD_PREP(
|
||||
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
|
||||
ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
|
||||
ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
|
||||
req.sq_cqn_mtt_cfg |=
|
||||
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
|
||||
|
||||
req.rq_cqn_mtt_cfg = FIELD_PREP(
|
||||
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
|
||||
ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
|
||||
ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
|
||||
req.rq_cqn_mtt_cfg |=
|
||||
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
|
||||
|
||||
|
@ -162,7 +162,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
|
|||
if (rdma_is_kernel_res(&cq->ibcq.res)) {
|
||||
page_size = SZ_32M;
|
||||
req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
|
||||
ilog2(page_size) - PAGE_SHIFT);
|
||||
ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
|
||||
req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
|
||||
req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
|
||||
|
||||
|
@ -175,8 +175,9 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
|
|||
cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
|
||||
} else {
|
||||
mtt = &cq->user_cq.qbuf_mtt;
|
||||
req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
|
||||
ilog2(mtt->page_size) - PAGE_SHIFT);
|
||||
req.cfg0 |=
|
||||
FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
|
||||
ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
|
||||
if (mtt->mtt_nents == 1) {
|
||||
req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
|
||||
req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
|
||||
|
@ -636,7 +637,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
|
|||
u32 rq_offset;
|
||||
int ret;
|
||||
|
||||
if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
|
||||
if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
|
||||
qp->attrs.rq_size * RQE_SIZE))
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -646,7 +647,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
|
||||
rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
|
||||
qp->user_qp.rq_offset = rq_offset;
|
||||
|
||||
ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
|
||||
|
|
|
@ -12135,7 +12135,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
|
|||
set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
|
||||
IS_RCVURGENT_START + rcd->ctxt, false);
|
||||
|
||||
hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
|
||||
hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx", ctxt, rcvctrl);
|
||||
write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
|
||||
|
||||
/* work around sticky RcvCtxtStatus.BlockedRHQFull */
|
||||
|
@ -12205,10 +12205,10 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
|
|||
hfi1_cdbg(CNTR, "reading %s", entry->name);
|
||||
if (entry->flags & CNTR_DISABLED) {
|
||||
/* Nothing */
|
||||
hfi1_cdbg(CNTR, "\tDisabled\n");
|
||||
hfi1_cdbg(CNTR, "\tDisabled");
|
||||
} else {
|
||||
if (entry->flags & CNTR_VL) {
|
||||
hfi1_cdbg(CNTR, "\tPer VL\n");
|
||||
hfi1_cdbg(CNTR, "\tPer VL");
|
||||
for (j = 0; j < C_VL_COUNT; j++) {
|
||||
val = entry->rw_cntr(entry,
|
||||
dd, j,
|
||||
|
@ -12216,21 +12216,21 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
|
|||
0);
|
||||
hfi1_cdbg(
|
||||
CNTR,
|
||||
"\t\tRead 0x%llx for %d\n",
|
||||
"\t\tRead 0x%llx for %d",
|
||||
val, j);
|
||||
dd->cntrs[entry->offset + j] =
|
||||
val;
|
||||
}
|
||||
} else if (entry->flags & CNTR_SDMA) {
|
||||
hfi1_cdbg(CNTR,
|
||||
"\t Per SDMA Engine\n");
|
||||
"\t Per SDMA Engine");
|
||||
for (j = 0; j < chip_sdma_engines(dd);
|
||||
j++) {
|
||||
val =
|
||||
entry->rw_cntr(entry, dd, j,
|
||||
CNTR_MODE_R, 0);
|
||||
hfi1_cdbg(CNTR,
|
||||
"\t\tRead 0x%llx for %d\n",
|
||||
"\t\tRead 0x%llx for %d",
|
||||
val, j);
|
||||
dd->cntrs[entry->offset + j] =
|
||||
val;
|
||||
|
@ -12271,7 +12271,7 @@ u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
|
|||
hfi1_cdbg(CNTR, "reading %s", entry->name);
|
||||
if (entry->flags & CNTR_DISABLED) {
|
||||
/* Nothing */
|
||||
hfi1_cdbg(CNTR, "\tDisabled\n");
|
||||
hfi1_cdbg(CNTR, "\tDisabled");
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -12513,7 +12513,7 @@ static void do_update_synth_timer(struct work_struct *work)
|
|||
|
||||
hfi1_cdbg(
|
||||
CNTR,
|
||||
"[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
|
||||
"[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx",
|
||||
dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
|
||||
|
||||
if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
|
||||
|
@ -12527,7 +12527,7 @@ static void do_update_synth_timer(struct work_struct *work)
|
|||
} else {
|
||||
total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
|
||||
hfi1_cdbg(CNTR,
|
||||
"[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
|
||||
"[%d] total flits 0x%llx limit 0x%llx", dd->unit,
|
||||
total_flits, (u64)CNTR_32BIT_MAX);
|
||||
if (total_flits >= CNTR_32BIT_MAX) {
|
||||
hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
|
||||
|
|
|
@ -1597,7 +1597,7 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
|
|||
|
||||
return 0;
|
||||
drop:
|
||||
hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
|
||||
hfi1_cdbg(PKT, "%s: packet dropped", __func__);
|
||||
ibp->rvp.n_pkt_drops++;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -975,7 +975,7 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
|
|||
ret = -ENOMEM;
|
||||
goto ctxdata_free;
|
||||
}
|
||||
hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
|
||||
hfi1_cdbg(PROC, "allocated send context %u(%u)", uctxt->sc->sw_index,
|
||||
uctxt->sc->hw_context);
|
||||
ret = sc_enable(uctxt->sc);
|
||||
if (ret)
|
||||
|
|
|
@ -342,7 +342,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
|
|||
INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
|
||||
INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
|
||||
|
||||
hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
|
||||
hfi1_cdbg(PROC, "setting up context %u", rcd->ctxt);
|
||||
|
||||
/*
|
||||
* Calculate the context's RcvArray entry starting point.
|
||||
|
@ -400,7 +400,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
|
|||
rcd->egrbufs.count = MAX_EAGER_ENTRIES;
|
||||
}
|
||||
hfi1_cdbg(PROC,
|
||||
"ctxt%u: max Eager buffer RcvArray entries: %u\n",
|
||||
"ctxt%u: max Eager buffer RcvArray entries: %u",
|
||||
rcd->ctxt, rcd->egrbufs.count);
|
||||
|
||||
/*
|
||||
|
@ -432,7 +432,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
|
|||
if (rcd->egrbufs.size < hfi1_max_mtu) {
|
||||
rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
|
||||
hfi1_cdbg(PROC,
|
||||
"ctxt%u: eager bufs size too small. Adjusting to %u\n",
|
||||
"ctxt%u: eager bufs size too small. Adjusting to %u",
|
||||
rcd->ctxt, rcd->egrbufs.size);
|
||||
}
|
||||
rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
|
||||
|
@ -1920,7 +1920,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
|
|||
rcd->egrbufs.size = alloced_bytes;
|
||||
|
||||
hfi1_cdbg(PROC,
|
||||
"ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
|
||||
"ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB",
|
||||
rcd->ctxt, rcd->egrbufs.alloced,
|
||||
rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
|
||||
|
||||
|
@ -1943,13 +1943,13 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
|
|||
rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
|
||||
|
||||
rcd->expected_base = rcd->eager_base + egrtop;
|
||||
hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
|
||||
hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u",
|
||||
rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
|
||||
rcd->eager_base, rcd->expected_base);
|
||||
|
||||
if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
|
||||
hfi1_cdbg(PROC,
|
||||
"ctxt%u: current Eager buffer size is invalid %u\n",
|
||||
"ctxt%u: current Eager buffer size is invalid %u",
|
||||
rcd->ctxt, rcd->egrbufs.rcvtid_size);
|
||||
ret = -EINVAL;
|
||||
goto bail_rcvegrbuf_phys;
|
||||
|
|
|
@ -215,6 +215,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
|
|||
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
|
||||
ret = sdma_txadd_page(dd,
|
||||
NULL,
|
||||
txreq,
|
||||
skb_frag_page(frag),
|
||||
frag->bv_offset,
|
||||
|
@ -737,10 +738,13 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
|
|||
txq->tx_ring.shift = ilog2(tx_item_size);
|
||||
txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
|
||||
tx_ring = &txq->tx_ring;
|
||||
for (j = 0; j < tx_ring_size; j++)
|
||||
for (j = 0; j < tx_ring_size; j++) {
|
||||
hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
|
||||
kzalloc_node(sizeof(*tx->sdma_hdr),
|
||||
GFP_KERNEL, priv->dd->node);
|
||||
if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr)
|
||||
goto free_txqs;
|
||||
}
|
||||
|
||||
netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
|
||||
}
|
||||
|
|
|
@ -46,12 +46,14 @@ int hfi1_mmu_rb_register(void *ops_arg,
|
|||
struct mmu_rb_handler **handler)
|
||||
{
|
||||
struct mmu_rb_handler *h;
|
||||
void *free_ptr;
|
||||
int ret;
|
||||
|
||||
h = kzalloc(sizeof(*h), GFP_KERNEL);
|
||||
if (!h)
|
||||
free_ptr = kzalloc(sizeof(*h) + cache_line_size() - 1, GFP_KERNEL);
|
||||
if (!free_ptr)
|
||||
return -ENOMEM;
|
||||
|
||||
h = PTR_ALIGN(free_ptr, cache_line_size());
|
||||
h->root = RB_ROOT_CACHED;
|
||||
h->ops = ops;
|
||||
h->ops_arg = ops_arg;
|
||||
|
@ -62,10 +64,11 @@ int hfi1_mmu_rb_register(void *ops_arg,
|
|||
INIT_LIST_HEAD(&h->del_list);
|
||||
INIT_LIST_HEAD(&h->lru_list);
|
||||
h->wq = wq;
|
||||
h->free_ptr = free_ptr;
|
||||
|
||||
ret = mmu_notifier_register(&h->mn, current->mm);
|
||||
if (ret) {
|
||||
kfree(h);
|
||||
kfree(free_ptr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -108,7 +111,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
|
|||
/* Now the mm may be freed. */
|
||||
mmdrop(handler->mn.mm);
|
||||
|
||||
kfree(handler);
|
||||
kfree(handler->free_ptr);
|
||||
}
|
||||
|
||||
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
|
||||
|
@ -126,11 +129,11 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
|
|||
spin_lock_irqsave(&handler->lock, flags);
|
||||
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
|
||||
if (node) {
|
||||
ret = -EINVAL;
|
||||
ret = -EEXIST;
|
||||
goto unlock;
|
||||
}
|
||||
__mmu_int_rb_insert(mnode, &handler->root);
|
||||
list_add(&mnode->list, &handler->lru_list);
|
||||
list_add_tail(&mnode->list, &handler->lru_list);
|
||||
|
||||
ret = handler->ops->insert(handler->ops_arg, mnode);
|
||||
if (ret) {
|
||||
|
@ -143,6 +146,19 @@ unlock:
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Caller must hold handler lock */
|
||||
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
|
||||
unsigned long addr, unsigned long len)
|
||||
{
|
||||
struct mmu_rb_node *node;
|
||||
|
||||
trace_hfi1_mmu_rb_search(addr, len);
|
||||
node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
|
||||
if (node)
|
||||
list_move_tail(&node->list, &handler->lru_list);
|
||||
return node;
|
||||
}
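hfi1_mmu_rb_get_first() replaces the old remove-unless-exact lookup and, per the comment above, requires the handler lock to be held. A hedged sketch of the expected caller shape follows; the wrapper name and the boolean use of the result are illustrative, not taken from hfi1.

/* Illustrative sketch, not part of this diff: locking around the lookup. */
static bool example_region_is_cached(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool found;

	spin_lock_irqsave(&handler->lock, flags);
	node = hfi1_mmu_rb_get_first(handler, addr, len);
	/* Any dereference of node must also happen under handler->lock. */
	found = node != NULL;
	spin_unlock_irqrestore(&handler->lock, flags);

	return found;
}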
|
||||
|
||||
/* Caller must hold handler lock */
|
||||
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
|
||||
unsigned long addr,
|
||||
|
@ -167,32 +183,6 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
|
|||
return node;
|
||||
}
|
||||
|
||||
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
|
||||
unsigned long addr, unsigned long len,
|
||||
struct mmu_rb_node **rb_node)
|
||||
{
|
||||
struct mmu_rb_node *node;
|
||||
unsigned long flags;
|
||||
bool ret = false;
|
||||
|
||||
if (current->mm != handler->mn.mm)
|
||||
return ret;
|
||||
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
node = __mmu_rb_search(handler, addr, len);
|
||||
if (node) {
|
||||
if (node->addr == addr && node->len == len)
|
||||
goto unlock;
|
||||
__mmu_int_rb_remove(node, &handler->root);
|
||||
list_del(&node->list); /* remove from LRU list */
|
||||
ret = true;
|
||||
}
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&handler->lock, flags);
|
||||
*rb_node = node;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
|
||||
{
|
||||
struct mmu_rb_node *rbnode, *ptr;
|
||||
|
@ -206,8 +196,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
|
|||
INIT_LIST_HEAD(&del_list);
|
||||
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
|
||||
list) {
|
||||
list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
|
||||
if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
|
||||
&stop)) {
|
||||
__mmu_int_rb_remove(rbnode, &handler->root);
|
||||
|
@ -219,36 +208,11 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
|
|||
}
|
||||
spin_unlock_irqrestore(&handler->lock, flags);
|
||||
|
||||
while (!list_empty(&del_list)) {
|
||||
rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
|
||||
list_del(&rbnode->list);
|
||||
list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
|
||||
handler->ops->remove(handler->ops_arg, rbnode);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* It is up to the caller to ensure that this function does not race with the
|
||||
* mmu invalidate notifier which may be calling the users remove callback on
|
||||
* 'node'.
|
||||
*/
|
||||
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
|
||||
struct mmu_rb_node *node)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (current->mm != handler->mn.mm)
|
||||
return;
|
||||
|
||||
/* Validity of handler and node pointers has been checked by caller. */
|
||||
trace_hfi1_mmu_rb_remove(node->addr, node->len);
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
__mmu_int_rb_remove(node, &handler->root);
|
||||
list_del(&node->list); /* remove from LRU list */
|
||||
spin_unlock_irqrestore(&handler->lock, flags);
|
||||
|
||||
handler->ops->remove(handler->ops_arg, node);
|
||||
}
|
||||
|
||||
static int mmu_notifier_range_start(struct mmu_notifier *mn,
|
||||
const struct mmu_notifier_range *range)
|
||||
{
|
||||
|
|
|
@ -33,15 +33,25 @@ struct mmu_rb_ops {
|
|||
};
|
||||
|
||||
struct mmu_rb_handler {
|
||||
/*
|
||||
* struct mmu_notifier is 56 bytes, and spinlock_t is 4 bytes, so
|
||||
* they fit together in one cache line. mn is relatively rarely
|
||||
* accessed, so co-locating the spinlock with it achieves much of
|
||||
* the cacheline contention reduction of giving the spinlock its own
|
||||
* cacheline without the overhead of doing so.
|
||||
*/
|
||||
struct mmu_notifier mn;
|
||||
struct rb_root_cached root;
|
||||
void *ops_arg;
|
||||
spinlock_t lock; /* protect the RB tree */
|
||||
|
||||
/* Begin on a new cachline boundary here */
|
||||
struct rb_root_cached root ____cacheline_aligned_in_smp;
|
||||
void *ops_arg;
|
||||
struct mmu_rb_ops *ops;
|
||||
struct list_head lru_list;
|
||||
struct work_struct del_work;
|
||||
struct list_head del_list;
|
||||
struct workqueue_struct *wq;
|
||||
void *free_ptr;
|
||||
};
|
||||
|
||||
int hfi1_mmu_rb_register(void *ops_arg,
|
||||
|
@ -52,10 +62,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
|
|||
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
|
||||
struct mmu_rb_node *mnode);
|
||||
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
|
||||
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
|
||||
struct mmu_rb_node *mnode);
|
||||
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
|
||||
unsigned long addr, unsigned long len,
|
||||
struct mmu_rb_node **rb_node);
|
||||
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
|
||||
unsigned long addr,
|
||||
unsigned long len);
|
||||
|
||||
#endif /* _HFI1_MMU_RB_H */
|
||||
|
|
|
@ -7,7 +7,6 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/aer.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "hfi.h"
|
||||
|
@ -65,7 +64,6 @@ int hfi1_pcie_init(struct hfi1_devdata *dd)
|
|||
}
|
||||
|
||||
pci_set_master(pdev);
|
||||
(void)pci_enable_pcie_error_reporting(pdev);
|
||||
return 0;
|
||||
|
||||
bail:
|
||||
|
|
|
@ -820,7 +820,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
|
|||
}
|
||||
|
||||
hfi1_cdbg(PIO,
|
||||
"Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
|
||||
"Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u",
|
||||
sw_index,
|
||||
hw_context,
|
||||
sc_type_name(type),
|
||||
|
|
|
@ -1593,22 +1593,7 @@ static inline void sdma_unmap_desc(
|
|||
struct hfi1_devdata *dd,
|
||||
struct sdma_desc *descp)
|
||||
{
|
||||
switch (sdma_mapping_type(descp)) {
|
||||
case SDMA_MAP_SINGLE:
|
||||
dma_unmap_single(
|
||||
&dd->pcidev->dev,
|
||||
sdma_mapping_addr(descp),
|
||||
sdma_mapping_len(descp),
|
||||
DMA_TO_DEVICE);
|
||||
break;
|
||||
case SDMA_MAP_PAGE:
|
||||
dma_unmap_page(
|
||||
&dd->pcidev->dev,
|
||||
sdma_mapping_addr(descp),
|
||||
sdma_mapping_len(descp),
|
||||
DMA_TO_DEVICE);
|
||||
break;
|
||||
}
|
||||
system_descriptor_complete(dd, descp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3128,7 +3113,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
|
|||
|
||||
/* Add descriptor for coalesce buffer */
|
||||
tx->desc_limit = MAX_DESC;
|
||||
return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
|
||||
return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
|
||||
addr, tx->tlen);
|
||||
}
|
||||
|
||||
|
@ -3167,10 +3152,12 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
|
|||
return rval;
|
||||
}
|
||||
}
|
||||
|
||||
/* finish the one just added */
|
||||
make_tx_sdma_desc(
|
||||
tx,
|
||||
SDMA_MAP_NONE,
|
||||
NULL,
|
||||
dd->sdma_pad_phys,
|
||||
sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
|
||||
tx->num_desc++;
|
||||
|
|
|
@ -594,6 +594,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
|
|||
static inline void make_tx_sdma_desc(
|
||||
struct sdma_txreq *tx,
|
||||
int type,
|
||||
void *pinning_ctx,
|
||||
dma_addr_t addr,
|
||||
size_t len)
|
||||
{
|
||||
|
@ -612,6 +613,7 @@ static inline void make_tx_sdma_desc(
|
|||
<< SDMA_DESC0_PHY_ADDR_SHIFT) |
|
||||
(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
|
||||
<< SDMA_DESC0_BYTE_COUNT_SHIFT);
|
||||
desc->pinning_ctx = pinning_ctx;
|
||||
}
|
||||
|
||||
/* helper to extend txreq */
|
||||
|
@ -643,6 +645,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
|
|||
static inline int _sdma_txadd_daddr(
|
||||
struct hfi1_devdata *dd,
|
||||
int type,
|
||||
void *pinning_ctx,
|
||||
struct sdma_txreq *tx,
|
||||
dma_addr_t addr,
|
||||
u16 len)
|
||||
|
@ -652,6 +655,7 @@ static inline int _sdma_txadd_daddr(
|
|||
make_tx_sdma_desc(
|
||||
tx,
|
||||
type,
|
||||
pinning_ctx,
|
||||
addr, len);
|
||||
WARN_ON(len > tx->tlen);
|
||||
tx->num_desc++;
|
||||
|
@ -672,6 +676,7 @@ static inline int _sdma_txadd_daddr(
|
|||
/**
|
||||
* sdma_txadd_page() - add a page to the sdma_txreq
|
||||
* @dd: the device to use for mapping
|
||||
* @pinning_ctx: context to be released at descriptor retirement
|
||||
* @tx: tx request to which the page is added
|
||||
* @page: page to map
|
||||
* @offset: offset within the page
|
||||
|
@ -687,6 +692,7 @@ static inline int _sdma_txadd_daddr(
|
|||
*/
|
||||
static inline int sdma_txadd_page(
|
||||
struct hfi1_devdata *dd,
|
||||
void *pinning_ctx,
|
||||
struct sdma_txreq *tx,
|
||||
struct page *page,
|
||||
unsigned long offset,
|
||||
|
@ -714,8 +720,7 @@ static inline int sdma_txadd_page(
|
|||
return -ENOSPC;
|
||||
}
|
||||
|
||||
return _sdma_txadd_daddr(
|
||||
dd, SDMA_MAP_PAGE, tx, addr, len);
|
||||
return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -749,7 +754,8 @@ static inline int sdma_txadd_daddr(
|
|||
return rval;
|
||||
}
|
||||
|
||||
return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
|
||||
return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
|
||||
addr, len);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -795,8 +801,7 @@ static inline int sdma_txadd_kvaddr(
|
|||
return -ENOSPC;
|
||||
}
|
||||
|
||||
return _sdma_txadd_daddr(
|
||||
dd, SDMA_MAP_SINGLE, tx, addr, len);
|
||||
return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
|
||||
}
|
||||
|
||||
struct iowait_work;
|
||||
|
@ -1030,4 +1035,5 @@ extern uint mod_num_sdma;
|
|||
|
||||
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
|
||||
|
||||
void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
|
||||
#endif
@ -19,6 +19,7 @@
|
|||
struct sdma_desc {
|
||||
/* private: don't use directly */
|
||||
u64 qw[2];
|
||||
void *pinning_ctx;
|
||||
};
|
||||
|
||||
/**
@ -22,6 +22,11 @@
|
|||
|
||||
#define MAX_MSG_LEN 512
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#ifndef __clang__
|
||||
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
|
||||
#endif
|
||||
|
||||
DECLARE_EVENT_CLASS(hfi1_trace_template,
|
||||
TP_PROTO(const char *function, struct va_format *vaf),
|
||||
TP_ARGS(function, vaf),
|
||||
|
@ -36,6 +41,8 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
|
|||
__get_str(msg))
|
||||
);
|
||||
|
||||
#pragma GCC diagnostic pop
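For context on the pragmas added above, here is a minimal standalone sketch of the same push/ignore/pop pattern; the helper below and the warning it would provoke are assumptions made for the example, only the pragma usage mirrors the hfi1 change.

/* Minimal sketch, not hfi1 code: silence -Wsuggest-attribute=format around a
 * helper that forwards a format string, the way the trace template above does.
 */
#include <stdarg.h>
#include <stdio.h>

#pragma GCC diagnostic push
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif
/* Without the pragma, GCC may suggest __attribute__((format(printf, 1, 0))) here. */
static void trace_like_helper(const char *fmt, va_list args)
{
	vprintf(fmt, args);
}
#pragma GCC diagnostic pop

static void emit(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	trace_like_helper(fmt, args);
	va_end(args);
}

int main(void)
{
	emit("value=%d\n", 42);
	return 0;
}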
|
||||
|
||||
/*
|
||||
* It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
|
||||
* actual function to work and cannot be in a macro.
@ -37,10 +37,6 @@ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search,
|
|||
TP_PROTO(unsigned long addr, unsigned long len),
|
||||
TP_ARGS(addr, len));
|
||||
|
||||
DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove,
|
||||
TP_PROTO(unsigned long addr, unsigned long len),
|
||||
TP_ARGS(addr, len));
|
||||
|
||||
DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
|
||||
TP_PROTO(unsigned long addr, unsigned long len),
|
||||
TP_ARGS(addr, len));
@ -24,7 +24,6 @@
|
|||
|
||||
#include "hfi.h"
|
||||
#include "sdma.h"
|
||||
#include "mmu_rb.h"
|
||||
#include "user_sdma.h"
|
||||
#include "verbs.h" /* for the headers */
|
||||
#include "common.h" /* for struct hfi1_tid_info */
|
||||
|
@ -39,11 +38,7 @@ static unsigned initial_pkt_count = 8;
|
|||
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
|
||||
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
|
||||
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
|
||||
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
|
||||
static int pin_vector_pages(struct user_sdma_request *req,
|
||||
struct user_sdma_iovec *iovec);
|
||||
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
|
||||
unsigned start, unsigned npages);
|
||||
static void user_sdma_free_request(struct user_sdma_request *req);
|
||||
static int check_header_template(struct user_sdma_request *req,
|
||||
struct hfi1_pkt_header *hdr, u32 lrhlen,
|
||||
u32 datalen);
|
||||
|
@ -81,6 +76,11 @@ static struct mmu_rb_ops sdma_rb_ops = {
|
|||
.invalidate = sdma_rb_invalidate
|
||||
};
|
||||
|
||||
static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
|
||||
struct user_sdma_txreq *tx,
|
||||
struct user_sdma_iovec *iovec,
|
||||
u32 *pkt_remaining);
|
||||
|
||||
static int defer_packet_queue(
|
||||
struct sdma_engine *sde,
|
||||
struct iowait_work *wait,
|
||||
|
@ -410,6 +410,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
|
|||
ret = -EINVAL;
|
||||
goto free_req;
|
||||
}
|
||||
|
||||
/* Copy the header from the user buffer */
|
||||
ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
|
||||
sizeof(req->hdr));
|
||||
|
@ -484,9 +485,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
|
|||
memcpy(&req->iovs[i].iov,
|
||||
iovec + idx++,
|
||||
sizeof(req->iovs[i].iov));
|
||||
ret = pin_vector_pages(req, &req->iovs[i]);
|
||||
if (ret) {
|
||||
req->data_iovs = i;
|
||||
if (req->iovs[i].iov.iov_len == 0) {
|
||||
ret = -EINVAL;
|
||||
goto free_req;
|
||||
}
|
||||
req->data_len += req->iovs[i].iov.iov_len;
|
||||
|
@ -584,7 +584,7 @@ free_req:
|
|||
if (req->seqsubmitted)
|
||||
wait_event(pq->busy.wait_dma,
|
||||
(req->seqcomp == req->seqsubmitted - 1));
|
||||
user_sdma_free_request(req, true);
|
||||
user_sdma_free_request(req);
|
||||
pq_update(pq);
|
||||
set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
|
||||
}
|
||||
|
@ -696,48 +696,6 @@ static int user_sdma_txadd_ahg(struct user_sdma_request *req,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int user_sdma_txadd(struct user_sdma_request *req,
|
||||
struct user_sdma_txreq *tx,
|
||||
struct user_sdma_iovec *iovec, u32 datalen,
|
||||
u32 *queued_ptr, u32 *data_sent_ptr,
|
||||
u64 *iov_offset_ptr)
|
||||
{
|
||||
int ret;
|
||||
unsigned int pageidx, len;
|
||||
unsigned long base, offset;
|
||||
u64 iov_offset = *iov_offset_ptr;
|
||||
u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
|
||||
base = (unsigned long)iovec->iov.iov_base;
|
||||
offset = offset_in_page(base + iovec->offset + iov_offset);
|
||||
pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
|
||||
PAGE_SHIFT);
|
||||
len = offset + req->info.fragsize > PAGE_SIZE ?
|
||||
PAGE_SIZE - offset : req->info.fragsize;
|
||||
len = min((datalen - queued), len);
|
||||
ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
|
||||
offset, len);
|
||||
if (ret) {
|
||||
SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
iov_offset += len;
|
||||
queued += len;
|
||||
data_sent += len;
|
||||
if (unlikely(queued < datalen && pageidx == iovec->npages &&
|
||||
req->iov_idx < req->data_iovs - 1)) {
|
||||
iovec->offset += iov_offset;
|
||||
iovec = &req->iovs[++req->iov_idx];
|
||||
iov_offset = 0;
|
||||
}
|
||||
|
||||
*queued_ptr = queued;
|
||||
*data_sent_ptr = data_sent;
|
||||
*iov_offset_ptr = iov_offset;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
|
||||
{
|
||||
int ret = 0;
|
||||
|
@ -769,8 +727,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
|
|||
maxpkts = req->info.npkts - req->seqnum;
|
||||
|
||||
while (npkts < maxpkts) {
|
||||
u32 datalen = 0, queued = 0, data_sent = 0;
|
||||
u64 iov_offset = 0;
|
||||
u32 datalen = 0;
|
||||
|
||||
/*
|
||||
* Check whether any of the completions have come back
|
||||
|
@ -863,27 +820,17 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
|
|||
goto free_txreq;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the request contains any data vectors, add up to
|
||||
* fragsize bytes to the descriptor.
|
||||
*/
|
||||
while (queued < datalen &&
|
||||
(req->sent + data_sent) < req->data_len) {
|
||||
ret = user_sdma_txadd(req, tx, iovec, datalen,
|
||||
&queued, &data_sent, &iov_offset);
|
||||
if (ret)
|
||||
goto free_txreq;
|
||||
}
|
||||
/*
|
||||
* The txreq was submitted successfully so we can update
|
||||
* the counters.
|
||||
*/
|
||||
req->koffset += datalen;
|
||||
if (req_opcode(req->info.ctrl) == EXPECTED)
|
||||
req->tidoffset += datalen;
|
||||
req->sent += data_sent;
|
||||
if (req->data_len)
|
||||
iovec->offset += iov_offset;
|
||||
req->sent += datalen;
|
||||
while (datalen) {
|
||||
ret = add_system_pages_to_sdma_packet(req, tx, iovec,
|
||||
&datalen);
|
||||
if (ret)
|
||||
goto free_txreq;
|
||||
iovec = &req->iovs[req->iov_idx];
|
||||
}
|
||||
list_add_tail(&tx->txreq.list, &req->txps);
|
||||
/*
|
||||
* It is important to increment this here as it is used to
|
||||
|
@ -920,133 +867,14 @@ free_tx:
|
|||
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
|
||||
{
|
||||
struct evict_data evict_data;
|
||||
struct mmu_rb_handler *handler = pq->handler;
|
||||
|
||||
evict_data.cleared = 0;
|
||||
evict_data.target = npages;
|
||||
hfi1_mmu_rb_evict(pq->handler, &evict_data);
|
||||
hfi1_mmu_rb_evict(handler, &evict_data);
|
||||
return evict_data.cleared;
|
||||
}
|
||||
|
||||
static int pin_sdma_pages(struct user_sdma_request *req,
|
||||
struct user_sdma_iovec *iovec,
|
||||
struct sdma_mmu_node *node,
|
||||
int npages)
|
||||
{
|
||||
int pinned, cleared;
|
||||
struct page **pages;
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
|
||||
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
memcpy(pages, node->pages, node->npages * sizeof(*pages));
|
||||
|
||||
npages -= node->npages;
|
||||
retry:
|
||||
if (!hfi1_can_pin_pages(pq->dd, current->mm,
|
||||
atomic_read(&pq->n_locked), npages)) {
|
||||
cleared = sdma_cache_evict(pq, npages);
|
||||
if (cleared >= npages)
|
||||
goto retry;
|
||||
}
|
||||
pinned = hfi1_acquire_user_pages(current->mm,
|
||||
((unsigned long)iovec->iov.iov_base +
|
||||
(node->npages * PAGE_SIZE)), npages, 0,
|
||||
pages + node->npages);
|
||||
if (pinned < 0) {
|
||||
kfree(pages);
|
||||
return pinned;
|
||||
}
|
||||
if (pinned != npages) {
|
||||
unpin_vector_pages(current->mm, pages, node->npages, pinned);
|
||||
return -EFAULT;
|
||||
}
|
||||
kfree(node->pages);
|
||||
node->rb.len = iovec->iov.iov_len;
|
||||
node->pages = pages;
|
||||
atomic_add(pinned, &pq->n_locked);
|
||||
return pinned;
|
||||
}
|
||||
|
||||
static void unpin_sdma_pages(struct sdma_mmu_node *node)
|
||||
{
|
||||
if (node->npages) {
|
||||
unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
|
||||
node->npages);
|
||||
atomic_sub(node->npages, &node->pq->n_locked);
|
||||
}
|
||||
}
|
||||
|
||||
static int pin_vector_pages(struct user_sdma_request *req,
|
||||
struct user_sdma_iovec *iovec)
|
||||
{
|
||||
int ret = 0, pinned, npages;
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
struct sdma_mmu_node *node = NULL;
|
||||
struct mmu_rb_node *rb_node;
|
||||
struct iovec *iov;
|
||||
bool extracted;
|
||||
|
||||
extracted =
|
||||
hfi1_mmu_rb_remove_unless_exact(pq->handler,
|
||||
(unsigned long)
|
||||
iovec->iov.iov_base,
|
||||
iovec->iov.iov_len, &rb_node);
|
||||
if (rb_node) {
|
||||
node = container_of(rb_node, struct sdma_mmu_node, rb);
|
||||
if (!extracted) {
|
||||
atomic_inc(&node->refcount);
|
||||
iovec->pages = node->pages;
|
||||
iovec->npages = node->npages;
|
||||
iovec->node = node;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (!node) {
|
||||
node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
node->rb.addr = (unsigned long)iovec->iov.iov_base;
|
||||
node->pq = pq;
|
||||
atomic_set(&node->refcount, 0);
|
||||
}
|
||||
|
||||
iov = &iovec->iov;
|
||||
npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
|
||||
if (node->npages < npages) {
|
||||
pinned = pin_sdma_pages(req, iovec, node, npages);
|
||||
if (pinned < 0) {
|
||||
ret = pinned;
|
||||
goto bail;
|
||||
}
|
||||
node->npages += pinned;
|
||||
npages = node->npages;
|
||||
}
|
||||
iovec->pages = node->pages;
|
||||
iovec->npages = npages;
|
||||
iovec->node = node;
|
||||
|
||||
ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
|
||||
if (ret) {
|
||||
iovec->node = NULL;
|
||||
goto bail;
|
||||
}
|
||||
return 0;
|
||||
bail:
|
||||
unpin_sdma_pages(node);
|
||||
kfree(node);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
|
||||
unsigned start, unsigned npages)
|
||||
{
|
||||
hfi1_release_user_pages(mm, pages + start, npages, false);
|
||||
kfree(pages);
|
||||
}
|
||||
|
||||
static int check_header_template(struct user_sdma_request *req,
|
||||
struct hfi1_pkt_header *hdr, u32 lrhlen,
|
||||
u32 datalen)
|
||||
|
@ -1388,7 +1216,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
|
|||
if (req->seqcomp != req->info.npkts - 1)
|
||||
return;
|
||||
|
||||
user_sdma_free_request(req, false);
|
||||
user_sdma_free_request(req);
|
||||
set_comp_state(pq, cq, req->info.comp_idx, state, status);
|
||||
pq_update(pq);
|
||||
}
|
||||
|
@ -1399,10 +1227,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
|
|||
wake_up(&pq->wait);
|
||||
}
|
||||
|
||||
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
|
||||
static void user_sdma_free_request(struct user_sdma_request *req)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!list_empty(&req->txps)) {
|
||||
struct sdma_txreq *t, *p;
|
||||
|
||||
|
@ -1415,21 +1241,6 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
|
|||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < req->data_iovs; i++) {
|
||||
struct sdma_mmu_node *node = req->iovs[i].node;
|
||||
|
||||
if (!node)
|
||||
continue;
|
||||
|
||||
req->iovs[i].node = NULL;
|
||||
|
||||
if (unpin)
|
||||
hfi1_mmu_rb_remove(req->pq->handler,
|
||||
&node->rb);
|
||||
else
|
||||
atomic_dec(&node->refcount);
|
||||
}
|
||||
|
||||
kfree(req->tids);
|
||||
clear_bit(req->info.comp_idx, req->pq->req_in_use);
|
||||
}
|
||||
|
@ -1447,6 +1258,368 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
|
|||
idx, state, ret);
|
||||
}
|
||||
|
||||
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
|
||||
unsigned int start, unsigned int npages)
|
||||
{
|
||||
hfi1_release_user_pages(mm, pages + start, npages, false);
|
||||
kfree(pages);
|
||||
}
|
||||
|
||||
static void free_system_node(struct sdma_mmu_node *node)
|
||||
{
|
||||
if (node->npages) {
|
||||
unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
|
||||
node->npages);
|
||||
atomic_sub(node->npages, &node->pq->n_locked);
|
||||
}
|
||||
kfree(node);
|
||||
}
|
||||
|
||||
static inline void acquire_node(struct sdma_mmu_node *node)
|
||||
{
|
||||
atomic_inc(&node->refcount);
|
||||
WARN_ON(atomic_read(&node->refcount) < 0);
|
||||
}
|
||||
|
||||
static inline void release_node(struct mmu_rb_handler *handler,
|
||||
struct sdma_mmu_node *node)
|
||||
{
|
||||
atomic_dec(&node->refcount);
|
||||
WARN_ON(atomic_read(&node->refcount) < 0);
|
||||
}
|
||||
|
||||
static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
struct mmu_rb_node *rb_node;
|
||||
struct sdma_mmu_node *node;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
|
||||
if (!rb_node) {
|
||||
spin_unlock_irqrestore(&handler->lock, flags);
|
||||
return NULL;
|
||||
}
|
||||
node = container_of(rb_node, struct sdma_mmu_node, rb);
|
||||
acquire_node(node);
|
||||
spin_unlock_irqrestore(&handler->lock, flags);
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
static int pin_system_pages(struct user_sdma_request *req,
|
||||
uintptr_t start_address, size_t length,
|
||||
struct sdma_mmu_node *node, int npages)
|
||||
{
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
int pinned, cleared;
|
||||
struct page **pages;
|
||||
|
||||
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
retry:
|
||||
if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
|
||||
npages)) {
|
||||
SDMA_DBG(req, "Evicting: nlocked %u npages %u",
|
||||
atomic_read(&pq->n_locked), npages);
|
||||
cleared = sdma_cache_evict(pq, npages);
|
||||
if (cleared >= npages)
|
||||
goto retry;
|
||||
}
|
||||
|
||||
SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
|
||||
start_address, node->npages, npages);
|
||||
pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
|
||||
pages);
|
||||
|
||||
if (pinned < 0) {
|
||||
kfree(pages);
|
||||
SDMA_DBG(req, "pinned %d", pinned);
|
||||
return pinned;
|
||||
}
|
||||
if (pinned != npages) {
|
||||
unpin_vector_pages(current->mm, pages, node->npages, pinned);
|
||||
SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
|
||||
return -EFAULT;
|
||||
}
|
||||
node->rb.addr = start_address;
|
||||
node->rb.len = length;
|
||||
node->pages = pages;
|
||||
node->npages = npages;
|
||||
atomic_add(pinned, &pq->n_locked);
|
||||
SDMA_DBG(req, "done. pinned %d", pinned);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int add_system_pinning(struct user_sdma_request *req,
|
||||
struct sdma_mmu_node **node_p,
|
||||
unsigned long start, unsigned long len)
|
||||
|
||||
{
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
struct sdma_mmu_node *node;
|
||||
int ret;
|
||||
|
||||
node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
node->pq = pq;
|
||||
ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
|
||||
if (ret == 0) {
|
||||
ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
|
||||
if (ret)
|
||||
free_system_node(node);
|
||||
else
|
||||
*node_p = node;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
kfree(node);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int get_system_cache_entry(struct user_sdma_request *req,
|
||||
struct sdma_mmu_node **node_p,
|
||||
size_t req_start, size_t req_len)
|
||||
{
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
|
||||
u64 end = PFN_ALIGN(req_start + req_len);
|
||||
struct mmu_rb_handler *handler = pq->handler;
|
||||
int ret;
|
||||
|
||||
if ((end - start) == 0) {
|
||||
SDMA_DBG(req,
|
||||
"Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
|
||||
req_start, req_len, start, end);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
|
||||
|
||||
while (1) {
|
||||
struct sdma_mmu_node *node =
|
||||
find_system_node(handler, start, end);
|
||||
u64 prepend_len = 0;
|
||||
|
||||
SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
|
||||
if (!node) {
|
||||
ret = add_system_pinning(req, node_p, start,
|
||||
end - start);
|
||||
if (ret == -EEXIST) {
|
||||
/*
|
||||
* Another execution context has inserted a
|
||||
* conflicting entry first.
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (node->rb.addr <= start) {
|
||||
/*
|
||||
* This entry covers at least part of the region. If it doesn't extend
|
||||
* to the end, then this will be called again for the next segment.
|
||||
*/
|
||||
*node_p = node;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
|
||||
node->rb.addr, atomic_read(&node->refcount));
|
||||
prepend_len = node->rb.addr - start;
|
||||
|
||||
/*
|
||||
* This node will not be returned, instead a new node
|
||||
* will be. So release the reference.
|
||||
*/
|
||||
release_node(handler, node);
|
||||
|
||||
/* Prepend a node to cover the beginning of the allocation */
|
||||
ret = add_system_pinning(req, node_p, start, prepend_len);
|
||||
if (ret == -EEXIST) {
|
||||
/* Another execution context has inserted a conflicting entry first. */
|
||||
continue;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}
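As a quick illustration of the page rounding that get_system_cache_entry() relies on, a small userspace sketch follows; the request address and length are invented, and TOY_PAGE_SIZE stands in for PAGE_SIZE and the ALIGN_DOWN()/PFN_ALIGN() helpers.

#include <stdio.h>

#define TOY_PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long req_start = 0x12345, req_len = 0x100;	/* made up */
	unsigned long long start = req_start & ~(TOY_PAGE_SIZE - 1);	/* ALIGN_DOWN */
	unsigned long long end = (req_start + req_len + TOY_PAGE_SIZE - 1) &
				 ~(TOY_PAGE_SIZE - 1);			/* PFN_ALIGN */

	/* The cache is keyed on whole pages, so even a 0x100-byte request
	 * is looked up (and, on a miss, pinned) as one full page. */
	printf("lookup range [0x%llx, 0x%llx), %llu page(s)\n",
	       start, end, (end - start) / TOY_PAGE_SIZE);
	return 0;	/* prints [0x12000, 0x13000), 1 page(s) */
}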
|
||||
|
||||
static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
|
||||
struct user_sdma_txreq *tx,
|
||||
struct sdma_mmu_node *cache_entry,
|
||||
size_t start,
|
||||
size_t from_this_cache_entry)
|
||||
{
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
unsigned int page_offset;
|
||||
unsigned int from_this_page;
|
||||
size_t page_index;
|
||||
void *ctx;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Because the cache may be more fragmented than the memory that is being accessed,
|
||||
* it's not strictly necessary to have a descriptor per cache entry.
|
||||
*/
|
||||
|
||||
while (from_this_cache_entry) {
|
||||
page_index = PFN_DOWN(start - cache_entry->rb.addr);
|
||||
|
||||
if (page_index >= cache_entry->npages) {
|
||||
SDMA_DBG(req,
|
||||
"Request for page_index %zu >= cache_entry->npages %u",
|
||||
page_index, cache_entry->npages);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
|
||||
from_this_page = PAGE_SIZE - page_offset;
|
||||
|
||||
if (from_this_page < from_this_cache_entry) {
|
||||
ctx = NULL;
|
||||
} else {
|
||||
/*
|
||||
* In the case they are equal the next line has no practical effect,
|
||||
* but it's better to do a register to register copy than a conditional
|
||||
* branch.
|
||||
*/
|
||||
from_this_page = from_this_cache_entry;
|
||||
ctx = cache_entry;
|
||||
}
|
||||
|
||||
ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
|
||||
cache_entry->pages[page_index],
|
||||
page_offset, from_this_page);
|
||||
if (ret) {
|
||||
/*
|
||||
* When there's a failure, the entire request is freed by
|
||||
* user_sdma_send_pkts().
|
||||
*/
|
||||
SDMA_DBG(req,
|
||||
"sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
|
||||
ret, page_index, page_offset, from_this_page);
|
||||
return ret;
|
||||
}
|
||||
start += from_this_page;
|
||||
from_this_cache_entry -= from_this_page;
|
||||
}
|
||||
return 0;
|
||||
}
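A toy model of the per-page split performed by add_mapping_to_sdma_packet() above; the page size, start address and byte count are made up. It only shows the point of the ctx handling: every descriptor except the one that finishes the cache entry gets a NULL pinning context.

#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long start = 0x10ff0;		/* 16 bytes before a page end */
	unsigned long remaining = 6000;		/* bytes left in this cache entry */

	while (remaining) {
		unsigned long page_offset = start & (TOY_PAGE_SIZE - 1);
		unsigned long from_this_page = TOY_PAGE_SIZE - page_offset;
		int last = from_this_page >= remaining;

		if (last)
			from_this_page = remaining;
		printf("desc: page %lu offset %lu len %lu ctx=%s\n",
		       start / TOY_PAGE_SIZE, page_offset, from_this_page,
		       last ? "cache_entry" : "NULL");
		start += from_this_page;
		remaining -= from_this_page;
	}
	return 0;
}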
|
||||
|
||||
static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
|
||||
struct user_sdma_txreq *tx,
|
||||
struct user_sdma_iovec *iovec,
|
||||
size_t from_this_iovec)
|
||||
{
|
||||
struct mmu_rb_handler *handler = req->pq->handler;
|
||||
|
||||
while (from_this_iovec > 0) {
|
||||
struct sdma_mmu_node *cache_entry;
|
||||
size_t from_this_cache_entry;
|
||||
size_t start;
|
||||
int ret;
|
||||
|
||||
start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
|
||||
ret = get_system_cache_entry(req, &cache_entry, start,
|
||||
from_this_iovec);
|
||||
if (ret) {
|
||||
SDMA_DBG(req, "pin system segment failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
|
||||
if (from_this_cache_entry > from_this_iovec)
|
||||
from_this_cache_entry = from_this_iovec;
|
||||
|
||||
ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
|
||||
from_this_cache_entry);
|
||||
if (ret) {
|
||||
/*
|
||||
* We're guaranteed that there will be no descriptor
|
||||
* completion callback that releases this node
|
||||
* because only the last descriptor referencing it
|
||||
* has a context attached, and a failure means the
|
||||
* last descriptor was never added.
|
||||
*/
|
||||
release_node(handler, cache_entry);
|
||||
SDMA_DBG(req, "add system segment failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
iovec->offset += from_this_cache_entry;
|
||||
from_this_iovec -= from_this_cache_entry;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
|
||||
struct user_sdma_txreq *tx,
|
||||
struct user_sdma_iovec *iovec,
|
||||
u32 *pkt_data_remaining)
|
||||
{
|
||||
size_t remaining_to_add = *pkt_data_remaining;
|
||||
/*
|
||||
* Walk through iovec entries, ensure the associated pages
|
||||
* are pinned and mapped, add data to the packet until no more
|
||||
* data remains to be added.
|
||||
*/
|
||||
while (remaining_to_add > 0) {
|
||||
struct user_sdma_iovec *cur_iovec;
|
||||
size_t from_this_iovec;
|
||||
int ret;
|
||||
|
||||
cur_iovec = iovec;
|
||||
from_this_iovec = iovec->iov.iov_len - iovec->offset;
|
||||
|
||||
if (from_this_iovec > remaining_to_add) {
|
||||
from_this_iovec = remaining_to_add;
|
||||
} else {
|
||||
/* The current iovec entry will be consumed by this pass. */
|
||||
req->iov_idx++;
|
||||
iovec++;
|
||||
}
|
||||
|
||||
ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
|
||||
from_this_iovec);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
remaining_to_add -= from_this_iovec;
|
||||
}
|
||||
*pkt_data_remaining = remaining_to_add;
|
||||
|
||||
return 0;
|
||||
}
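And a similarly simplified walk of the iovec consumption in add_system_pages_to_sdma_packet(); the vector lengths, offsets and the packet payload below are arbitrary, and vector advancement is collapsed into a fully-consumed check.

#include <stdio.h>

int main(void)
{
	unsigned long iov_len[] = { 3000, 5000, 8000 };	/* invented vectors */
	unsigned long iov_off[] = { 2500, 0, 0 };	/* 500 bytes left in iov 0 */
	unsigned long remaining = 4096;			/* one packet's payload */
	unsigned int idx = 0;

	while (remaining) {
		unsigned long avail = iov_len[idx] - iov_off[idx];
		unsigned long take = avail > remaining ? remaining : avail;

		printf("iov %u: add %lu bytes\n", idx, take);
		iov_off[idx] += take;
		remaining -= take;
		if (iov_off[idx] == iov_len[idx])
			idx++;		/* this vector is fully consumed */
	}
	return 0;
}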
|
||||
|
||||
void system_descriptor_complete(struct hfi1_devdata *dd,
|
||||
struct sdma_desc *descp)
|
||||
{
|
||||
switch (sdma_mapping_type(descp)) {
|
||||
case SDMA_MAP_SINGLE:
|
||||
dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
|
||||
sdma_mapping_len(descp), DMA_TO_DEVICE);
|
||||
break;
|
||||
case SDMA_MAP_PAGE:
|
||||
dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
|
||||
sdma_mapping_len(descp), DMA_TO_DEVICE);
|
||||
break;
|
||||
}
|
||||
|
||||
if (descp->pinning_ctx) {
|
||||
struct sdma_mmu_node *node = descp->pinning_ctx;
|
||||
|
||||
release_node(node->rb.handler, node);
|
||||
}
|
||||
}
|
||||
|
||||
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
|
||||
unsigned long len)
|
||||
{
|
||||
|
@ -1493,8 +1666,7 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
|
|||
struct sdma_mmu_node *node =
|
||||
container_of(mnode, struct sdma_mmu_node, rb);
|
||||
|
||||
unpin_sdma_pages(node);
|
||||
kfree(node);
|
||||
free_system_node(node);
|
||||
}
|
||||
|
||||
static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
@ -112,16 +112,11 @@ struct sdma_mmu_node {
|
|||
struct user_sdma_iovec {
|
||||
struct list_head list;
|
||||
struct iovec iov;
|
||||
/* number of pages in this vector */
|
||||
unsigned int npages;
|
||||
/* array of pinned pages for this vector */
|
||||
struct page **pages;
|
||||
/*
|
||||
* offset into the virtual address space of the vector at
|
||||
* which we last left off.
|
||||
*/
|
||||
u64 offset;
|
||||
struct sdma_mmu_node *node;
|
||||
};
|
||||
|
||||
/* evict operation argument */
|
||||
|
|
|
@ -778,8 +778,8 @@ static int build_verbs_tx_desc(
|
|||
|
||||
/* add icrc, lt byte, and padding to flit */
|
||||
if (extra_bytes)
|
||||
ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
|
||||
sde->dd->sdma_pad_phys, extra_bytes);
|
||||
ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
|
||||
extra_bytes);
|
||||
|
||||
bail_txadd:
|
||||
return ret;
@ -64,6 +64,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
|
|||
|
||||
/* combine physically continuous fragments later? */
|
||||
ret = sdma_txadd_page(sde->dd,
|
||||
NULL,
|
||||
&tx->txreq,
|
||||
skb_frag_page(frag),
|
||||
skb_frag_off(frag),
@ -1960,100 +1960,6 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
|
|||
return hns_roce_cmq_send(hr_dev, &desc, 1);
|
||||
}
|
||||
|
||||
/* Use default caps when hns_roce_query_pf_caps() failed or init VF profile */
|
||||
static void set_default_caps(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
struct hns_roce_caps *caps = &hr_dev->caps;
|
||||
|
||||
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
|
||||
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
|
||||
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
|
||||
caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
|
||||
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
|
||||
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
|
||||
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
|
||||
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
|
||||
|
||||
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
|
||||
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
|
||||
caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
|
||||
caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
|
||||
caps->num_comp_vectors = 0;
|
||||
|
||||
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
|
||||
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
|
||||
caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
|
||||
caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
|
||||
|
||||
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
|
||||
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
|
||||
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
|
||||
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
|
||||
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
|
||||
caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
|
||||
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
|
||||
caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
|
||||
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
|
||||
caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
|
||||
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
|
||||
caps->reserved_lkey = 0;
|
||||
caps->reserved_pds = 0;
|
||||
caps->reserved_mrws = 1;
|
||||
caps->reserved_uars = 0;
|
||||
caps->reserved_cqs = 0;
|
||||
caps->reserved_srqs = 0;
|
||||
caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
|
||||
|
||||
caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
|
||||
caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
|
||||
caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
|
||||
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
|
||||
caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
|
||||
|
||||
caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
|
||||
caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
|
||||
caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
|
||||
caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
|
||||
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
|
||||
caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
|
||||
caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
|
||||
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
|
||||
|
||||
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
|
||||
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
|
||||
HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
|
||||
HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
|
||||
|
||||
caps->pkey_table_len[0] = 1;
|
||||
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
|
||||
caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
|
||||
caps->local_ca_ack_delay = 0;
|
||||
caps->max_mtu = IB_MTU_4096;
|
||||
|
||||
caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
|
||||
caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
|
||||
|
||||
caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
|
||||
HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
|
||||
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
|
||||
|
||||
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
|
||||
|
||||
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
|
||||
caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
|
||||
HNS_ROCE_CAP_FLAG_DIRECT_WQE |
|
||||
HNS_ROCE_CAP_FLAG_XRC;
|
||||
caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
|
||||
} else {
|
||||
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
|
||||
|
||||
/* The following configuration are only valid for HIP08 */
|
||||
caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
|
||||
caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
|
||||
caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
|
||||
u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
|
||||
{
|
||||
|
@ -2239,7 +2145,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
|
|||
set_hem_page_size(hr_dev);
|
||||
}
|
||||
|
||||
static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
|
||||
static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
|
||||
struct hns_roce_caps *caps = &hr_dev->caps;
|
||||
|
@ -2248,15 +2154,17 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
|
|||
struct hns_roce_query_pf_caps_c *resp_c;
|
||||
struct hns_roce_query_pf_caps_d *resp_d;
|
||||
struct hns_roce_query_pf_caps_e *resp_e;
|
||||
enum hns_roce_opcode_type cmd;
|
||||
int ctx_hop_num;
|
||||
int pbl_hop_num;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
|
||||
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
|
||||
|
||||
for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
|
||||
hns_roce_cmq_setup_basic_desc(&desc[i],
|
||||
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
|
||||
true);
|
||||
hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
|
||||
if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
|
||||
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
|
||||
else
|
||||
|
@ -2273,35 +2181,33 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
|
|||
resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
|
||||
resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
|
||||
|
||||
caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
|
||||
caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
|
||||
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
|
||||
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
|
||||
caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
|
||||
caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
|
||||
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
|
||||
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
|
||||
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
|
||||
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
|
||||
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
|
||||
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
|
||||
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
|
||||
caps->num_other_vectors = resp_a->num_other_vectors;
|
||||
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
|
||||
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
|
||||
caps->cqe_sz = resp_a->cqe_sz;
|
||||
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
|
||||
caps->num_other_vectors = resp_a->num_other_vectors;
|
||||
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
|
||||
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
|
||||
|
||||
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
|
||||
caps->irrl_entry_sz = resp_b->irrl_entry_sz;
|
||||
caps->trrl_entry_sz = resp_b->trrl_entry_sz;
|
||||
caps->cqc_entry_sz = resp_b->cqc_entry_sz;
|
||||
caps->srqc_entry_sz = resp_b->srqc_entry_sz;
|
||||
caps->idx_entry_sz = resp_b->idx_entry_sz;
|
||||
caps->sccc_sz = resp_b->sccc_sz;
|
||||
caps->max_mtu = resp_b->max_mtu;
|
||||
caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
|
||||
caps->min_cqes = resp_b->min_cqes;
|
||||
caps->min_wqes = resp_b->min_wqes;
|
||||
caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
|
||||
caps->pkey_table_len[0] = resp_b->pkey_table_len;
|
||||
caps->phy_num_uars = resp_b->phy_num_uars;
|
||||
ctx_hop_num = resp_b->ctx_hop_num;
|
||||
pbl_hop_num = resp_b->pbl_hop_num;
|
||||
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
|
||||
caps->irrl_entry_sz = resp_b->irrl_entry_sz;
|
||||
caps->trrl_entry_sz = resp_b->trrl_entry_sz;
|
||||
caps->cqc_entry_sz = resp_b->cqc_entry_sz;
|
||||
caps->srqc_entry_sz = resp_b->srqc_entry_sz;
|
||||
caps->idx_entry_sz = resp_b->idx_entry_sz;
|
||||
caps->sccc_sz = resp_b->sccc_sz;
|
||||
caps->max_mtu = resp_b->max_mtu;
|
||||
caps->min_cqes = resp_b->min_cqes;
|
||||
caps->min_wqes = resp_b->min_wqes;
|
||||
caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
|
||||
caps->pkey_table_len[0] = resp_b->pkey_table_len;
|
||||
caps->phy_num_uars = resp_b->phy_num_uars;
|
||||
ctx_hop_num = resp_b->ctx_hop_num;
|
||||
pbl_hop_num = resp_b->pbl_hop_num;
|
||||
|
||||
caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS);
|
||||
|
||||
|
@ -2324,8 +2230,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
|
|||
caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
|
||||
caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
|
||||
caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
|
||||
caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST);
|
||||
caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST);
|
||||
caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
|
||||
caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
|
||||
caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
|
||||
|
@ -2336,10 +2240,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
|
|||
caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
|
||||
caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
|
||||
caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
|
||||
caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
|
||||
caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
|
||||
caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
|
||||
caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
|
||||
|
||||
caps->qpc_hop_num = ctx_hop_num;
|
||||
caps->sccc_hop_num = ctx_hop_num;
|
||||
|
@ -2357,6 +2257,19 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
|
|||
if (!(caps->page_size_cap & PAGE_SIZE))
|
||||
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
|
||||
|
||||
if (!hr_dev->is_vf) {
|
||||
caps->cqe_sz = resp_a->cqe_sz;
|
||||
caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
|
||||
caps->default_aeq_arm_st =
|
||||
hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST);
|
||||
caps->default_ceq_arm_st =
|
||||
hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST);
|
||||
caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
|
||||
caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
|
||||
caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
|
||||
caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2404,7 +2317,11 @@ static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
|
|||
|
||||
hr_dev->func_num = 1;
|
||||
|
||||
set_default_caps(hr_dev);
|
||||
ret = hns_roce_query_caps(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to query VF caps, ret = %d.\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hns_roce_query_vf_resource(hr_dev);
|
||||
if (ret) {
|
||||
|
@ -2444,9 +2361,11 @@ static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = hns_roce_query_pf_caps(hr_dev);
|
||||
if (ret)
|
||||
set_default_caps(hr_dev);
|
||||
ret = hns_roce_query_caps(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to query PF caps, ret = %d.\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hns_roce_query_pf_resource(hr_dev);
|
||||
if (ret) {
|
||||
|
|
|
@ -35,43 +35,15 @@
|
|||
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
|
||||
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
|
||||
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
|
||||
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
|
||||
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
|
||||
#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100
|
||||
#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100
|
||||
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
|
||||
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
|
||||
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
|
||||
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
|
||||
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
|
||||
#define HNS_ROCE_V3_MAX_SQ_INLINE 0x400
|
||||
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
|
||||
#define HNS_ROCE_V2_UAR_NUM 256
|
||||
#define HNS_ROCE_V2_PHY_UAR_NUM 1
|
||||
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
|
||||
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
|
||||
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
|
||||
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
|
||||
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
|
||||
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
|
||||
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
|
||||
#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
|
||||
#define HNS_ROCE_V2_RSV_XRCD_NUM 0
|
||||
#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
|
||||
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
|
||||
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
|
||||
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
|
||||
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
|
||||
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
|
||||
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
|
||||
#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
|
||||
#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
|
||||
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
|
||||
#define HNS_ROCE_V2_IDX_ENTRY_SZ 4
|
||||
|
||||
#define HNS_ROCE_V2_SCCC_SZ 32
|
||||
#define HNS_ROCE_V3_SCCC_SZ 64
|
||||
#define HNS_ROCE_V3_GMV_ENTRY_SZ 32
|
||||
|
||||
|
@ -232,6 +204,7 @@ enum hns_roce_opcode_type {
|
|||
HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407,
|
||||
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
|
||||
HNS_ROCE_OPC_CFG_ENTRY_SIZE = 0x8409,
|
||||
HNS_ROCE_OPC_QUERY_VF_CAPS_NUM = 0x8410,
|
||||
HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
|
||||
HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
|
||||
HNS_ROCE_OPC_POST_MB = 0x8504,
@ -337,7 +337,7 @@ static struct irdma_puda_buf *irdma_form_ah_cm_frame(struct irdma_cm_node *cm_no
|
|||
|
||||
pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len;
|
||||
|
||||
memset(buf, 0, pktsize);
|
||||
memset(buf, 0, sizeof(*tcph));
|
||||
|
||||
sqbuf->totallen = pktsize;
|
||||
sqbuf->tcphlen = sizeof(*tcph) + opts_len;
|
||||
|
|
|
@ -1867,8 +1867,6 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
|
|||
vsi->mtu = info->params->mtu;
|
||||
vsi->exception_lan_q = info->exception_lan_q;
|
||||
vsi->vsi_idx = info->pf_data_vsi_num;
|
||||
if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
|
||||
vsi->fcn_id = info->dev->hmc_fn_id;
|
||||
|
||||
irdma_set_qos_info(vsi, info->params);
|
||||
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
|
||||
|
@ -1887,32 +1885,56 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
|
|||
}
|
||||
|
||||
/**
|
||||
* irdma_get_fcn_id - Return the function id
|
||||
* irdma_get_stats_idx - Return stats index
|
||||
* @vsi: pointer to the vsi
|
||||
*/
|
||||
static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
|
||||
static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
|
||||
{
|
||||
struct irdma_stats_inst_info stats_info = {};
|
||||
struct irdma_sc_dev *dev = vsi->dev;
|
||||
u8 fcn_id = IRDMA_INVALID_FCN_ID;
|
||||
u8 start_idx, max_stats, i;
|
||||
u8 i;
|
||||
|
||||
if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
|
||||
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
|
||||
if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
|
||||
&stats_info))
|
||||
return stats_info.stats_idx;
|
||||
}
|
||||
|
||||
start_idx = 1;
|
||||
max_stats = 16;
|
||||
for (i = start_idx; i < max_stats; i++)
|
||||
if (!dev->fcn_id_array[i]) {
|
||||
fcn_id = i;
|
||||
dev->fcn_id_array[i] = true;
|
||||
break;
|
||||
for (i = 0; i < IRDMA_MAX_STATS_COUNT_GEN_1; i++) {
|
||||
if (!dev->stats_idx_array[i]) {
|
||||
dev->stats_idx_array[i] = true;
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return fcn_id;
|
||||
return IRDMA_INVALID_STATS_IDX;
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1
|
||||
* @vsi: vsi structure where hw_regs are set
|
||||
*
|
||||
* Populate the HW stats table
|
||||
*/
|
||||
static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi)
|
||||
{
|
||||
struct irdma_sc_dev *dev = vsi->dev;
|
||||
const struct irdma_hw_stat_map *map;
|
||||
u64 *stat_reg = vsi->hw_stats_regs;
|
||||
u64 *regs = dev->hw_stats_regs;
|
||||
u16 i, stats_reg_set = vsi->stats_idx;
|
||||
|
||||
map = dev->hw_stats_map;
|
||||
|
||||
/* First 4 stat instances are reserved for port level statistics. */
|
||||
stats_reg_set += vsi->stats_inst_alloc ? IRDMA_FIRST_NON_PF_STAT : 0;
|
||||
|
||||
for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) {
|
||||
if (map[i].bitmask <= IRDMA_MAX_STATS_32)
|
||||
stat_reg[i] = regs[i] + stats_reg_set * sizeof(u32);
|
||||
else
|
||||
stat_reg[i] = regs[i] + stats_reg_set * sizeof(u64);
|
||||
}
|
||||
}
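To make the gen1 register arithmetic above concrete, a short sketch with invented register bases; it assumes IRDMA_FIRST_NON_PF_STAT is 4, matching the "first 4 stat instances" comment, and that a stats instance was allocated.

#include <stdio.h>

int main(void)
{
	unsigned long reg32_base = 0x1000, reg64_base = 0x2000;	/* made-up bases */
	unsigned int stats_idx = 2;			/* per-VSI instance */
	unsigned int stats_reg_set = stats_idx + 4;	/* skip the 4 port-level sets */

	/* 32-bit counters are strided 4 bytes per instance, 64-bit ones 8 bytes */
	printf("32-bit stat reg: 0x%lx\n", reg32_base + stats_reg_set * 4UL);	/* 0x1018 */
	printf("64-bit stat reg: 0x%lx\n", reg64_base + stats_reg_set * 8UL);	/* 0x2030 */
	return 0;
}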
|
||||
|
||||
/**
|
||||
|
@ -1923,7 +1945,6 @@ static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
|
|||
int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
|
||||
struct irdma_vsi_stats_info *info)
|
||||
{
|
||||
u8 fcn_id = info->fcn_id;
|
||||
struct irdma_dma_mem *stats_buff_mem;
|
||||
|
||||
vsi->pestat = info->pestat;
|
||||
|
@ -1944,26 +1965,24 @@ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
|
|||
IRDMA_GATHER_STATS_BUF_SIZE);
|
||||
|
||||
irdma_hw_stats_start_timer(vsi);
|
||||
if (info->alloc_fcn_id)
|
||||
fcn_id = irdma_get_fcn_id(vsi);
|
||||
if (fcn_id == IRDMA_INVALID_FCN_ID)
|
||||
goto stats_error;
|
||||
|
||||
vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
|
||||
vsi->fcn_id = fcn_id;
|
||||
if (info->alloc_fcn_id) {
|
||||
vsi->pestat->gather_info.use_stats_inst = true;
|
||||
vsi->pestat->gather_info.stats_inst_index = fcn_id;
|
||||
/* when stat allocation is not required default to fcn_id. */
|
||||
vsi->stats_idx = info->fcn_id;
|
||||
if (info->alloc_stats_inst) {
|
||||
u8 stats_idx = irdma_get_stats_idx(vsi);
|
||||
|
||||
if (stats_idx != IRDMA_INVALID_STATS_IDX) {
|
||||
vsi->stats_inst_alloc = true;
|
||||
vsi->stats_idx = stats_idx;
|
||||
vsi->pestat->gather_info.use_stats_inst = true;
|
||||
vsi->pestat->gather_info.stats_inst_index = stats_idx;
|
||||
}
|
||||
}
|
||||
|
||||
if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
|
||||
irdma_hw_stats_init_gen1(vsi);
|
||||
|
||||
return 0;
|
||||
|
||||
stats_error:
|
||||
dma_free_coherent(vsi->pestat->hw->device, stats_buff_mem->size,
|
||||
stats_buff_mem->va, stats_buff_mem->pa);
|
||||
stats_buff_mem->va = NULL;
|
||||
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1973,19 +1992,19 @@ stats_error:
|
|||
void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
|
||||
{
|
||||
struct irdma_stats_inst_info stats_info = {};
|
||||
u8 fcn_id = vsi->fcn_id;
|
||||
struct irdma_sc_dev *dev = vsi->dev;
|
||||
u8 stats_idx = vsi->stats_idx;
|
||||
|
||||
if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
|
||||
if (vsi->stats_fcn_id_alloc) {
|
||||
stats_info.stats_idx = vsi->fcn_id;
|
||||
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
|
||||
if (vsi->stats_inst_alloc) {
|
||||
stats_info.stats_idx = vsi->stats_idx;
|
||||
irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
|
||||
&stats_info);
|
||||
}
|
||||
} else {
|
||||
if (vsi->stats_fcn_id_alloc &&
|
||||
fcn_id < vsi->dev->hw_attrs.max_stat_inst)
|
||||
vsi->dev->fcn_id_array[fcn_id] = false;
|
||||
if (vsi->stats_inst_alloc &&
|
||||
stats_idx < vsi->dev->hw_attrs.max_stat_inst)
|
||||
vsi->dev->stats_idx_array[stats_idx] = false;
|
||||
}
|
||||
|
||||
if (!vsi->pestat)
|
||||
|
@ -5297,7 +5316,8 @@ void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
|
|||
gather_stats = vsi->pestat->gather_info.gather_stats_va;
|
||||
last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
|
||||
irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
|
||||
last_gather_stats);
|
||||
last_gather_stats, vsi->dev->hw_stats_map,
|
||||
vsi->dev->hw_attrs.max_stat_idx);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -5404,186 +5424,62 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
|
|||
return ret_code;
|
||||
}
|
||||
|
||||
/**
* irdma_stat_val - Extract HW counter value from statistics buffer
* @stats_val: pointer to statistics buffer
* @byteoff: byte offset of counter value in the buffer (8B-aligned)
* @bitoff: bit offset of counter value within 8B entry
* @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
*/
static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff,
u64 bitmask)
{
u16 idx = byteoff / sizeof(*stats_val);

return (stats_val[idx] >> bitoff) & bitmask;
}

/**
* irdma_stat_delta - Calculate counter delta
* @new_val: updated counter value
* @old_val: last counter value
* @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
*/
static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
{
if (new_val >= old_val)
return new_val - old_val;

/* roll-over case */
return max_val - old_val + new_val + 1;
}
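A standalone example of how the two helpers above combine when a 24-bit counter wraps; the buffer contents are invented and the local functions only mirror irdma_stat_val()/irdma_stat_delta().

#include <stdint.h>
#include <stdio.h>

static uint64_t stat_val(const uint64_t *buf, uint16_t byteoff, uint8_t bitoff,
			 uint64_t bitmask)
{
	return (buf[byteoff / sizeof(*buf)] >> bitoff) & bitmask;
}

static uint64_t stat_delta(uint64_t new_val, uint64_t old_val, uint64_t max_val)
{
	if (new_val >= old_val)
		return new_val - old_val;
	return max_val - old_val + new_val + 1;	/* roll-over case */
}

int main(void)
{
	uint64_t last[1] = { 0xfffff0 };	/* 24-bit counter close to wrap */
	uint64_t now[1]  = { 0x000010 };	/* value after it wrapped */
	uint64_t mask = 0xffffffULL;		/* IRDMA_MAX_STATS_24 */
	uint64_t d = stat_delta(stat_val(now, 0, 0, mask),
				stat_val(last, 0, 0, mask), mask);

	printf("delta = %llu\n", (unsigned long long)d);	/* 32 */
	return 0;
}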
|
||||
|
||||
/**
|
||||
* irdma_update_stats - Update statistics
|
||||
* @hw_stats: hw_stats instance to update
|
||||
* @gather_stats: updated stat counters
|
||||
* @last_gather_stats: last stat counters
|
||||
* @map: HW stat map (hw_stats => gather_stats)
|
||||
* @max_stat_idx: number of HW stats
|
||||
*/
|
||||
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
|
||||
struct irdma_gather_stats *gather_stats,
|
||||
struct irdma_gather_stats *last_gather_stats)
|
||||
struct irdma_gather_stats *last_gather_stats,
|
||||
const struct irdma_hw_stat_map *map, u16 max_stat_idx)
|
||||
{
|
||||
u64 *stats_val = hw_stats->stats_val_32;
|
||||
u64 *stats_val = hw_stats->stats_val;
|
||||
u16 i;
|
||||
|
||||
for (i = 0; i < max_stat_idx; i++) {
|
||||
u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff,
|
||||
map[i].bitoff, map[i].bitmask);
|
||||
u64 last_val = irdma_stat_val(last_gather_stats->val,
|
||||
map[i].byteoff, map[i].bitoff,
|
||||
map[i].bitmask);
|
||||
|
||||
stats_val[i] +=
|
||||
irdma_stat_delta(new_val, last_val, map[i].bitmask);
|
||||
}
|
||||
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RXVLANERR] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rxvlanerr,
|
||||
last_gather_stats->rxvlanerr,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4rxdiscard,
|
||||
last_gather_stats->ip4rxdiscard,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4rxtrunc,
|
||||
last_gather_stats->ip4rxtrunc,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4txnoroute,
|
||||
last_gather_stats->ip4txnoroute,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6rxdiscard,
|
||||
last_gather_stats->ip6rxdiscard,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6rxtrunc,
|
||||
last_gather_stats->ip6rxtrunc,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6txnoroute,
|
||||
last_gather_stats->ip6txnoroute,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_TCPRTXSEG] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->tcprtxseg,
|
||||
last_gather_stats->tcprtxseg,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->tcprxopterr,
|
||||
last_gather_stats->tcprxopterr,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->tcprxprotoerr,
|
||||
last_gather_stats->tcprxprotoerr,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rxrpcnphandled,
|
||||
last_gather_stats->rxrpcnphandled,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rxrpcnpignored,
|
||||
last_gather_stats->rxrpcnpignored,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->txnpcnpsent,
|
||||
last_gather_stats->txnpcnpsent,
|
||||
IRDMA_MAX_STATS_32);
|
||||
stats_val = hw_stats->stats_val_64;
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4RXOCTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4rxocts,
|
||||
last_gather_stats->ip4rxocts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4RXPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4rxpkts,
|
||||
last_gather_stats->ip4rxpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
|
||||
last_gather_stats->ip4txfrag,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4rxmcpkts,
|
||||
last_gather_stats->ip4rxmcpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4TXOCTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4txocts,
|
||||
last_gather_stats->ip4txocts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4TXPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4txpkts,
|
||||
last_gather_stats->ip4txpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
|
||||
last_gather_stats->ip4txfrag,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip4txmcpkts,
|
||||
last_gather_stats->ip4txmcpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6RXOCTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6rxocts,
|
||||
last_gather_stats->ip6rxocts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6RXPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6rxpkts,
|
||||
last_gather_stats->ip6rxpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
|
||||
last_gather_stats->ip6txfrags,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6rxmcpkts,
|
||||
last_gather_stats->ip6rxmcpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6TXOCTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6txocts,
|
||||
last_gather_stats->ip6txocts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6TXPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6txpkts,
|
||||
last_gather_stats->ip6txpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
|
||||
last_gather_stats->ip6txfrags,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->ip6txmcpkts,
|
||||
last_gather_stats->ip6txmcpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_TCPRXSEGS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->tcprxsegs,
|
||||
last_gather_stats->tcprxsegs,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_TCPTXSEG] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->tcptxsegs,
|
||||
last_gather_stats->tcptxsegs,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMARXRDS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmarxrds,
|
||||
last_gather_stats->rdmarxrds,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMARXSNDS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmarxsnds,
|
||||
last_gather_stats->rdmarxsnds,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMARXWRS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmarxwrs,
|
||||
last_gather_stats->rdmarxwrs,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMATXRDS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmatxrds,
|
||||
last_gather_stats->rdmatxrds,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMATXSNDS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmatxsnds,
|
||||
last_gather_stats->rdmatxsnds,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMATXWRS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmatxwrs,
|
||||
last_gather_stats->rdmatxwrs,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMAVBND] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmavbn,
|
||||
last_gather_stats->rdmavbn,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RDMAVINV] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rdmavinv,
|
||||
last_gather_stats->rdmavinv,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_UDPRXPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->udprxpkts,
|
||||
last_gather_stats->udprxpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_UDPTXPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->udptxpkts,
|
||||
last_gather_stats->udptxpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
stats_val[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] +=
|
||||
IRDMA_STATS_DELTA(gather_stats->rxnpecnmrkpkts,
|
||||
last_gather_stats->rxnpecnmrkpkts,
|
||||
IRDMA_MAX_STATS_48);
|
||||
memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
|
||||
}
|
||||
|
|
|
@@ -36,6 +36,7 @@ enum irdma_protocol_used {
#define IRDMA_QP_STATE_ERROR 6
#define IRDMA_MAX_TRAFFIC_CLASS 8
#define IRDMA_MAX_STATS_COUNT_GEN_1 12
#define IRDMA_MAX_USER_PRIORITY 8
#define IRDMA_MAX_APPS 8
#define IRDMA_MAX_STATS_COUNT 128
@@ -365,9 +366,11 @@ enum irdma_cqp_op_type {
#define FLD_RS_32(dev, val, field) \
((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S])
#define IRDMA_STATS_DELTA(a, b, c) ((a) >= (b) ? (a) - (b) : (a) + (c) - (b))
#define IRDMA_MAX_STATS_32 0xFFFFFFFFULL
#define IRDMA_MAX_STATS_48 0xFFFFFFFFFFFFULL
#define IRDMA_MAX_STATS_24 0xffffffULL
#define IRDMA_MAX_STATS_32 0xffffffffULL
#define IRDMA_MAX_STATS_48 0xffffffffffffULL
#define IRDMA_MAX_STATS_56 0xffffffffffffffULL
#define IRDMA_MAX_STATS_64 0xffffffffffffffffULL
#define IRDMA_MAX_CQ_READ_THRESH 0x3FFFF
#define IRDMA_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32)

@@ -1092,14 +1092,19 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
int status;
if (rf->msix_shared && !ceq_id) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
"AEQCEQ", rf);
msix_vec->name, rf);
} else {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-CEQ-%d",
dev_name(&rf->pcidev->dev), ceq_id);
tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
"CEQ", iwceq);
msix_vec->name, iwceq);
}
cpumask_clear(&msix_vec->mask);
cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
@@ -1128,9 +1133,11 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
u32 ret = 0;
if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
"irdma", rf);
msix_vec->name, rf);
}
if (ret) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
@@ -1904,8 +1911,8 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CEQ0_CREATED;
/* Handles processing of CQP completions */
rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
WQ_HIGHPRI | WQ_UNBOUND);
rf->cqp_cmpl_wq =
alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI);
if (!rf->cqp_cmpl_wq) {
status = -ENOMEM;
break;

@@ -32,7 +32,7 @@ static u32 i40iw_regs[IRDMA_MAX_REGS] = {
0xffffffff /* PFINT_RATEN not used in FPK */
};
static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = {
static u32 i40iw_stat_offsets[] = {
I40E_GLPES_PFIP4RXDISCARD(0),
I40E_GLPES_PFIP4RXTRUNC(0),
I40E_GLPES_PFIP4TXNOROUTE(0),
@@ -42,10 +42,8 @@ static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = {
I40E_GLPES_PFTCPRTXSEG(0),
I40E_GLPES_PFTCPRXOPTERR(0),
I40E_GLPES_PFTCPRXPROTOERR(0),
I40E_GLPES_PFRXVLANERR(0)
};
I40E_GLPES_PFRXVLANERR(0),
static u32 i40iw_stat_offsets_64[IRDMA_HW_STAT_INDEX_MAX_64] = {
I40E_GLPES_PFIP4RXOCTSLO(0),
I40E_GLPES_PFIP4RXPKTSLO(0),
I40E_GLPES_PFIP4RXFRAGSLO(0),
@@ -158,6 +156,51 @@ static const struct irdma_irq_ops i40iw_irq_ops = {
.irdma_en_irq = i40iw_ena_irq,
};
static const struct irdma_hw_stat_map i40iw_hw_stat_map[] = {
[IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 80, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 88, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 128, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 136, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 144, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 152, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 160, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 168, 0, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 176, 0, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 184, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 192, 0, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 224, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 232, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 240, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 248, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 256, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 264, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMAVBND] = { 272, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMAVINV] = { 280, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 288, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 296, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 304, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 312, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 320, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 328, 0, IRDMA_MAX_STATS_48 },
};
void i40iw_init_hw(struct irdma_sc_dev *dev)
{
int i;
@@ -172,11 +215,8 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr);
}
for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_32; ++i)
dev->hw_stats_regs_32[i] = i40iw_stat_offsets_32[i];
for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_64; ++i)
dev->hw_stats_regs_64[i] = i40iw_stat_offsets_64[i];
for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_GEN_1; ++i)
dev->hw_stats_regs[i] = i40iw_stat_offsets[i];
dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;
dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
@@ -195,6 +235,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
dev->ceq_itr_mask_db = NULL;
dev->aeq_itr_mask_db = NULL;
dev->irq_ops = &i40iw_irq_ops;
dev->hw_stats_map = i40iw_hw_stat_map;
/* Setup the hardware limits, hmc may limit further */
dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;
@@ -210,6 +251,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;
dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;
dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1;
dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;

@@ -111,6 +111,55 @@ static const struct irdma_irq_ops icrdma_irq_ops = {
.irdma_en_irq = icrdma_ena_irq,
};
static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
[IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 32, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 32, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 24, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 32, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 40, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 32, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 72, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 80, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 88, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 128, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 136, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 144, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 152, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 160, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 168, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 176, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 184, 32, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 184, 0, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 192, 32, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 200, 32, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 32, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 224, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 232, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 240, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 248, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 256, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 264, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 272, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 280, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMAVBND] = { 288, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RDMAVINV] = { 296, 0, IRDMA_MAX_STATS_48 },
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 304, 0, IRDMA_MAX_STATS_56 },
[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 312, 32, IRDMA_MAX_STATS_24 },
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 312, 0, IRDMA_MAX_STATS_32 },
[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 320, 0, IRDMA_MAX_STATS_32 },
};
void icrdma_init_hw(struct irdma_sc_dev *dev)
{
int i;
@@ -140,9 +189,11 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->irq_ops = &icrdma_irq_ops;
dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
dev->hw_stats_map = icrdma_hw_stat_map;
dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |

@@ -147,6 +147,7 @@ struct irdma_hw_attrs {
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
u16 max_stat_inst;
u16 max_stat_idx;
};
void i40iw_init_hw(struct irdma_sc_dev *dev);

@@ -115,6 +115,8 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_REFLUSH BIT(2)
#define IRDMA_FLUSH_WAIT BIT(3)
#define IRDMA_IRQ_NAME_STR_LEN (64)
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
@@ -212,6 +214,7 @@ struct irdma_msix_vector {
u32 cpu_affinity;
u32 ceq_id;
cpumask_t mask;
char name[IRDMA_IRQ_NAME_STR_LEN];
};
struct irdma_mc_table_info {

@@ -423,15 +423,15 @@ static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
* @pble_rsrc: pble resources
* @palloc: contains all inforamtion regarding pble (idx + pble addr)
* @level1_only: flag for a level 1 PBLE
* @lvl: Bitmask for requested pble level
*/
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, bool level1_only)
struct irdma_pble_alloc *palloc, u8 lvl)
{
int status = 0;
status = get_lvl1_pble(pble_rsrc, palloc);
if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
return status;
status = get_lvl2_pble(pble_rsrc, palloc);
@@ -444,11 +444,11 @@ static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resources
* @palloc: contains all inforamtion regarding pble (idx + pble addr)
* @pble_cnt: #of pbles requested
* @level1_only: true if only pble level 1 to acquire
* @lvl: requested pble level mask
*/
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only)
u8 lvl)
{
int status = 0;
int max_sds = 0;
@@ -462,7 +462,7 @@ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
/*check first to see if we can get pble's without acquiring
* additional sd's
*/
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
if (!status)
goto exit;
@@ -472,9 +472,9 @@ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
if (status)
break;
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
/* if level1_only, only go through it once */
if (!status || level1_only)
if (!status || lvl)
break;
}

@@ -114,7 +114,7 @@ void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc);
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only);
u8 lvl);
int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
struct irdma_chunk *pchunk);
int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,

@@ -28,9 +28,7 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat);
void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
struct irdma_dev_hw_stats *stats_values,
u64 *hw_stats_regs_32, u64 *hw_stats_regs_64,
u8 hw_rev);
const u64 *hw_stats_regs);
int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_ws_node_info *node_info);
int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
@@ -43,7 +41,9 @@ u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
struct irdma_gather_stats *gather_stats,
struct irdma_gather_stats *last_gather_stats);
struct irdma_gather_stats *last_gather_stats,
const struct irdma_hw_stat_map *map, u16 max_stat_idx);
/* vsi functions */
int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_stats_info *info);

@@ -101,7 +101,8 @@ enum irdma_qp_event_type {
IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_hw_stats_index_32b {
enum irdma_hw_stats_index {
/* gen1 - 32-bit */
IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
@@ -111,50 +112,48 @@ enum irdma_hw_stats_index_32b {
IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
IRDMA_HW_STAT_INDEX_MAX_32_GEN_1 = 9, /* Must be same value as next entry */
IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 10,
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 11,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 12,
IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */
};
enum irdma_hw_stats_index_64b {
IRDMA_HW_STAT_INDEX_IP4RXOCTS = 0,
IRDMA_HW_STAT_INDEX_IP4RXPKTS = 1,
IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 2,
IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 3,
IRDMA_HW_STAT_INDEX_IP4TXOCTS = 4,
IRDMA_HW_STAT_INDEX_IP4TXPKTS = 5,
IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 6,
IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 7,
IRDMA_HW_STAT_INDEX_IP6RXOCTS = 8,
IRDMA_HW_STAT_INDEX_IP6RXPKTS = 9,
IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 10,
IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 11,
IRDMA_HW_STAT_INDEX_IP6TXOCTS = 12,
IRDMA_HW_STAT_INDEX_IP6TXPKTS = 13,
IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 14,
IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 15,
IRDMA_HW_STAT_INDEX_TCPRXSEGS = 16,
IRDMA_HW_STAT_INDEX_TCPTXSEG = 17,
IRDMA_HW_STAT_INDEX_RDMARXRDS = 18,
IRDMA_HW_STAT_INDEX_RDMARXSNDS = 19,
IRDMA_HW_STAT_INDEX_RDMARXWRS = 20,
IRDMA_HW_STAT_INDEX_RDMATXRDS = 21,
IRDMA_HW_STAT_INDEX_RDMATXSNDS = 22,
IRDMA_HW_STAT_INDEX_RDMATXWRS = 23,
IRDMA_HW_STAT_INDEX_RDMAVBND = 24,
IRDMA_HW_STAT_INDEX_RDMAVINV = 25,
IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */
IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 26,
IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 27,
IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 28,
IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 29,
IRDMA_HW_STAT_INDEX_UDPRXPKTS = 30,
IRDMA_HW_STAT_INDEX_UDPTXPKTS = 31,
IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32,
IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */
/* gen1 - 64-bit */
IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
/* gen2 - 64-bit */
IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
/* gen2 - 32-bit */
IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
};
enum irdma_feature_type {
@@ -274,65 +273,21 @@ struct irdma_cq_shadow_area {
};
struct irdma_dev_hw_stats_offsets {
u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32];
u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64];
u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
};
struct irdma_dev_hw_stats {
u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32];
u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64];
u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};
struct irdma_gather_stats {
u32 rsvd1;
u32 rxvlanerr;
u64 ip4rxocts;
u64 ip4rxpkts;
u32 ip4rxtrunc;
u32 ip4rxdiscard;
u64 ip4rxfrags;
u64 ip4rxmcocts;
u64 ip4rxmcpkts;
u64 ip6rxocts;
u64 ip6rxpkts;
u32 ip6rxtrunc;
u32 ip6rxdiscard;
u64 ip6rxfrags;
u64 ip6rxmcocts;
u64 ip6rxmcpkts;
u64 ip4txocts;
u64 ip4txpkts;
u64 ip4txfrag;
u64 ip4txmcocts;
u64 ip4txmcpkts;
u64 ip6txocts;
u64 ip6txpkts;
u64 ip6txfrags;
u64 ip6txmcocts;
u64 ip6txmcpkts;
u32 ip6txnoroute;
u32 ip4txnoroute;
u64 tcprxsegs;
u32 tcprxprotoerr;
u32 tcprxopterr;
u64 tcptxsegs;
u32 rsvd2;
u32 tcprtxseg;
u64 udprxpkts;
u64 udptxpkts;
u64 rdmarxwrs;
u64 rdmarxrds;
u64 rdmarxsnds;
u64 rdmatxwrs;
u64 rdmatxrds;
u64 rdmatxsnds;
u64 rdmavbn;
u64 rdmavinv;
u64 rxnpecnmrkpkts;
u32 rxrpcnphandled;
u32 rxrpcnpignored;
u32 txnpcnpsent;
u32 rsvd3[88];
u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};
struct irdma_hw_stat_map {
u16 byteoff;
u8 bitoff;
u64 bitmask;
};
struct irdma_stats_gather_info {
@@ -584,7 +539,7 @@ struct irdma_qos {
bool valid;
};
#define IRDMA_INVALID_FCN_ID 0xff
#define IRDMA_INVALID_STATS_IDX 0xff
struct irdma_sc_vsi {
u16 vsi_idx;
struct irdma_sc_dev *dev;
@@ -598,11 +553,9 @@ struct irdma_sc_vsi {
u32 exception_lan_q;
u16 mtu;
u16 vm_id;
u8 fcn_id;
enum irdma_vm_vf_type vm_vf_type;
bool stats_fcn_id_alloc:1;
bool stats_inst_alloc:1;
bool tc_change_pending:1;
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_vsi_pestat *pestat;
atomic_t qp_suspend_reqs;
int (*register_qset)(struct irdma_sc_vsi *vsi,
@@ -611,14 +564,17 @@ struct irdma_sc_vsi {
struct irdma_ws_node *tc_node);
u8 qos_rel_bw;
u8 qos_prio_type;
u8 stats_idx;
u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
bool dscp_mode:1;
};
struct irdma_sc_dev {
struct list_head cqp_cmd_head; /* head of the CQP command list */
spinlock_t cqp_lock; /* protect CQP list access */
bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1];
struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
u64 fpm_query_buf_pa;
u64 fpm_commit_buf_pa;
@@ -637,8 +593,8 @@ struct irdma_sc_dev {
u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
u64 hw_masks[IRDMA_MAX_MASKS];
u64 hw_shifts[IRDMA_MAX_SHIFTS];
u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32];
u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64];
const struct irdma_hw_stat_map *hw_stats_map;
u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
u64 feature_info[IRDMA_MAX_FEATURES];
u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
struct irdma_hw_attrs hw_attrs;
@@ -763,7 +719,7 @@ struct irdma_vsi_init_info {
struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
u8 fcn_id;
bool alloc_fcn_id;
bool alloc_stats_inst;
};
struct irdma_device_init_info {

@@ -1634,10 +1634,10 @@ static void irdma_hw_stats_timeout(struct timer_list *t)
from_timer(pf_devstat, t, stats_timer);
struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
else
if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
else
irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
mod_timer(&pf_devstat->stats_timer,
jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
@@ -1686,164 +1686,28 @@ void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
{
struct irdma_gather_stats *gather_stats =
pestat->gather_info.gather_stats_va;
const struct irdma_hw_stat_map *map = dev->hw_stats_map;
u16 max_stats_idx = dev->hw_attrs.max_stat_idx;
u32 stats_inst_offset_32;
u32 stats_inst_offset_64;
u64 new_val;
u16 i;
stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
pestat->gather_info.stats_inst_index :
pestat->hw->hmc.hmc_fn_id;
pestat->gather_info.stats_inst_index :
pestat->hw->hmc.hmc_fn_id;
stats_inst_offset_32 *= 4;
stats_inst_offset_64 = stats_inst_offset_32 * 2;
gather_stats->rxvlanerr =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_RXVLANERR]
+ stats_inst_offset_32);
gather_stats->ip4rxdiscard =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXDISCARD]
+ stats_inst_offset_32);
gather_stats->ip4rxtrunc =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXTRUNC]
+ stats_inst_offset_32);
gather_stats->ip4txnoroute =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE]
+ stats_inst_offset_32);
gather_stats->ip6rxdiscard =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXDISCARD]
+ stats_inst_offset_32);
gather_stats->ip6rxtrunc =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXTRUNC]
+ stats_inst_offset_32);
gather_stats->ip6txnoroute =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE]
+ stats_inst_offset_32);
gather_stats->tcprtxseg =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRTXSEG]
+ stats_inst_offset_32);
gather_stats->tcprxopterr =
rd32(dev->hw,
dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRXOPTERR]
+ stats_inst_offset_32);
gather_stats->ip4rxocts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXOCTS]
+ stats_inst_offset_64);
gather_stats->ip4rxpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXPKTS]
+ stats_inst_offset_64);
gather_stats->ip4txfrag =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXFRAGS]
+ stats_inst_offset_64);
gather_stats->ip4rxmcpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS]
+ stats_inst_offset_64);
gather_stats->ip4txocts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXOCTS]
+ stats_inst_offset_64);
gather_stats->ip4txpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXPKTS]
+ stats_inst_offset_64);
gather_stats->ip4txfrag =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXFRAGS]
+ stats_inst_offset_64);
gather_stats->ip4txmcpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS]
+ stats_inst_offset_64);
gather_stats->ip6rxocts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXOCTS]
+ stats_inst_offset_64);
gather_stats->ip6rxpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXPKTS]
+ stats_inst_offset_64);
gather_stats->ip6txfrags =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXFRAGS]
+ stats_inst_offset_64);
gather_stats->ip6rxmcpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS]
+ stats_inst_offset_64);
gather_stats->ip6txocts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXOCTS]
+ stats_inst_offset_64);
gather_stats->ip6txpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXPKTS]
+ stats_inst_offset_64);
gather_stats->ip6txfrags =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXFRAGS]
+ stats_inst_offset_64);
gather_stats->ip6txmcpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS]
+ stats_inst_offset_64);
gather_stats->tcprxsegs =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPRXSEGS]
+ stats_inst_offset_64);
gather_stats->tcptxsegs =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPTXSEG]
+ stats_inst_offset_64);
gather_stats->rdmarxrds =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXRDS]
+ stats_inst_offset_64);
gather_stats->rdmarxsnds =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXSNDS]
+ stats_inst_offset_64);
gather_stats->rdmarxwrs =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXWRS]
+ stats_inst_offset_64);
gather_stats->rdmatxrds =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXRDS]
+ stats_inst_offset_64);
gather_stats->rdmatxsnds =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXSNDS]
+ stats_inst_offset_64);
gather_stats->rdmatxwrs =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXWRS]
+ stats_inst_offset_64);
gather_stats->rdmavbn =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVBND]
+ stats_inst_offset_64);
gather_stats->rdmavinv =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVINV]
+ stats_inst_offset_64);
gather_stats->udprxpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPRXPKTS]
+ stats_inst_offset_64);
gather_stats->udptxpkts =
rd64(dev->hw,
dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPTXPKTS]
+ stats_inst_offset_64);
for (i = 0; i < max_stats_idx; i++) {
if (map[i].bitmask <= IRDMA_MAX_STATS_32)
new_val = rd32(dev->hw,
dev->hw_stats_regs[i] + stats_inst_offset_32);
else
new_val = rd64(dev->hw,
dev->hw_stats_regs[i] + stats_inst_offset_64);
gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val;
}
irdma_process_stats(pestat);
}

@@ -1226,10 +1226,6 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->ipv4 = false;
irdma_copy_ip_ntohl(local_ip, daddr);
udp_info->arp_idx = irdma_arp_table(iwdev->rf,
&local_ip[0],
false, NULL,
IRDMA_ARP_RESOLVE);
} else if (av->net_type == RDMA_NETWORK_IPV4) {
__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
@@ -2329,11 +2325,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
* irdma_setup_pbles - copy user pg address to pble's
* @rf: RDMA PCI function
* @iwmr: mr pointer for this memory registration
* @use_pbles: flag if to use pble's
* @lvl_1_only: request only level 1 pble if true
* @lvl: requested pble levels
*/
static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
bool use_pbles, bool lvl_1_only)
u8 lvl)
{
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
@@ -2342,9 +2337,9 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
int status;
enum irdma_pble_level level = PBLE_LEVEL_1;
if (use_pbles) {
if (lvl) {
status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
lvl_1_only);
lvl);
if (status)
return status;
@@ -2359,7 +2354,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
irdma_copy_user_pgaddrs(iwmr, pbl, level);
if (use_pbles)
if (lvl)
iwmr->pgaddrmem[0] = *pbl;
return 0;
@@ -2370,11 +2365,11 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
* @iwdev: irdma device
* @req: information for q memory management
* @iwpbl: pble struct
* @use_pbles: flag to use pble
* @lvl: pble level mask
*/
static int irdma_handle_q_mem(struct irdma_device *iwdev,
struct irdma_mem_reg_req *req,
struct irdma_pbl *iwpbl, bool use_pbles)
struct irdma_pbl *iwpbl, u8 lvl)
{
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_mr *iwmr = iwpbl->iwmr;
@@ -2387,11 +2382,11 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
bool ret = true;
pg_size = iwmr->page_size;
err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
if (err)
return err;
if (use_pbles)
if (lvl)
arr = palloc->level1.addr;
switch (iwmr->type) {
@@ -2400,7 +2395,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t)arr[total];
if (use_pbles) {
if (lvl) {
ret = irdma_check_mem_contiguous(arr, req->sq_pages,
pg_size);
if (ret)
@@ -2425,7 +2420,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
if (!cqmr->split)
cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
if (use_pbles)
if (lvl)
ret = irdma_check_mem_contiguous(arr, req->cq_pages,
pg_size);
@@ -2439,7 +2434,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
err = -EINVAL;
}
if (use_pbles && ret) {
if (lvl && ret) {
irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
iwpbl->pbl_allocated = false;
}
@@ -2749,17 +2744,17 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
{
struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
bool use_pbles;
u32 stag;
u8 lvl;
int err;
use_pbles = iwmr->page_cnt != 1;
lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
if (err)
return err;
if (use_pbles) {
if (lvl) {
err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
iwmr->page_size);
if (err) {
@@ -2843,17 +2838,17 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_ucontext *ucontext = NULL;
unsigned long flags;
bool use_pbles;
u32 total;
int err;
u8 lvl;
total = req.sq_pages + req.rq_pages + 1;
if (total > iwmr->page_cnt)
return -EINVAL;
total = req.sq_pages + req.rq_pages;
use_pbles = total > 2;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
if (err)
return err;
@@ -2876,9 +2871,9 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
struct irdma_ucontext *ucontext = NULL;
u8 shadow_pgcnt = 1;
unsigned long flags;
bool use_pbles;
u32 total;
int err;
u8 lvl;
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
shadow_pgcnt = 0;
@@ -2886,8 +2881,8 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
if (total > iwmr->page_cnt)
return -EINVAL;
use_pbles = req.cq_pages > 1;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
if (err)
return err;
@@ -3708,89 +3703,59 @@ static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
}
static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
/* 32bit names */
[IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
[IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
[IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
[IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
static const struct rdma_stat_desc irdma_hw_stat_names[] = {
/* gen1 - 32-bit */
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
[IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
[IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
[IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
/* gen1 - 64-bit */
[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
[IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts",
[IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd",
[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts",
[IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets",
[IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts",
[IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts",
[IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets",
[IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts",
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd",
[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts",
[IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets",
[IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
[IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs",
[IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs",
[IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads",
[IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends",
[IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites",
[IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads",
[IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends",
[IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites",
[IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd",
[IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv",
/* gen2 - 32-bit */
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
[IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
/* gen2 - 64-bit */
[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets",
[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets",
[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets",
[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets",
[IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
[IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
/* 64bit names */
[IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InOctets",
[IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InPkts",
[IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InReasmRqd",
[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InMcastOctets",
[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InMcastPkts",
[IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutOctets",
[IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutPkts",
[IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutMcastOctets",
[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutMcastPkts",
[IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InOctets",
[IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InPkts",
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InReasmRqd",
[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InMcastOctets",
[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InMcastPkts",
[IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutOctets",
[IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutPkts",
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutMcastOctets",
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutMcastPkts",
[IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"tcpInSegs",
[IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32].name =
"tcpOutSegs",
[IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaReads",
[IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaSends",
[IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaWrites",
[IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaReads",
[IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaSends",
[IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaWrites",
[IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwRdmaBnd",
[IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwRdmaInv",
[IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"RxUDP",
[IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"TxUDP",
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32]
.name = "RxECNMrkd",
};
static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
@@ -3810,14 +3775,13 @@ static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 +
IRDMA_HW_STAT_INDEX_MAX_64;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
int num_counters = dev->hw_attrs.max_stat_idx;
unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_descs) !=
(IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
lifespan);
}
@@ -3840,7 +3804,7 @@ static int irdma_get_hw_stats(struct ib_device *ibdev,
else
irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
return stats->num_counters;
}
@@ -4054,7 +4018,7 @@ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
if (vlan_id < VLAN_N_VID)
mc_qht_elem->mc_grp_ctx.vlan_valid = true;
mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
mc_qht_elem->mc_grp_ctx.qs_handle =
iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);

@@ -447,9 +447,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
struct mlx4_ib_qp *qp,
struct mlx4_ib_create_qp *ucmd)
{
u32 cnt;
/* Sanity check SQ size before proceeding */
if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
ucmd->log_sq_stride >
if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
cnt > dev->dev->caps.max_wqes)
return -EINVAL;
if (ucmd->log_sq_stride >
ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
return -EINVAL;

@ -5,6 +5,7 @@
|
|||
|
||||
#include "mlx5_ib.h"
|
||||
#include <linux/mlx5/eswitch.h>
|
||||
#include <linux/mlx5/vport.h>
|
||||
#include "counters.h"
|
||||
#include "ib_rep.h"
|
||||
#include "qp.h"
|
||||
|
@ -18,6 +19,10 @@ struct mlx5_ib_counter {
|
|||
#define INIT_Q_COUNTER(_name) \
|
||||
{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
|
||||
|
||||
#define INIT_VPORT_Q_COUNTER(_name) \
|
||||
{ .name = "vport_" #_name, .offset = \
|
||||
MLX5_BYTE_OFF(query_q_counter_out, _name)}
|
||||
|
||||
static const struct mlx5_ib_counter basic_q_cnts[] = {
|
||||
INIT_Q_COUNTER(rx_write_requests),
|
||||
INIT_Q_COUNTER(rx_read_requests),
|
||||
|
@ -37,6 +42,25 @@ static const struct mlx5_ib_counter retrans_q_cnts[] = {
|
|||
INIT_Q_COUNTER(local_ack_timeout_err),
|
||||
};
|
||||
|
||||
static const struct mlx5_ib_counter vport_basic_q_cnts[] = {
|
||||
INIT_VPORT_Q_COUNTER(rx_write_requests),
|
||||
INIT_VPORT_Q_COUNTER(rx_read_requests),
|
||||
INIT_VPORT_Q_COUNTER(rx_atomic_requests),
|
||||
INIT_VPORT_Q_COUNTER(out_of_buffer),
|
||||
};
|
||||
|
||||
static const struct mlx5_ib_counter vport_out_of_seq_q_cnts[] = {
|
||||
INIT_VPORT_Q_COUNTER(out_of_sequence),
|
||||
};
|
||||
|
||||
static const struct mlx5_ib_counter vport_retrans_q_cnts[] = {
|
||||
INIT_VPORT_Q_COUNTER(duplicate_request),
|
||||
INIT_VPORT_Q_COUNTER(rnr_nak_retry_err),
|
||||
INIT_VPORT_Q_COUNTER(packet_seq_err),
|
||||
INIT_VPORT_Q_COUNTER(implied_nak_seq_err),
|
||||
INIT_VPORT_Q_COUNTER(local_ack_timeout_err),
|
||||
};
|
||||
|
||||
#define INIT_CONG_COUNTER(_name) \
|
||||
{ .name = #_name, .offset = \
|
||||
MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
|
||||
|
@ -67,6 +91,25 @@ static const struct mlx5_ib_counter roce_accl_cnts[] = {
|
|||
INIT_Q_COUNTER(roce_slow_restart_trans),
|
||||
};
|
||||
|
||||
static const struct mlx5_ib_counter vport_extended_err_cnts[] = {
|
||||
INIT_VPORT_Q_COUNTER(resp_local_length_error),
|
||||
INIT_VPORT_Q_COUNTER(resp_cqe_error),
|
||||
INIT_VPORT_Q_COUNTER(req_cqe_error),
|
||||
INIT_VPORT_Q_COUNTER(req_remote_invalid_request),
|
||||
INIT_VPORT_Q_COUNTER(req_remote_access_errors),
|
||||
INIT_VPORT_Q_COUNTER(resp_remote_access_errors),
|
||||
INIT_VPORT_Q_COUNTER(resp_cqe_flush_error),
|
||||
INIT_VPORT_Q_COUNTER(req_cqe_flush_error),
|
||||
};
|
||||
|
||||
static const struct mlx5_ib_counter vport_roce_accl_cnts[] = {
|
||||
INIT_VPORT_Q_COUNTER(roce_adp_retrans),
|
||||
INIT_VPORT_Q_COUNTER(roce_adp_retrans_to),
|
||||
INIT_VPORT_Q_COUNTER(roce_slow_restart),
|
||||
INIT_VPORT_Q_COUNTER(roce_slow_restart_cnps),
|
||||
INIT_VPORT_Q_COUNTER(roce_slow_restart_trans),
|
||||
};
|
||||
|
||||
#define INIT_EXT_PPCNT_COUNTER(_name) \
|
||||
{ .name = #_name, .offset = \
|
||||
MLX5_BYTE_OFF(ppcnt_reg, \
|
||||
|
@ -153,12 +196,20 @@ static int mlx5_ib_create_counters(struct ib_counters *counters,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool vport_qcounters_supported(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
return MLX5_CAP_GEN(dev->mdev, q_counter_other_vport) &&
|
||||
MLX5_CAP_GEN(dev->mdev, q_counter_aggregation);
|
||||
}
|
||||
|
||||
static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
|
||||
u32 port_num)
|
||||
{
|
||||
return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
|
||||
&dev->port[port_num].cnts;
|
||||
if ((is_mdev_switchdev_mode(dev->mdev) &&
|
||||
!vport_qcounters_supported(dev)) || !port_num)
|
||||
return &dev->port[0].cnts;
|
||||
|
||||
return &dev->port[port_num - 1].cnts;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -172,7 +223,7 @@ static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
|
|||
*/
|
||||
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
|
||||
{
|
||||
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
|
||||
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num + 1);
|
||||
|
||||
return cnts->set_id;
|
||||
}
|
||||
|
@ -270,12 +321,44 @@ free:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev,
|
||||
u32 port_num,
|
||||
const struct mlx5_ib_counters *cnts,
|
||||
struct rdma_hw_stats *stats)
|
||||
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
|
||||
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
|
||||
__be32 val;
|
||||
int ret, i;
|
||||
|
||||
if (!dev->port[port_num].rep ||
|
||||
dev->port[port_num].rep->vport == MLX5_VPORT_UPLINK)
|
||||
return 0;
|
||||
|
||||
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
|
||||
MLX5_SET(query_q_counter_in, in, other_vport, 1);
|
||||
MLX5_SET(query_q_counter_in, in, vport_number,
|
||||
dev->port[port_num].rep->vport);
|
||||
MLX5_SET(query_q_counter_in, in, aggregate, 1);
|
||||
ret = mlx5_cmd_exec_inout(dev->mdev, query_q_counter, in, out);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < cnts->num_q_counters; i++) {
|
||||
val = *(__be32 *)((void *)out + cnts->offsets[i]);
|
||||
stats->value[i] = (u64)be32_to_cpu(val);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int do_get_hw_stats(struct ib_device *ibdev,
|
||||
struct rdma_hw_stats *stats,
|
||||
u32 port_num, int index)
|
||||
{
|
||||
struct mlx5_ib_dev *dev = to_mdev(ibdev);
|
||||
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
|
||||
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
|
||||
struct mlx5_core_dev *mdev;
|
||||
int ret, num_counters;
|
||||
|
||||
|
@ -286,11 +369,19 @@ static int do_get_hw_stats(struct ib_device *ibdev,
|
|||
cnts->num_cong_counters +
|
||||
cnts->num_ext_ppcnt_counters;
|
||||
|
||||
/* q_counters are per IB device, query the master mdev */
|
||||
ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
|
||||
if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0)
|
||||
ret = mlx5_ib_query_q_counters_vport(dev, port_num - 1, cnts,
|
||||
stats);
|
||||
else
|
||||
ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats,
|
||||
cnts->set_id);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* We don't expose device counters over Vports */
|
||||
if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0)
|
||||
goto done;
|
||||
|
||||
if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
|
||||
ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
|
||||
if (ret)
|
||||
|
@ -335,7 +426,8 @@ static int do_get_op_stat(struct ib_device *ibdev,
|
|||
u32 type;
|
||||
int ret;
|
||||
|
||||
cnts = get_counters(dev, port_num - 1);
|
||||
cnts = get_counters(dev, port_num);
|
||||
|
||||
opfcs = cnts->opfcs;
|
||||
type = *(u32 *)cnts->descs[index].priv;
|
||||
if (type >= MLX5_IB_OPCOUNTER_MAX)
|
||||
|
@ -362,7 +454,7 @@ static int do_get_op_stats(struct ib_device *ibdev,
|
|||
const struct mlx5_ib_counters *cnts;
|
||||
int index, ret, num_hw_counters;
|
||||
|
||||
cnts = get_counters(dev, port_num - 1);
|
||||
cnts = get_counters(dev, port_num);
|
||||
num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
|
||||
cnts->num_ext_ppcnt_counters;
|
||||
for (index = num_hw_counters;
|
||||
|
@ -383,7 +475,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
|
|||
struct mlx5_ib_dev *dev = to_mdev(ibdev);
|
||||
const struct mlx5_ib_counters *cnts;
|
||||
|
||||
cnts = get_counters(dev, port_num - 1);
|
||||
cnts = get_counters(dev, port_num);
|
||||
num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
|
||||
cnts->num_ext_ppcnt_counters;
|
||||
num_counters = num_hw_counters + cnts->num_op_counters;
|
||||
|
@@ -410,8 +502,7 @@ static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(counter->device);
const struct mlx5_ib_counters *cnts =
get_counters(dev, counter->port - 1);
const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);

return do_alloc_stats(cnts);
}

@@ -419,8 +510,7 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(counter->device);
const struct mlx5_ib_counters *cnts =
get_counters(dev, counter->port - 1);
const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);

return mlx5_ib_query_q_counters(dev->mdev, cnts,
counter->stats, counter->id);
@@ -479,44 +569,55 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
}

static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
struct rdma_stat_desc *descs, size_t *offsets)
struct rdma_stat_desc *descs, size_t *offsets,
u32 port_num)
{
int i;
int j = 0;
bool is_vport = is_mdev_switchdev_mode(dev->mdev) &&
port_num != MLX5_VPORT_PF;
const struct mlx5_ib_counter *names;
int j = 0, i;

names = is_vport ? vport_basic_q_cnts : basic_q_cnts;
for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
descs[j].name = basic_q_cnts[i].name;
descs[j].name = names[i].name;
offsets[j] = basic_q_cnts[i].offset;
}

names = is_vport ? vport_out_of_seq_q_cnts : out_of_seq_q_cnts;
if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
descs[j].name = out_of_seq_q_cnts[i].name;
descs[j].name = names[i].name;
offsets[j] = out_of_seq_q_cnts[i].offset;
}
}

names = is_vport ? vport_retrans_q_cnts : retrans_q_cnts;
if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
descs[j].name = retrans_q_cnts[i].name;
descs[j].name = names[i].name;
offsets[j] = retrans_q_cnts[i].offset;
}
}

names = is_vport ? vport_extended_err_cnts : extended_err_cnts;
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
descs[j].name = extended_err_cnts[i].name;
descs[j].name = names[i].name;
offsets[j] = extended_err_cnts[i].offset;
}
}

names = is_vport ? vport_roce_accl_cnts : roce_accl_cnts;
if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
descs[j].name = roce_accl_cnts[i].name;
descs[j].name = names[i].name;
offsets[j] = roce_accl_cnts[i].offset;
}
}

if (is_vport)
return;

if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
descs[j].name = cong_cnts[i].name;
@@ -558,9 +659,9 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,

static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
struct mlx5_ib_counters *cnts)
struct mlx5_ib_counters *cnts, u32 port_num)
{
u32 num_counters, num_op_counters;
u32 num_counters, num_op_counters = 0;

num_counters = ARRAY_SIZE(basic_q_cnts);

@@ -578,6 +679,9 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
cnts->num_q_counters = num_counters;

if (is_mdev_switchdev_mode(dev->mdev) && port_num != MLX5_VPORT_PF)
goto skip_non_qcounters;

if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
num_counters += ARRAY_SIZE(cong_cnts);

@@ -597,6 +701,7 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
ft_field_support_2_nic_transmit_rdma.bth_opcode))
num_op_counters += ARRAY_SIZE(rdmatx_cnp_op_cnts);

skip_non_qcounters:
cnts->num_op_counters = num_op_counters;
num_counters += num_op_counters;
cnts->descs = kcalloc(num_counters,
@@ -623,7 +728,8 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
int num_cnt_ports;
int i, j;

num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) ||
vport_qcounters_supported(dev)) ? dev->num_ports : 1;

MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);

@@ -662,15 +768,16 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)

MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) ||
vport_qcounters_supported(dev)) ? dev->num_ports : 1;

for (i = 0; i < num_cnt_ports; i++) {
err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts, i);
if (err)
goto err_alloc;

mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs,
dev->port[i].cnts.offsets);
dev->port[i].cnts.offsets, i);

MLX5_SET(alloc_q_counter_in, in, uid,
is_shared ? MLX5_SHARED_RESOURCE_UID : 0);
@@ -889,6 +996,10 @@ static const struct ib_device_ops hw_stats_ops = {
mlx5_ib_modify_stat : NULL,
};

static const struct ib_device_ops hw_switchdev_vport_op = {
.alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
};

static const struct ib_device_ops hw_switchdev_stats_ops = {
.alloc_hw_device_stats = mlx5_ib_alloc_hw_device_stats,
.get_hw_stats = mlx5_ib_get_hw_stats,

@@ -914,9 +1025,11 @@ int mlx5_ib_counters_init(struct mlx5_ib_dev *dev)
if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
return 0;

if (is_mdev_switchdev_mode(dev->mdev))
if (is_mdev_switchdev_mode(dev->mdev)) {
ib_set_device_ops(&dev->ib_dev, &hw_switchdev_stats_ops);
else
if (vport_qcounters_supported(dev))
ib_set_device_ops(&dev->ib_dev, &hw_switchdev_vport_op);
} else
ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
return mlx5_ib_alloc_counters(dev);
}

@@ -666,7 +666,21 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
obj_id;

case MLX5_IB_OBJECT_DEVX_OBJ:
return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
struct devx_obj *devx_uobj = uobj->object;

if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
devx_uobj->flow_counter_bulk_size) {
u64 end;

end = devx_uobj->obj_id +
devx_uobj->flow_counter_bulk_size;
return devx_uobj->obj_id <= obj_id && end > obj_id;
}

return devx_uobj->obj_id == obj_id;
}

default:
return false;

@@ -1517,10 +1531,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
goto obj_free;

if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
u8 bulk = MLX5_GET(alloc_flow_counter_in,
cmd_in,
flow_counter_bulk);
obj->flow_counter_bulk_size = 128UL * bulk;
u32 bulk = MLX5_GET(alloc_flow_counter_in,
cmd_in,
flow_counter_bulk_log_size);

if (bulk)
bulk = 1 << bulk;
else
bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
cmd_in,
flow_counter_bulk);
obj->flow_counter_bulk_size = bulk;
}

uobj->object = obj;

@@ -1993,7 +2014,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
int redirect_fd;
bool use_eventfd = false;
int num_events;
int num_alloc_xa_entries = 0;
u16 obj_type = 0;
u64 cookie = 0;
u32 obj_id = 0;

@@ -2075,7 +2095,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
if (err)
goto err;

num_alloc_xa_entries++;
event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
if (!event_sub) {
err = -ENOMEM;

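The DEVX change above widens the object-id match for flow counters: an object allocated in bulk owns a whole id range, so a QUERY_FLOW_COUNTER against any id inside that range must resolve to the same uobject. Below is a minimal stand-alone sketch of that containment test, not the driver code itself; the bulk size and ids are plain parameters rather than fields read from the real devx_obj.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical distillation of the bulk-range check: a flow counter object
 * created with flow_counter_bulk_size == N owns ids [obj_id, obj_id + N),
 * so any queried id inside that window belongs to this object. A bulk size
 * of zero falls back to the exact-match rule used for ordinary objects. */
static bool flow_counter_id_matches(uint32_t obj_id, uint32_t bulk_size,
                                    uint32_t queried_id)
{
	/* widen before adding so obj_id + bulk_size cannot wrap in 32 bits */
	uint64_t end = (uint64_t)obj_id + bulk_size;

	if (!bulk_size)
		return queried_id == obj_id;

	return queried_id >= obj_id && queried_id < end;
}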
@@ -67,11 +67,14 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);

if ((acc & IB_ACCESS_RELAXED_ORDERING) &&
pcie_relaxed_ordering_enabled(dev->mdev->pdev)) {
if (acc & IB_ACCESS_RELAXED_ORDERING) {
if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))

if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
(MLX5_CAP_GEN(dev->mdev,
relaxed_ordering_read_pci_enabled) &&
pcie_relaxed_ordering_enabled(dev->mdev->pdev)))
MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
}

@@ -791,7 +794,8 @@ static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
ret |= IB_ACCESS_RELAXED_ORDERING;

if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
!MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
ret |= IB_ACCESS_RELAXED_ORDERING;

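The relaxed-ordering hunks above (and the UMR hunks further down) key the mkey RO-read bit off two device capabilities instead of the PCI check alone. A small hedged sketch of that decision follows; the three booleans stand in for MLX5_CAP_GEN() and pcie_relaxed_ordering_enabled() and are plain parameters here, not real driver calls.

#include <stdbool.h>

/* Sketch of the new gating: relaxed-ordering read is enabled when the device
 * reports it unconditionally (relaxed_ordering_read), or when it is only
 * valid behind the PCI config setting (relaxed_ordering_read_pci_enabled)
 * and the PCI layer confirms relaxed ordering is actually on. */
static bool mkey_ro_read_allowed(bool cap_ro_read,
                                 bool cap_ro_read_pci_enabled,
                                 bool pcie_ro_enabled)
{
	return cap_ro_read || (cap_ro_read_pci_enabled && pcie_ro_enabled);
}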
@@ -60,6 +60,10 @@ enum raw_qp_set_mask_map {
MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
};

enum {
MLX5_QP_RM_GO_BACK_N = 0x1,
};

struct mlx5_modify_raw_qp_param {
u16 operation;

@@ -2519,6 +2523,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);

if (qp->flags & IB_QP_CREATE_INTEGRITY_EN &&
MLX5_CAP_GEN(mdev, go_back_n))
MLX5_SET(qpc, qpc, retry_mode, MLX5_QP_RM_GO_BACK_N);

err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
kvfree(in);
if (err)

@@ -2846,9 +2854,9 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
case MLX5_QP_FLAG_SCATTER_CQE:
case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
/*
* We don't return error if these flags were provided,
* and mlx5 doesn't have right capability.
*/
* We don't return error if these flags were provided,
* and mlx5 doesn't have right capability.
*/
*flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
return;

@@ -4485,7 +4493,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -EINVAL;

if (attr->port_num == 0 ||
attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
attr->port_num > dev->num_ports) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
return -EINVAL;

@@ -5592,8 +5600,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
mlx5_ib_dbg(dev, "VLAN offloads are not "
"supported\n");
mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
err = -EOPNOTSUPP;
goto out;
}

@@ -380,6 +380,10 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
struct mlx5_mkey_seg *seg,
unsigned int access_flags)
{
bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) &&
(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
pcie_relaxed_ordering_enabled(dev->mdev->pdev));

MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));

@@ -387,8 +391,7 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
MLX5_SET(mkc, seg, lr, 1);
MLX5_SET(mkc, seg, relaxed_ordering_write,
!!(access_flags & IB_ACCESS_RELAXED_ORDERING));
MLX5_SET(mkc, seg, relaxed_ordering_read,
!!(access_flags & IB_ACCESS_RELAXED_ORDERING));
MLX5_SET(mkc, seg, relaxed_ordering_read, ro_read);
}

int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,

@@ -62,7 +62,8 @@ static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
return false;

if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
!MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
return false;

@@ -1589,7 +1589,6 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
unsigned long cq_flags;
unsigned long flags;
int discard_cnt = 0;
u32 cur_getp, stop_getp;
struct ocrdma_cqe *cqe;
u32 qpn = 0, wqe_idx = 0;

@@ -1641,7 +1640,6 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
/* mark cqe discarded so that it is not picked up later
* in the poll_cq().
*/
discard_cnt += 1;
cqe->cmn.qpn = 0;
skip_cqe:
cur_getp = (cur_getp + 1) % cq->max_hw_cqe;

@@ -484,7 +484,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
const struct qib_tid_info *ti)
{
int ret = 0;
u32 tid, ctxttid, cnt, limit, tidcnt;
u32 tid, ctxttid, limit, tidcnt;
struct qib_devdata *dd = rcd->dd;
u64 __iomem *tidbase;
unsigned long tidmap[8];

@@ -520,7 +520,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
/* just in case size changes in future */
limit = tidcnt;
tid = find_first_bit(tidmap, limit);
for (cnt = 0; tid < limit; tid++) {
for (; tid < limit; tid++) {
/*
* small optimization; if we detect a run of 3 or so without
* any set, use find_first_bit again. That's mainly to

@@ -530,7 +530,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
*/
if (!test_bit(tid, tidmap))
continue;
cnt++;

if (dd->pageshadow[ctxttid + tid]) {
struct page *p;
dma_addr_t phys;

@@ -1768,7 +1768,7 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{
struct qib_devdata *dd = rcd->dd;
int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
int i, maxtid = ctxt_tidbase + dd->rcvtidcnt;

for (i = ctxt_tidbase; i < maxtid; i++) {
struct page *p = dd->pageshadow[i];

@@ -1783,7 +1783,6 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
DMA_FROM_DEVICE);
qib_release_user_pages(&p, 1);
cnt++;
}
}

@@ -35,7 +35,6 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include <linux/module.h>

#include "qib.h"

@@ -105,13 +104,6 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
}

pci_set_master(pdev);
ret = pci_enable_pcie_error_reporting(pdev);
if (ret) {
qib_early_err(&pdev->dev,
"Unable to enable pcie error reporting: %d\n",
ret);
ret = 0;
}
goto done;

bail:

@@ -320,7 +320,6 @@ static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
unpin_user_page(page);
} else {
/* coalesce case */
kunmap(page);
__free_page(page);
}
ret = -ENOMEM;

@@ -572,7 +571,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
goto done;
}

mpage = kmap(page);
mpage = page_address(page);
mpage_save = mpage;
for (i = 0; i < niov; i++) {
int cfur;

@@ -581,7 +580,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
iov[i].iov_base, iov[i].iov_len);
if (cfur) {
ret = -EFAULT;
goto free_unmap;
goto page_free;
}

mpage += iov[i].iov_len;

@@ -592,8 +591,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
page, 0, 0, len, mpage_save);
goto done;

free_unmap:
kunmap(page);
page_free:
__free_page(page);
done:
return ret;

@@ -627,9 +625,6 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
pkt->addr[i].dma_length,
DMA_TO_DEVICE);

if (pkt->addr[i].kvaddr)
kunmap(pkt->addr[i].page);

if (pkt->addr[i].put_page)
unpin_user_page(pkt->addr[i].page);
else

@@ -602,7 +602,6 @@ out_clean_vnic:
usnic_vnic_free(vf->vnic);
out_release_regions:
pci_set_drvdata(pdev, NULL);
pci_clear_master(pdev);
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);

@@ -623,7 +622,6 @@ static void usnic_ib_pci_remove(struct pci_dev *pdev)
kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
usnic_vnic_free(vf->vnic);
pci_set_drvdata(pdev, NULL);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(vf);

@@ -464,8 +464,6 @@ void rvt_qp_exit(struct rvt_dev_info *rdi)
if (qps_inuse)
rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
qps_inuse);
if (!rdi->qp_dev)
return;

kfree(rdi->qp_dev->qp_table);
free_qpn_table(&rdi->qp_dev->qpn_table);

@@ -2040,7 +2038,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
wqe = rvt_get_swqe_ptr(qp, qp->s_head);

/* cplen has length from above */
memcpy(&wqe->wr, wr, cplen);
memcpy(&wqe->ud_wr, wr, cplen);

wqe->length = 0;
j = 0;

@ -160,6 +160,8 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
|
|||
|
||||
port->attr.active_mtu = mtu;
|
||||
port->mtu_cap = ib_mtu_enum_to_int(mtu);
|
||||
|
||||
rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
|
||||
}
|
||||
|
||||
/* called by ifc layer to create new rxe device.
|
||||
|
@ -175,26 +177,26 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
|
|||
|
||||
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
|
||||
{
|
||||
struct rxe_dev *exists;
|
||||
struct rxe_dev *rxe;
|
||||
int err = 0;
|
||||
|
||||
if (is_vlan_dev(ndev)) {
|
||||
pr_err("rxe creation allowed on top of a real device only\n");
|
||||
rxe_err("rxe creation allowed on top of a real device only");
|
||||
err = -EPERM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
exists = rxe_get_dev_from_net(ndev);
|
||||
if (exists) {
|
||||
ib_device_put(&exists->ib_dev);
|
||||
rxe_dbg(exists, "already configured on %s\n", ndev->name);
|
||||
rxe = rxe_get_dev_from_net(ndev);
|
||||
if (rxe) {
|
||||
ib_device_put(&rxe->ib_dev);
|
||||
rxe_err_dev(rxe, "already configured on %s", ndev->name);
|
||||
err = -EEXIST;
|
||||
goto err;
|
||||
}
|
||||
|
||||
err = rxe_net_add(ibdev_name, ndev);
|
||||
if (err) {
|
||||
rxe_dbg(exists, "failed to add %s\n", ndev->name);
|
||||
rxe_err("failed to add %s\n", ndev->name);
|
||||
goto err;
|
||||
}
|
||||
err:
|
||||
|
|
|
@ -38,7 +38,8 @@
|
|||
|
||||
#define RXE_ROCE_V2_SPORT (0xc000)
|
||||
|
||||
#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
|
||||
#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
|
||||
#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
|
||||
"%s: " fmt, __func__, ##__VA_ARGS__)
|
||||
#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
|
||||
"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
|
||||
|
@ -57,6 +58,48 @@
|
|||
#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
|
||||
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
|
||||
|
||||
#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
|
||||
##__VA_ARGS__)
|
||||
#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
|
||||
"%s: " fmt, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_uc(uc, fmt, ...) ibdev_err_ratelimited((uc)->ibuc.device, \
|
||||
"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_pd(pd, fmt, ...) ibdev_err_ratelimited((pd)->ibpd.device, \
|
||||
"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_ah(ah, fmt, ...) ibdev_err_ratelimited((ah)->ibah.device, \
|
||||
"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \
|
||||
"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_qp(qp, fmt, ...) ibdev_err_ratelimited((qp)->ibqp.device, \
|
||||
"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \
|
||||
"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_mr(mr, fmt, ...) ibdev_err_ratelimited((mr)->ibmr.device, \
|
||||
"mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
|
||||
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
|
||||
|
||||
#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
|
||||
##__VA_ARGS__)
|
||||
#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
|
||||
"%s: " fmt, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_uc(uc, fmt, ...) ibdev_info_ratelimited((uc)->ibuc.device, \
|
||||
"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_pd(pd, fmt, ...) ibdev_info_ratelimited((pd)->ibpd.device, \
|
||||
"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_ah(ah, fmt, ...) ibdev_info_ratelimited((ah)->ibah.device, \
|
||||
"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)->ibsrq.device, \
|
||||
"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_qp(qp, fmt, ...) ibdev_info_ratelimited((qp)->ibqp.device, \
|
||||
"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \
|
||||
"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_mr(mr, fmt, ...) ibdev_info_ratelimited((mr)->ibmr.device, \
|
||||
"mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
|
||||
#define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \
|
||||
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
|
||||
|
||||
/* responder states */
|
||||
enum resp_states {
|
||||
RESPST_NONE,
|
||||
|
@ -90,7 +133,6 @@ enum resp_states {
|
|||
RESPST_ERR_LENGTH,
|
||||
RESPST_ERR_CQ_OVERFLOW,
|
||||
RESPST_ERROR,
|
||||
RESPST_RESET,
|
||||
RESPST_DONE,
|
||||
RESPST_EXIT,
|
||||
};
|
||||
|
|
|
@ -118,10 +118,12 @@ void retransmit_timer(struct timer_list *t)
|
|||
|
||||
rxe_dbg_qp(qp, "retransmit timer fired\n");
|
||||
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if (qp->valid) {
|
||||
qp->comp.timeout = 1;
|
||||
rxe_sched_task(&qp->comp.task);
|
||||
}
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
}
|
||||
|
||||
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
|
||||
|
@ -322,7 +324,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
|
|||
qp->comp.psn = pkt->psn;
|
||||
if (qp->req.wait_psn) {
|
||||
qp->req.wait_psn = 0;
|
||||
rxe_run_task(&qp->req.task);
|
||||
rxe_sched_task(&qp->req.task);
|
||||
}
|
||||
}
|
||||
return COMPST_ERROR_RETRY;
|
||||
|
@ -428,6 +430,10 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
|
|||
uwc->wc_flags = IB_WC_WITH_IMM;
|
||||
uwc->byte_len = wqe->dma.length;
|
||||
}
|
||||
} else {
|
||||
if (wqe->status != IB_WC_WR_FLUSH_ERR)
|
||||
rxe_err_qp(qp, "non-flush error status = %d",
|
||||
wqe->status);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -469,10 +475,33 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
|
|||
*/
|
||||
if (qp->req.wait_fence) {
|
||||
qp->req.wait_fence = 0;
|
||||
rxe_run_task(&qp->req.task);
|
||||
rxe_sched_task(&qp->req.task);
|
||||
}
|
||||
}
|
||||
|
||||
static void comp_check_sq_drain_done(struct rxe_qp *qp)
|
||||
{
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
|
||||
if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
|
||||
qp->attr.sq_draining = 0;
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
if (qp->ibqp.event_handler) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_SQ_DRAINED;
|
||||
qp->ibqp.event_handler(&ev,
|
||||
qp->ibqp.qp_context);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
}
|
||||
|
||||
static inline enum comp_state complete_ack(struct rxe_qp *qp,
|
||||
struct rxe_pkt_info *pkt,
|
||||
struct rxe_send_wqe *wqe)
|
||||
|
@ -483,31 +512,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
|
|||
if (qp->req.need_rd_atomic) {
|
||||
qp->comp.timeout_retry = 0;
|
||||
qp->req.need_rd_atomic = 0;
|
||||
rxe_run_task(&qp->req.task);
|
||||
rxe_sched_task(&qp->req.task);
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
|
||||
/* state_lock used by requester & completer */
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if ((qp->req.state == QP_STATE_DRAIN) &&
|
||||
(qp->comp.psn == qp->req.psn)) {
|
||||
qp->req.state = QP_STATE_DRAINED;
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
if (qp->ibqp.event_handler) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_SQ_DRAINED;
|
||||
qp->ibqp.event_handler(&ev,
|
||||
qp->ibqp.qp_context);
|
||||
}
|
||||
} else {
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
}
|
||||
}
|
||||
comp_check_sq_drain_done(qp);
|
||||
|
||||
do_complete(qp, wqe);
|
||||
|
||||
|
@ -538,25 +547,60 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
|
|||
return COMPST_GET_WQE;
|
||||
}
|
||||
|
||||
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
|
||||
/* drain incoming response packet queue */
|
||||
static void drain_resp_pkts(struct rxe_qp *qp)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct rxe_send_wqe *wqe;
|
||||
struct rxe_queue *q = qp->sq.queue;
|
||||
|
||||
while ((skb = skb_dequeue(&qp->resp_pkts))) {
|
||||
rxe_put(qp);
|
||||
kfree_skb(skb);
|
||||
ib_device_put(qp->ibqp.device);
|
||||
}
|
||||
}
|
||||
|
||||
/* complete send wqe with flush error */
|
||||
static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
|
||||
{
|
||||
struct rxe_cqe cqe = {};
|
||||
struct ib_wc *wc = &cqe.ibwc;
|
||||
struct ib_uverbs_wc *uwc = &cqe.uibwc;
|
||||
int err;
|
||||
|
||||
if (qp->is_user) {
|
||||
uwc->wr_id = wqe->wr.wr_id;
|
||||
uwc->status = IB_WC_WR_FLUSH_ERR;
|
||||
uwc->qp_num = qp->ibqp.qp_num;
|
||||
} else {
|
||||
wc->wr_id = wqe->wr.wr_id;
|
||||
wc->status = IB_WC_WR_FLUSH_ERR;
|
||||
wc->qp = &qp->ibqp;
|
||||
}
|
||||
|
||||
err = rxe_cq_post(qp->scq, &cqe, 0);
|
||||
if (err)
|
||||
rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* drain and optionally complete the send queue
|
||||
* if unable to complete a wqe, i.e. cq is full, stop
|
||||
* completing and flush the remaining wqes
|
||||
*/
|
||||
static void flush_send_queue(struct rxe_qp *qp, bool notify)
|
||||
{
|
||||
struct rxe_send_wqe *wqe;
|
||||
struct rxe_queue *q = qp->sq.queue;
|
||||
int err;
|
||||
|
||||
while ((wqe = queue_head(q, q->type))) {
|
||||
if (notify) {
|
||||
wqe->status = IB_WC_WR_FLUSH_ERR;
|
||||
do_complete(qp, wqe);
|
||||
} else {
|
||||
queue_advance_consumer(q, q->type);
|
||||
err = flush_send_wqe(qp, wqe);
|
||||
if (err)
|
||||
notify = 0;
|
||||
}
|
||||
queue_advance_consumer(q, q->type);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -571,9 +615,28 @@ static void free_pkt(struct rxe_pkt_info *pkt)
|
|||
ib_device_put(dev);
|
||||
}
|
||||
|
||||
int rxe_completer(void *arg)
|
||||
/* reset the retry timer if
|
||||
* - QP is type RC
|
||||
* - there is a packet sent by the requester that
|
||||
* might be acked (we still might get spurious
|
||||
* timeouts but try to keep them as few as possible)
|
||||
* - the timeout parameter is set
|
||||
* - the QP is alive
|
||||
*/
|
||||
static void reset_retry_timer(struct rxe_qp *qp)
|
||||
{
|
||||
if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if (qp_state(qp) >= IB_QPS_RTS &&
|
||||
psn_compare(qp->req.psn, qp->comp.psn) > 0)
|
||||
mod_timer(&qp->retrans_timer,
|
||||
jiffies + qp->qp_timeout_jiffies);
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
}
|
||||
}
|
||||
|
||||
int rxe_completer(struct rxe_qp *qp)
|
||||
{
|
||||
struct rxe_qp *qp = (struct rxe_qp *)arg;
|
||||
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
|
||||
struct rxe_send_wqe *wqe = NULL;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
@ -581,15 +644,17 @@ int rxe_completer(void *arg)
|
|||
enum comp_state state;
|
||||
int ret;
|
||||
|
||||
if (!rxe_get(qp))
|
||||
return -EAGAIN;
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
|
||||
qp_state(qp) == IB_QPS_RESET) {
|
||||
bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
|
||||
|
||||
if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
|
||||
qp->comp.state == QP_STATE_RESET) {
|
||||
rxe_drain_resp_pkts(qp, qp->valid &&
|
||||
qp->comp.state == QP_STATE_ERROR);
|
||||
drain_resp_pkts(qp);
|
||||
flush_send_queue(qp, notify);
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
goto exit;
|
||||
}
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
if (qp->comp.timeout) {
|
||||
qp->comp.timeout_retry = 1;
|
||||
|
@ -677,20 +742,7 @@ int rxe_completer(void *arg)
|
|||
break;
|
||||
}
|
||||
|
||||
/* re reset the timeout counter if
|
||||
* (1) QP is type RC
|
||||
* (2) the QP is alive
|
||||
* (3) there is a packet sent by the requester that
|
||||
* might be acked (we still might get spurious
|
||||
* timeouts but try to keep them as few as possible)
|
||||
* (4) the timeout parameter is set
|
||||
*/
|
||||
if ((qp_type(qp) == IB_QPT_RC) &&
|
||||
(qp->req.state == QP_STATE_READY) &&
|
||||
(psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
|
||||
qp->qp_timeout_jiffies)
|
||||
mod_timer(&qp->retrans_timer,
|
||||
jiffies + qp->qp_timeout_jiffies);
|
||||
reset_retry_timer(qp);
|
||||
goto exit;
|
||||
|
||||
case COMPST_ERROR_RETRY:
|
||||
|
@ -730,7 +782,7 @@ int rxe_completer(void *arg)
|
|||
RXE_CNT_COMP_RETRY);
|
||||
qp->req.need_retry = 1;
|
||||
qp->comp.started_retry = 1;
|
||||
rxe_run_task(&qp->req.task);
|
||||
rxe_sched_task(&qp->req.task);
|
||||
}
|
||||
goto done;
|
||||
|
||||
|
@ -752,6 +804,7 @@ int rxe_completer(void *arg)
|
|||
*/
|
||||
qp->req.wait_for_rnr_timer = 1;
|
||||
rxe_dbg_qp(qp, "set rnr nak timer\n");
|
||||
// TODO who protects from destroy_qp??
|
||||
mod_timer(&qp->rnr_nak_timer,
|
||||
jiffies + rnrnak_jiffies(aeth_syn(pkt)
|
||||
& ~AETH_TYPE_MASK));
|
||||
|
@ -784,7 +837,5 @@ exit:
|
|||
out:
|
||||
if (pkt)
|
||||
free_pkt(pkt);
|
||||
rxe_put(qp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -14,12 +14,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
|
|||
int count;
|
||||
|
||||
if (cqe <= 0) {
|
||||
rxe_dbg(rxe, "cqe(%d) <= 0\n", cqe);
|
||||
rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
if (cqe > rxe->attr.max_cqe) {
|
||||
rxe_dbg(rxe, "cqe(%d) > max_cqe(%d)\n",
|
||||
rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
|
||||
cqe, rxe->attr.max_cqe);
|
||||
goto err1;
|
||||
}
|
||||
|
@ -39,21 +39,6 @@ err1:
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void rxe_send_complete(struct tasklet_struct *t)
|
||||
{
|
||||
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cq->cq_lock, flags);
|
||||
if (cq->is_dying) {
|
||||
spin_unlock_irqrestore(&cq->cq_lock, flags);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irqrestore(&cq->cq_lock, flags);
|
||||
|
||||
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
|
||||
}
|
||||
|
||||
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
|
||||
int comp_vector, struct ib_udata *udata,
|
||||
struct rxe_create_cq_resp __user *uresp)
|
||||
|
@ -65,7 +50,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
|
|||
cq->queue = rxe_queue_init(rxe, &cqe,
|
||||
sizeof(struct rxe_cqe), type);
|
||||
if (!cq->queue) {
|
||||
rxe_dbg(rxe, "unable to create cq\n");
|
||||
rxe_dbg_dev(rxe, "unable to create cq\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
|
|||
|
||||
cq->is_user = uresp;
|
||||
|
||||
cq->is_dying = false;
|
||||
|
||||
tasklet_setup(&cq->comp_task, rxe_send_complete);
|
||||
|
||||
spin_lock_init(&cq->cq_lock);
|
||||
cq->ibcq.cqe = cqe;
|
||||
return 0;
|
||||
|
@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
|
|||
return err;
|
||||
}
|
||||
|
||||
/* caller holds reference to cq */
|
||||
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
|
||||
{
|
||||
struct ib_event ev;
|
||||
|
@ -114,6 +96,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
|
|||
|
||||
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
|
||||
if (unlikely(full)) {
|
||||
rxe_err_cq(cq, "queue full");
|
||||
spin_unlock_irqrestore(&cq->cq_lock, flags);
|
||||
if (cq->ibcq.event_handler) {
|
||||
ev.device = cq->ibcq.device;
|
||||
|
@ -135,21 +118,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
|
|||
if ((cq->notify == IB_CQ_NEXT_COMP) ||
|
||||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
|
||||
cq->notify = 0;
|
||||
tasklet_schedule(&cq->comp_task);
|
||||
|
||||
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rxe_cq_disable(struct rxe_cq *cq)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cq->cq_lock, flags);
|
||||
cq->is_dying = true;
|
||||
spin_unlock_irqrestore(&cq->cq_lock, flags);
|
||||
}
|
||||
|
||||
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
|
||||
{
|
||||
struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
|
||||
|
|
|
@ -21,7 +21,7 @@ int rxe_icrc_init(struct rxe_dev *rxe)
|
|||
|
||||
tfm = crypto_alloc_shash("crc32", 0, 0);
|
||||
if (IS_ERR(tfm)) {
|
||||
rxe_dbg(rxe, "failed to init crc32 algorithm err: %ld\n",
|
||||
rxe_dbg_dev(rxe, "failed to init crc32 algorithm err: %ld\n",
|
||||
PTR_ERR(tfm));
|
||||
return PTR_ERR(tfm);
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
|
|||
*(__be32 *)shash_desc_ctx(shash) = crc;
|
||||
err = crypto_shash_update(shash, next, len);
|
||||
if (unlikely(err)) {
|
||||
rxe_dbg(rxe, "failed crc calculation, err: %d\n", err);
|
||||
rxe_dbg_dev(rxe, "failed crc calculation, err: %d\n", err);
|
||||
return (__force __be32)crc32_le((__force u32)crc, next, len);
|
||||
}
|
||||
|
||||
|
|
|
@ -80,7 +80,6 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
|
|||
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
|
||||
int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
|
||||
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
|
||||
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
|
||||
void rxe_mr_cleanup(struct rxe_pool_elem *elem);
|
||||
|
||||
/* rxe_mw.c */
|
||||
|
@ -171,9 +170,9 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem);
|
|||
|
||||
void rxe_dealloc(struct ib_device *ib_dev);
|
||||
|
||||
int rxe_completer(void *arg);
|
||||
int rxe_requester(void *arg);
|
||||
int rxe_responder(void *arg);
|
||||
int rxe_completer(struct rxe_qp *qp);
|
||||
int rxe_requester(struct rxe_qp *qp);
|
||||
int rxe_responder(struct rxe_qp *qp);
|
||||
|
||||
/* rxe_icrc.c */
|
||||
int rxe_icrc_init(struct rxe_dev *rxe);
|
||||
|
|
|
@ -79,7 +79,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
|
|||
|
||||
/* Don't allow a mmap larger than the object. */
|
||||
if (size > ip->info.size) {
|
||||
rxe_dbg(rxe, "mmap region is larger than the object!\n");
|
||||
rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");
|
||||
spin_unlock_bh(&rxe->pending_lock);
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
|
@ -87,7 +87,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
|
|||
|
||||
goto found_it;
|
||||
}
|
||||
rxe_dbg(rxe, "unable to find pending mmap info\n");
|
||||
rxe_dbg_dev(rxe, "unable to find pending mmap info\n");
|
||||
spin_unlock_bh(&rxe->pending_lock);
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
|
@ -98,7 +98,7 @@ found_it:
|
|||
|
||||
ret = remap_vmalloc_range(vma, ip->obj, 0);
|
||||
if (ret) {
|
||||
rxe_dbg(rxe, "err %d from remap_vmalloc_range\n", ret);
|
||||
rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret);
|
||||
goto done;
|
||||
}
|
||||
|
||||
|
|
|
@ -210,10 +210,10 @@ err1:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
|
||||
static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
|
||||
{
|
||||
struct rxe_mr *mr = to_rmr(ibmr);
|
||||
struct page *page = virt_to_page(iova & mr->page_mask);
|
||||
struct page *page = ib_virt_dma_to_page(dma_addr);
|
||||
bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
|
||||
int err;
|
||||
|
||||
|
@ -279,16 +279,16 @@ static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
|
||||
static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
|
||||
unsigned int length, enum rxe_mr_copy_dir dir)
|
||||
{
|
||||
unsigned int page_offset = iova & (PAGE_SIZE - 1);
|
||||
unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
|
||||
unsigned int bytes;
|
||||
struct page *page;
|
||||
u8 *va;
|
||||
|
||||
while (length) {
|
||||
page = virt_to_page(iova & mr->page_mask);
|
||||
page = ib_virt_dma_to_page(dma_addr);
|
||||
bytes = min_t(unsigned int, length,
|
||||
PAGE_SIZE - page_offset);
|
||||
va = kmap_local_page(page);
|
||||
|
@ -300,7 +300,7 @@ static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
|
|||
|
||||
kunmap_local(va);
|
||||
page_offset = 0;
|
||||
iova += bytes;
|
||||
dma_addr += bytes;
|
||||
addr += bytes;
|
||||
length -= bytes;
|
||||
}
|
||||
|
@ -488,7 +488,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
|
|||
|
||||
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
|
||||
page_offset = iova & (PAGE_SIZE - 1);
|
||||
page = virt_to_page(iova & PAGE_MASK);
|
||||
page = ib_virt_dma_to_page(iova);
|
||||
} else {
|
||||
unsigned long index;
|
||||
int err;
|
||||
|
@ -545,7 +545,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
|
|||
|
||||
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
|
||||
page_offset = iova & (PAGE_SIZE - 1);
|
||||
page = virt_to_page(iova & PAGE_MASK);
|
||||
page = ib_virt_dma_to_page(iova);
|
||||
} else {
|
||||
unsigned long index;
|
||||
int err;
|
||||
|
@ -722,19 +722,6 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
|
||||
{
|
||||
struct rxe_mr *mr = to_rmr(ibmr);
|
||||
|
||||
/* See IBA 10.6.7.2.6 */
|
||||
if (atomic_read(&mr->num_mw) > 0)
|
||||
return -EINVAL;
|
||||
|
||||
rxe_cleanup(mr);
|
||||
kfree_rcu(mr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
|
||||
{
|
||||
struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
|
||||
|
|
|
@ -413,11 +413,14 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
|
|||
int is_request = pkt->mask & RXE_REQ_MASK;
|
||||
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
|
||||
|
||||
if ((is_request && (qp->req.state != QP_STATE_READY)) ||
|
||||
(!is_request && (qp->resp.state != QP_STATE_READY))) {
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
|
||||
(!is_request && (qp_state(qp) < IB_QPS_RTR))) {
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
|
||||
goto drop;
|
||||
}
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
rxe_icrc_generate(skb, pkt);
|
||||
|
||||
|
@ -596,7 +599,7 @@ static int rxe_notify(struct notifier_block *not_blk,
|
|||
rxe_port_down(rxe);
|
||||
break;
|
||||
case NETDEV_CHANGEMTU:
|
||||
rxe_dbg(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
|
||||
rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
|
||||
rxe_set_mtu(rxe, ndev->mtu);
|
||||
break;
|
||||
case NETDEV_CHANGE:
|
||||
|
@ -608,7 +611,7 @@ static int rxe_notify(struct notifier_block *not_blk,
|
|||
case NETDEV_CHANGENAME:
|
||||
case NETDEV_FEAT_CHANGE:
|
||||
default:
|
||||
rxe_dbg(rxe, "ignoring netdev event = %ld for %s\n",
|
||||
rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
|
||||
event, ndev->name);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -19,33 +19,33 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
|
|||
int has_srq)
|
||||
{
|
||||
if (cap->max_send_wr > rxe->attr.max_qp_wr) {
|
||||
rxe_dbg(rxe, "invalid send wr = %u > %d\n",
|
||||
rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
|
||||
cap->max_send_wr, rxe->attr.max_qp_wr);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
if (cap->max_send_sge > rxe->attr.max_send_sge) {
|
||||
rxe_dbg(rxe, "invalid send sge = %u > %d\n",
|
||||
rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
|
||||
cap->max_send_sge, rxe->attr.max_send_sge);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
if (!has_srq) {
|
||||
if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
|
||||
rxe_dbg(rxe, "invalid recv wr = %u > %d\n",
|
||||
rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
|
||||
cap->max_recv_wr, rxe->attr.max_qp_wr);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
|
||||
rxe_dbg(rxe, "invalid recv sge = %u > %d\n",
|
||||
rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
|
||||
cap->max_recv_sge, rxe->attr.max_recv_sge);
|
||||
goto err1;
|
||||
}
|
||||
}
|
||||
|
||||
if (cap->max_inline_data > rxe->max_inline_data) {
|
||||
rxe_dbg(rxe, "invalid max inline data = %u > %d\n",
|
||||
rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
|
||||
cap->max_inline_data, rxe->max_inline_data);
|
||||
goto err1;
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
|
|||
}
|
||||
|
||||
if (!init->recv_cq || !init->send_cq) {
|
||||
rxe_dbg(rxe, "missing cq\n");
|
||||
rxe_dbg_dev(rxe, "missing cq\n");
|
||||
goto err1;
|
||||
}
|
||||
|
||||
|
@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
|
|||
|
||||
if (init->qp_type == IB_QPT_GSI) {
|
||||
if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
|
||||
rxe_dbg(rxe, "invalid port = %d\n", port_num);
|
||||
rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
port = &rxe->port;
|
||||
|
||||
if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
|
||||
rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num);
|
||||
rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
|
||||
goto err1;
|
||||
}
|
||||
}
|
||||
|
@ -231,8 +231,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
|
|||
qp->req.wqe_index = queue_get_producer(qp->sq.queue,
|
||||
QUEUE_TYPE_FROM_CLIENT);
|
||||
|
||||
qp->req.state = QP_STATE_RESET;
|
||||
qp->comp.state = QP_STATE_RESET;
|
||||
qp->req.opcode = -1;
|
||||
qp->comp.opcode = -1;
|
||||
|
||||
|
@ -287,7 +285,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
|
|||
|
||||
qp->resp.opcode = OPCODE_NONE;
|
||||
qp->resp.msn = 0;
|
||||
qp->resp.state = QP_STATE_RESET;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -328,8 +325,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
|
|||
if (err)
|
||||
goto err2;
|
||||
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
qp->attr.qp_state = IB_QPS_RESET;
|
||||
qp->valid = 1;
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -380,30 +379,9 @@ int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* called by the modify qp verb, this routine checks all the parameters before
|
||||
* making any changes
|
||||
*/
|
||||
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
|
||||
struct ib_qp_attr *attr, int mask)
|
||||
{
|
||||
enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
|
||||
attr->cur_qp_state : qp->attr.qp_state;
|
||||
enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
|
||||
attr->qp_state : cur_state;
|
||||
|
||||
if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
|
||||
rxe_dbg_qp(qp, "invalid mask or state\n");
|
||||
goto err1;
|
||||
}
|
||||
|
||||
if (mask & IB_QP_STATE) {
|
||||
if (cur_state == IB_QPS_SQD) {
|
||||
if (qp->req.state == QP_STATE_DRAIN &&
|
||||
new_state != IB_QPS_ERR)
|
||||
goto err1;
|
||||
}
|
||||
}
|
||||
|
||||
if (mask & IB_QP_PORT) {
|
||||
if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
|
||||
rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
|
||||
|
@ -473,29 +451,18 @@ static void rxe_qp_reset(struct rxe_qp *qp)
|
|||
{
|
||||
/* stop tasks from running */
|
||||
rxe_disable_task(&qp->resp.task);
|
||||
rxe_disable_task(&qp->comp.task);
|
||||
rxe_disable_task(&qp->req.task);
|
||||
|
||||
/* stop request/comp */
|
||||
if (qp->sq.queue) {
|
||||
if (qp_type(qp) == IB_QPT_RC)
|
||||
rxe_disable_task(&qp->comp.task);
|
||||
rxe_disable_task(&qp->req.task);
|
||||
}
|
||||
/* drain work and packet queuesc */
|
||||
rxe_requester(qp);
|
||||
rxe_completer(qp);
|
||||
rxe_responder(qp);
|
||||
|
||||
/* move qp to the reset state */
|
||||
qp->req.state = QP_STATE_RESET;
|
||||
qp->comp.state = QP_STATE_RESET;
|
||||
qp->resp.state = QP_STATE_RESET;
|
||||
|
||||
/* let state machines reset themselves drain work and packet queues
|
||||
* etc.
|
||||
*/
|
||||
__rxe_do_task(&qp->resp.task);
|
||||
|
||||
if (qp->sq.queue) {
|
||||
__rxe_do_task(&qp->comp.task);
|
||||
__rxe_do_task(&qp->req.task);
|
||||
if (qp->rq.queue)
|
||||
rxe_queue_reset(qp->rq.queue);
|
||||
if (qp->sq.queue)
|
||||
rxe_queue_reset(qp->sq.queue);
|
||||
}
|
||||
|
||||
/* cleanup attributes */
|
||||
atomic_set(&qp->ssn, 0);
|
||||
|
@ -518,54 +485,103 @@ static void rxe_qp_reset(struct rxe_qp *qp)
|
|||
|
||||
/* reenable tasks */
|
||||
rxe_enable_task(&qp->resp.task);
|
||||
|
||||
if (qp->sq.queue) {
|
||||
if (qp_type(qp) == IB_QPT_RC)
|
||||
rxe_enable_task(&qp->comp.task);
|
||||
|
||||
rxe_enable_task(&qp->req.task);
|
||||
}
|
||||
}
|
||||
|
||||
/* drain the send queue */
|
||||
static void rxe_qp_drain(struct rxe_qp *qp)
|
||||
{
|
||||
if (qp->sq.queue) {
|
||||
if (qp->req.state != QP_STATE_DRAINED) {
|
||||
qp->req.state = QP_STATE_DRAIN;
|
||||
if (qp_type(qp) == IB_QPT_RC)
|
||||
rxe_sched_task(&qp->comp.task);
|
||||
else
|
||||
__rxe_do_task(&qp->comp.task);
|
||||
rxe_sched_task(&qp->req.task);
|
||||
}
|
||||
}
|
||||
rxe_enable_task(&qp->comp.task);
|
||||
rxe_enable_task(&qp->req.task);
|
||||
}
|
||||
|
||||
/* move the qp to the error state */
|
||||
void rxe_qp_error(struct rxe_qp *qp)
|
||||
{
|
||||
qp->req.state = QP_STATE_ERROR;
|
||||
qp->resp.state = QP_STATE_ERROR;
|
||||
qp->comp.state = QP_STATE_ERROR;
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
qp->attr.qp_state = IB_QPS_ERR;
|
||||
|
||||
/* drain work and packet queues */
|
||||
rxe_sched_task(&qp->resp.task);
|
||||
|
||||
if (qp_type(qp) == IB_QPT_RC)
|
||||
rxe_sched_task(&qp->comp.task);
|
||||
else
|
||||
__rxe_do_task(&qp->comp.task);
|
||||
rxe_sched_task(&qp->comp.task);
|
||||
rxe_sched_task(&qp->req.task);
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
}
|
||||
|
||||
static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
|
||||
int mask)
|
||||
{
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
qp->attr.sq_draining = 1;
|
||||
rxe_sched_task(&qp->comp.task);
|
||||
rxe_sched_task(&qp->req.task);
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
}
|
||||
|
||||
/* caller should hold qp->state_lock */
|
||||
static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
|
||||
int mask)
|
||||
{
|
||||
enum ib_qp_state cur_state;
|
||||
enum ib_qp_state new_state;
|
||||
|
||||
cur_state = (mask & IB_QP_CUR_STATE) ?
|
||||
attr->cur_qp_state : qp->attr.qp_state;
|
||||
new_state = (mask & IB_QP_STATE) ?
|
||||
attr->qp_state : cur_state;
|
||||
|
||||
if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
|
||||
return -EINVAL;
|
||||
|
||||
if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
|
||||
if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char *const qps2str[] = {
|
||||
[IB_QPS_RESET] = "RESET",
|
||||
[IB_QPS_INIT] = "INIT",
|
||||
[IB_QPS_RTR] = "RTR",
|
||||
[IB_QPS_RTS] = "RTS",
|
||||
[IB_QPS_SQD] = "SQD",
|
||||
[IB_QPS_SQE] = "SQE",
|
||||
[IB_QPS_ERR] = "ERR",
|
||||
};
|
||||
|
||||
/* called by the modify qp verb */
|
||||
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (mask & IB_QP_CUR_STATE)
|
||||
qp->attr.cur_qp_state = attr->qp_state;
|
||||
|
||||
if (mask & IB_QP_STATE) {
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
err = __qp_chk_state(qp, attr, mask);
|
||||
if (!err) {
|
||||
qp->attr.qp_state = attr->qp_state;
|
||||
rxe_dbg_qp(qp, "state -> %s\n",
|
||||
qps2str[attr->qp_state]);
|
||||
}
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
switch (attr->qp_state) {
|
||||
case IB_QPS_RESET:
|
||||
rxe_qp_reset(qp);
|
||||
break;
|
||||
case IB_QPS_SQD:
|
||||
rxe_qp_sqd(qp, attr, mask);
|
||||
break;
|
||||
case IB_QPS_ERR:
|
||||
rxe_qp_error(qp);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
|
||||
int max_rd_atomic = attr->max_rd_atomic ?
|
||||
roundup_pow_of_two(attr->max_rd_atomic) : 0;
|
||||
|
@ -587,9 +603,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
|
|||
return err;
|
||||
}
|
||||
|
||||
if (mask & IB_QP_CUR_STATE)
|
||||
qp->attr.cur_qp_state = attr->qp_state;
|
||||
|
||||
if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
|
||||
qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
|
||||
|
||||
|
@ -669,50 +682,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
|
|||
if (mask & IB_QP_DEST_QPN)
|
||||
qp->attr.dest_qp_num = attr->dest_qp_num;
|
||||
|
||||
if (mask & IB_QP_STATE) {
|
||||
qp->attr.qp_state = attr->qp_state;
|
||||
|
||||
switch (attr->qp_state) {
|
||||
case IB_QPS_RESET:
|
||||
rxe_dbg_qp(qp, "state -> RESET\n");
|
||||
rxe_qp_reset(qp);
|
||||
break;
|
||||
|
||||
case IB_QPS_INIT:
|
||||
rxe_dbg_qp(qp, "state -> INIT\n");
|
||||
qp->req.state = QP_STATE_INIT;
|
||||
qp->resp.state = QP_STATE_INIT;
|
||||
qp->comp.state = QP_STATE_INIT;
|
||||
break;
|
||||
|
||||
case IB_QPS_RTR:
|
||||
rxe_dbg_qp(qp, "state -> RTR\n");
|
||||
qp->resp.state = QP_STATE_READY;
|
||||
break;
|
||||
|
||||
case IB_QPS_RTS:
|
||||
rxe_dbg_qp(qp, "state -> RTS\n");
|
||||
qp->req.state = QP_STATE_READY;
|
||||
qp->comp.state = QP_STATE_READY;
|
||||
break;
|
||||
|
||||
case IB_QPS_SQD:
|
||||
rxe_dbg_qp(qp, "state -> SQD\n");
|
||||
rxe_qp_drain(qp);
|
||||
break;
|
||||
|
||||
case IB_QPS_SQE:
|
||||
rxe_dbg_qp(qp, "state -> SQE !!?\n");
|
||||
/* Not possible from modify_qp. */
|
||||
break;
|
||||
|
||||
case IB_QPS_ERR:
|
||||
rxe_dbg_qp(qp, "state -> ERR\n");
|
||||
rxe_qp_error(qp);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -736,18 +705,15 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
|
|||
rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
|
||||
rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
|
||||
|
||||
if (qp->req.state == QP_STATE_DRAIN) {
|
||||
attr->sq_draining = 1;
|
||||
/* applications that get this state
|
||||
* typically spin on it. yield the
|
||||
* processor
|
||||
*/
|
||||
/* Applications that get this state typically spin on it.
|
||||
* Yield the processor
|
||||
*/
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if (qp->attr.sq_draining) {
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
cond_resched();
|
||||
} else {
|
||||
attr->sq_draining = 0;
|
||||
}
|
||||
|
||||
rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining);
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -771,26 +737,29 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
|
|||
{
|
||||
struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
|
||||
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
qp->valid = 0;
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
qp->qp_timeout_jiffies = 0;
|
||||
rxe_cleanup_task(&qp->resp.task);
|
||||
|
||||
if (qp_type(qp) == IB_QPT_RC) {
|
||||
del_timer_sync(&qp->retrans_timer);
|
||||
del_timer_sync(&qp->rnr_nak_timer);
|
||||
}
|
||||
|
||||
rxe_cleanup_task(&qp->req.task);
|
||||
rxe_cleanup_task(&qp->comp.task);
|
||||
if (qp->resp.task.func)
|
||||
rxe_cleanup_task(&qp->resp.task);
|
||||
|
||||
if (qp->req.task.func)
|
||||
rxe_cleanup_task(&qp->req.task);
|
||||
|
||||
if (qp->comp.task.func)
|
||||
rxe_cleanup_task(&qp->comp.task);
|
||||
|
||||
/* flush out any receive wr's or pending requests */
|
||||
if (qp->req.task.func)
|
||||
__rxe_do_task(&qp->req.task);
|
||||
|
||||
if (qp->sq.queue) {
|
||||
__rxe_do_task(&qp->comp.task);
|
||||
__rxe_do_task(&qp->req.task);
|
||||
}
|
||||
rxe_requester(qp);
|
||||
rxe_completer(qp);
|
||||
rxe_responder(qp);
|
||||
|
||||
if (qp->sq.queue)
|
||||
rxe_queue_cleanup(qp->sq.queue);
|
||||
|
|
|
@ -61,11 +61,11 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
|
|||
|
||||
/* num_elem == 0 is allowed, but uninteresting */
|
||||
if (*num_elem < 0)
|
||||
goto err1;
|
||||
return NULL;
|
||||
|
||||
q = kzalloc(sizeof(*q), GFP_KERNEL);
|
||||
if (!q)
|
||||
goto err1;
|
||||
return NULL;
|
||||
|
||||
q->rxe = rxe;
|
||||
q->type = type;
|
||||
|
@ -100,7 +100,6 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
|
|||
|
||||
err2:
|
||||
kfree(q);
|
||||
err1:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -38,12 +38,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_bh(&qp->state_lock);
|
||||
if (pkt->mask & RXE_REQ_MASK) {
|
||||
if (unlikely(qp->resp.state != QP_STATE_READY))
|
||||
if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
return -EINVAL;
|
||||
} else if (unlikely(qp->req.state < QP_STATE_READY ||
|
||||
qp->req.state > QP_STATE_DRAINED))
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&qp->state_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -102,44 +102,44 @@ void rnr_nak_timer(struct timer_list *t)

rxe_dbg_qp(qp, "nak timer fired\n");

/* request a send queue retry */
qp->req.need_retry = 1;
qp->req.wait_for_rnr_timer = 0;
rxe_sched_task(&qp->req.task);
spin_lock_bh(&qp->state_lock);
if (qp->valid) {
/* request a send queue retry */
qp->req.need_retry = 1;
qp->req.wait_for_rnr_timer = 0;
rxe_sched_task(&qp->req.task);
}
spin_unlock_bh(&qp->state_lock);
}

static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
static void req_check_sq_drain_done(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;
struct rxe_queue *q = qp->sq.queue;
unsigned int index = qp->req.wqe_index;
struct rxe_queue *q;
unsigned int index;
unsigned int cons;
unsigned int prod;
struct rxe_send_wqe *wqe;

wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
spin_lock_bh(&qp->state_lock);
if (qp_state(qp) == IB_QPS_SQD) {
q = qp->sq.queue;
index = qp->req.wqe_index;
cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
wqe = queue_addr_from_index(q, cons);

if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* check to see if we are drained;
* state_lock used by requester and completer
*/
spin_lock_bh(&qp->state_lock);
do {
if (qp->req.state != QP_STATE_DRAIN) {
if (!qp->attr.sq_draining)
/* comp just finished */
spin_unlock_bh(&qp->state_lock);
break;
}

if (wqe && ((index != cons) ||
(wqe->state != wqe_state_posted))) {
(wqe->state != wqe_state_posted)))
/* comp not done yet */
spin_unlock_bh(&qp->state_lock);
break;
}

qp->req.state = QP_STATE_DRAINED;
qp->attr.sq_draining = 0;
spin_unlock_bh(&qp->state_lock);

if (qp->ibqp.event_handler) {

@@ -151,19 +151,43 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
qp->ibqp.event_handler(&ev,
qp->ibqp.qp_context);
}
return;
} while (0);
}
spin_unlock_bh(&qp->state_lock);
}

static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
{
struct rxe_queue *q = qp->sq.queue;
unsigned int index = qp->req.wqe_index;
unsigned int prod;

prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
if (index == prod)
return NULL;
else
return queue_addr_from_index(q, index);
}

wqe = queue_addr_from_index(q, index);
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;

if (unlikely((qp->req.state == QP_STATE_DRAIN ||
qp->req.state == QP_STATE_DRAINED) &&
(wqe->state != wqe_state_processing)))
req_check_sq_drain_done(qp);

wqe = __req_next_wqe(qp);
if (wqe == NULL)
return NULL;

spin_lock_bh(&qp->state_lock);
if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
(wqe->state != wqe_state_processing))) {
spin_unlock_bh(&qp->state_lock);
return NULL;
}
spin_unlock_bh(&qp->state_lock);

wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
return wqe;
}

@@ -635,9 +659,8 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
return 0;
}

int rxe_requester(void *arg)
int rxe_requester(struct rxe_qp *qp)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_pkt_info pkt;
struct sk_buff *skb;

@@ -654,24 +677,22 @@ int rxe_requester(void *arg)
struct rxe_ah *ah;
struct rxe_av *av;

if (!rxe_get(qp))
return -EAGAIN;

if (unlikely(!qp->valid))
spin_lock_bh(&qp->state_lock);
if (unlikely(!qp->valid)) {
spin_unlock_bh(&qp->state_lock);
goto exit;
}

if (unlikely(qp->req.state == QP_STATE_ERROR)) {
wqe = req_next_wqe(qp);
if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
wqe = __req_next_wqe(qp);
spin_unlock_bh(&qp->state_lock);
if (wqe)
/*
* Generate an error completion for error qp state
*/
goto err;
else
goto exit;
}

if (unlikely(qp->req.state == QP_STATE_RESET)) {
if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
qp->req.wqe_index = queue_get_consumer(q,
QUEUE_TYPE_FROM_CLIENT);
qp->req.opcode = -1;

@@ -679,8 +700,10 @@ int rxe_requester(void *arg)
qp->req.wait_psn = 0;
qp->req.need_retry = 0;
qp->req.wait_for_rnr_timer = 0;
spin_unlock_bh(&qp->state_lock);
goto exit;
}
spin_unlock_bh(&qp->state_lock);

/* we come here if the retransmit timer has fired
* or if the rnr timer has fired. If the retransmit

@@ -757,7 +780,7 @@ int rxe_requester(void *arg)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
rxe_run_task(&qp->comp.task);
rxe_sched_task(&qp->comp.task);
goto done;
}
payload = mtu;

@@ -840,12 +863,9 @@ err:
/* update wqe_index for each wqe completion */
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
qp->req.state = QP_STATE_ERROR;
rxe_run_task(&qp->comp.task);
rxe_qp_error(qp);
exit:
ret = -EAGAIN;
out:
rxe_put(qp);

return ret;
}

@@ -42,7 +42,6 @@ static char *resp_state_name[] = {
[RESPST_ERR_LENGTH] = "ERR_LENGTH",
[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
[RESPST_ERROR] = "ERROR",
[RESPST_RESET] = "RESET",
[RESPST_DONE] = "DONE",
[RESPST_EXIT] = "EXIT",
};

@@ -69,17 +68,6 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
{
struct sk_buff *skb;

if (qp->resp.state == QP_STATE_ERROR) {
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}

/* go drain recv wr queue */
return RESPST_CHK_RESOURCE;
}

skb = skb_peek(&qp->req_pkts);
if (!skb)
return RESPST_EXIT;

@@ -334,24 +322,6 @@ static enum resp_states check_resource(struct rxe_qp *qp,
{
struct rxe_srq *srq = qp->srq;

if (qp->resp.state == QP_STATE_ERROR) {
if (qp->resp.wqe) {
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
} else if (!srq) {
qp->resp.wqe = queue_head(qp->rq.queue,
QUEUE_TYPE_FROM_CLIENT);
if (qp->resp.wqe) {
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
} else {
return RESPST_EXIT;
}
} else {
return RESPST_EXIT;
}
}

if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) {
/* it is the requesters job to not send
* too many read/atomic ops, we just

@@ -1151,6 +1121,10 @@ static enum resp_states do_complete(struct rxe_qp *qp,

wc->port_num = qp->attr.port_num;
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
rxe_err_qp(qp, "non-flush error status = %d",
wc->status);
}

/* have copy for srq and reference for !srq */

@@ -1163,8 +1137,13 @@ static enum resp_states do_complete(struct rxe_qp *qp,
return RESPST_ERR_CQ_OVERFLOW;

finish:
if (unlikely(qp->resp.state == QP_STATE_ERROR))
spin_lock_bh(&qp->state_lock);
if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
spin_unlock_bh(&qp->state_lock);
return RESPST_CHK_RESOURCE;
}
spin_unlock_bh(&qp->state_lock);

if (unlikely(!pkt))
return RESPST_DONE;
if (qp_type(qp) == IB_QPT_RC)

@@ -1421,49 +1400,90 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
/* drain incoming request packet queue */
static void drain_req_pkts(struct rxe_qp *qp)
{
struct sk_buff *skb;
struct rxe_queue *q = qp->rq.queue;

while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}

if (notify)
return;

while (!qp->srq && q && queue_head(q, q->type))
queue_advance_consumer(q, q->type);
}

int rxe_responder(void *arg)
/* complete receive wqe with flush error */
static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
{
struct rxe_cqe cqe = {};
struct ib_wc *wc = &cqe.ibwc;
struct ib_uverbs_wc *uwc = &cqe.uibwc;
int err;

if (qp->rcq->is_user) {
uwc->wr_id = wqe->wr_id;
uwc->status = IB_WC_WR_FLUSH_ERR;
uwc->qp_num = qp_num(qp);
} else {
wc->wr_id = wqe->wr_id;
wc->status = IB_WC_WR_FLUSH_ERR;
wc->qp = &qp->ibqp;
}

err = rxe_cq_post(qp->rcq, &cqe, 0);
if (err)
rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);

return err;
}

/* drain and optionally complete the recive queue
* if unable to complete a wqe stop completing and
* just flush the remaining wqes
*/
static void flush_recv_queue(struct rxe_qp *qp, bool notify)
{
struct rxe_queue *q = qp->rq.queue;
struct rxe_recv_wqe *wqe;
int err;

if (qp->srq)
return;

while ((wqe = queue_head(q, q->type))) {
if (notify) {
err = flush_recv_wqe(qp, wqe);
if (err)
notify = 0;
}
queue_advance_consumer(q, q->type);
}

qp->resp.wqe = NULL;
}

int rxe_responder(struct rxe_qp *qp)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
int ret;

if (!rxe_get(qp))
return -EAGAIN;
spin_lock_bh(&qp->state_lock);
if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
qp_state(qp) == IB_QPS_RESET) {
bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);

drain_req_pkts(qp);
flush_recv_queue(qp, notify);
spin_unlock_bh(&qp->state_lock);
goto exit;
}
spin_unlock_bh(&qp->state_lock);

qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

if (!qp->valid)
goto exit;

switch (qp->resp.state) {
case QP_STATE_RESET:
state = RESPST_RESET;
break;

default:
state = RESPST_GET_REQ;
break;
}
state = RESPST_GET_REQ;

while (1) {
rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]);

@@ -1622,11 +1642,6 @@ int rxe_responder(void *arg)

goto exit;

case RESPST_RESET:
rxe_drain_req_pkts(qp, false);
qp->resp.wqe = NULL;
goto exit;

case RESPST_ERROR:
qp->resp.goto_error = 0;
rxe_dbg_qp(qp, "moved to error state\n");

@@ -1648,6 +1663,5 @@ done:
exit:
ret = -EAGAIN;
out:
rxe_put(qp);
return ret;
}

@@ -13,13 +13,13 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
struct ib_srq_attr *attr = &init->attr;

if (attr->max_wr > rxe->attr.max_srq_wr) {
rxe_dbg(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
attr->max_wr, rxe->attr.max_srq_wr);
goto err1;
}

if (attr->max_wr <= 0) {
rxe_dbg(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
goto err1;
}

@@ -27,7 +27,7 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
attr->max_wr = RXE_MIN_SRQ_WR;

if (attr->max_sge > rxe->attr.max_srq_sge) {
rxe_dbg(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
attr->max_sge, rxe->attr.max_srq_sge);
goto err1;
}

@@ -6,69 +6,128 @@

#include "rxe.h"

int __rxe_do_task(struct rxe_task *task)

/* Check if task is idle i.e. not running, not scheduled in
* tasklet queue and not draining. If so move to busy to
* reserve a slot in do_task() by setting to busy and taking
* a qp reference to cover the gap from now until the task finishes.
* state will move out of busy if task returns a non zero value
* in do_task(). If state is already busy it is raised to armed
* to indicate to do_task that additional pass should be made
* over the task.
* Context: caller should hold task->lock.
* Returns: true if state transitioned from idle to busy else false.
*/
static bool __reserve_if_idle(struct rxe_task *task)
{
int ret;
WARN_ON(rxe_read(task->qp) <= 0);

while ((ret = task->func(task->arg)) == 0)
;
if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
return false;

task->ret = ret;
if (task->state == TASK_STATE_IDLE) {
rxe_get(task->qp);
task->state = TASK_STATE_BUSY;
task->num_sched++;
return true;
}

return ret;
if (task->state == TASK_STATE_BUSY)
task->state = TASK_STATE_ARMED;

return false;
}

/*
* this locking is due to a potential race where
* a second caller finds the task already running
* but looks just after the last call to func
/* check if task is idle or drained and not currently
* scheduled in the tasklet queue. This routine is
* called by rxe_cleanup_task or rxe_disable_task to
* see if the queue is empty.
* Context: caller should hold task->lock.
* Returns true if done else false.
*/
static bool __is_done(struct rxe_task *task)
{
if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
return false;

if (task->state == TASK_STATE_IDLE ||
task->state == TASK_STATE_DRAINED) {
return true;
}

return false;
}

/* a locked version of __is_done */
static bool is_done(struct rxe_task *task)
{
unsigned long flags;
int done;

spin_lock_irqsave(&task->lock, flags);
done = __is_done(task);
spin_unlock_irqrestore(&task->lock, flags);

return done;
}

/* do_task is a wrapper for the three tasks (requester,
* completer, responder) and calls them in a loop until
* they return a non-zero value. It is called either
* directly by rxe_run_task or indirectly if rxe_sched_task
* schedules the task. They must call __reserve_if_idle to
* move the task to busy before calling or scheduling.
* The task can also be moved to drained or invalid
* by calls to rxe-cleanup_task or rxe_disable_task.
* In that case tasks which get here are not executed but
* just flushed. The tasks are designed to look to see if
* there is work to do and do part of it before returning
* here with a return value of zero until all the work
* has been consumed then it retuens a non-zero value.
* The number of times the task can be run is limited by
* max iterations so one task cannot hold the cpu forever.
*/
static void do_task(struct tasklet_struct *t)
{
int cont;
int ret;
struct rxe_task *task = from_tasklet(task, t, tasklet);
struct rxe_qp *qp = (struct rxe_qp *)task->arg;
unsigned int iterations = RXE_MAX_ITERATIONS;
unsigned int iterations;
unsigned long flags;
int resched = 0;

spin_lock_bh(&task->lock);
switch (task->state) {
case TASK_STATE_START:
task->state = TASK_STATE_BUSY;
spin_unlock_bh(&task->lock);
break;
WARN_ON(rxe_read(task->qp) <= 0);

case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
fallthrough;
case TASK_STATE_ARMED:
spin_unlock_bh(&task->lock);
return;

default:
spin_unlock_bh(&task->lock);
rxe_dbg_qp(qp, "failed with bad state %d\n", task->state);
spin_lock_irqsave(&task->lock, flags);
if (task->state >= TASK_STATE_DRAINED) {
rxe_put(task->qp);
task->num_done++;
spin_unlock_irqrestore(&task->lock, flags);
return;
}
spin_unlock_irqrestore(&task->lock, flags);

do {
iterations = RXE_MAX_ITERATIONS;
cont = 0;
ret = task->func(task->arg);

spin_lock_bh(&task->lock);
do {
ret = task->func(task->qp);
} while (ret == 0 && iterations-- > 0);

spin_lock_irqsave(&task->lock, flags);
switch (task->state) {
case TASK_STATE_BUSY:
if (ret) {
task->state = TASK_STATE_START;
} else if (iterations--) {
cont = 1;
task->state = TASK_STATE_IDLE;
} else {
/* reschedule the tasklet and exit
/* This can happen if the client
* can add work faster than the
* tasklet can finish it.
* Reschedule the tasklet and exit
* the loop to give up the cpu
*/
tasklet_schedule(&task->tasklet);
task->state = TASK_STATE_START;
task->state = TASK_STATE_IDLE;
resched = 1;
}
break;

@@ -81,71 +140,158 @@ static void do_task(struct tasklet_struct *t)
cont = 1;
break;

case TASK_STATE_DRAINING:
if (ret)
task->state = TASK_STATE_DRAINED;
else
cont = 1;
break;

default:
rxe_dbg_qp(qp, "failed with bad state %d\n",
task->state);
WARN_ON(1);
rxe_info_qp(task->qp, "unexpected task state = %d", task->state);
}
spin_unlock_bh(&task->lock);

if (!cont) {
task->num_done++;
if (WARN_ON(task->num_done != task->num_sched))
rxe_err_qp(task->qp, "%ld tasks scheduled, %ld tasks done",
task->num_sched, task->num_done);
}
spin_unlock_irqrestore(&task->lock, flags);
} while (cont);

task->ret = ret;

if (resched)
rxe_sched_task(task);

rxe_put(task->qp);
}

int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *))
int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
int (*func)(struct rxe_qp *))
{
task->arg = arg;
task->func = func;
task->destroyed = false;
WARN_ON(rxe_read(qp) <= 0);

task->qp = qp;
task->func = func;

tasklet_setup(&task->tasklet, do_task);

task->state = TASK_STATE_START;
task->state = TASK_STATE_IDLE;
spin_lock_init(&task->lock);

return 0;
}

/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
* process context. The qp is already completed with no
* remaining references. Once the queue is drained the
* task is moved to invalid and returns. The qp cleanup
* code then calls the task functions directly without
* using the task struct to drain any late arriving packets
* or work requests.
*/
void rxe_cleanup_task(struct rxe_task *task)
{
bool idle;
unsigned long flags;

/*
* Mark the task, then wait for it to finish. It might be
* running in a non-tasklet (direct call) context.
spin_lock_irqsave(&task->lock, flags);
if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
task->state = TASK_STATE_DRAINING;
} else {
task->state = TASK_STATE_INVALID;
spin_unlock_irqrestore(&task->lock, flags);
return;
}
spin_unlock_irqrestore(&task->lock, flags);

/* now the task cannot be scheduled or run just wait
* for the previously scheduled tasks to finish.
*/
task->destroyed = true;

do {
spin_lock_bh(&task->lock);
idle = (task->state == TASK_STATE_START);
spin_unlock_bh(&task->lock);
} while (!idle);
while (!is_done(task))
cond_resched();

tasklet_kill(&task->tasklet);

spin_lock_irqsave(&task->lock, flags);
task->state = TASK_STATE_INVALID;
spin_unlock_irqrestore(&task->lock, flags);
}

/* run the task inline if it is currently idle
* cannot call do_task holding the lock
*/
void rxe_run_task(struct rxe_task *task)
{
if (task->destroyed)
return;
unsigned long flags;
int run;

do_task(&task->tasklet);
WARN_ON(rxe_read(task->qp) <= 0);

spin_lock_irqsave(&task->lock, flags);
run = __reserve_if_idle(task);
spin_unlock_irqrestore(&task->lock, flags);

if (run)
do_task(&task->tasklet);
}

/* schedule the task to run later as a tasklet.
* the tasklet)schedule call can be called holding
* the lock.
*/
void rxe_sched_task(struct rxe_task *task)
{
if (task->destroyed)
return;
unsigned long flags;

tasklet_schedule(&task->tasklet);
WARN_ON(rxe_read(task->qp) <= 0);

spin_lock_irqsave(&task->lock, flags);
if (__reserve_if_idle(task))
tasklet_schedule(&task->tasklet);
spin_unlock_irqrestore(&task->lock, flags);
}

/* rxe_disable/enable_task are only called from
* rxe_modify_qp in process context. Task is moved
* to the drained state by do_task.
*/
void rxe_disable_task(struct rxe_task *task)
{
unsigned long flags;

WARN_ON(rxe_read(task->qp) <= 0);

spin_lock_irqsave(&task->lock, flags);
if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
task->state = TASK_STATE_DRAINING;
} else {
task->state = TASK_STATE_DRAINED;
spin_unlock_irqrestore(&task->lock, flags);
return;
}
spin_unlock_irqrestore(&task->lock, flags);

while (!is_done(task))
cond_resched();

tasklet_disable(&task->tasklet);
}

void rxe_enable_task(struct rxe_task *task)
{
unsigned long flags;

WARN_ON(rxe_read(task->qp) <= 0);

spin_lock_irqsave(&task->lock, flags);
if (task->state == TASK_STATE_INVALID) {
spin_unlock_irqrestore(&task->lock, flags);
return;
}
task->state = TASK_STATE_IDLE;
tasklet_enable(&task->tasklet);
spin_unlock_irqrestore(&task->lock, flags);
}

@@ -8,9 +8,12 @@
#define RXE_TASK_H

enum {
TASK_STATE_START = 0,
TASK_STATE_IDLE = 0,
TASK_STATE_BUSY = 1,
TASK_STATE_ARMED = 2,
TASK_STATE_DRAINING = 3,
TASK_STATE_DRAINED = 4,
TASK_STATE_INVALID = 5,
};

/*

@@ -22,28 +25,24 @@ struct rxe_task {
struct tasklet_struct tasklet;
int state;
spinlock_t lock;
void *arg;
int (*func)(void *arg);
struct rxe_qp *qp;
int (*func)(struct rxe_qp *qp);
int ret;
bool destroyed;
long num_sched;
long num_done;
};

/*
* init rxe_task structure
* arg => parameter to pass to fcn
* qp => parameter to pass to func
* func => function to call until it returns != 0
*/
int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *));
int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
int (*func)(struct rxe_qp *));

/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);

/*
* raw call to func in loop without any checking
* can call when tasklets are disabled
*/
int __rxe_do_task(struct rxe_task *task);

void rxe_run_task(struct rxe_task *task);

void rxe_sched_task(struct rxe_task *task);

(diff for one file suppressed because it is too large)

@@ -63,9 +63,7 @@ struct rxe_cq {
struct rxe_queue *queue;
spinlock_t cq_lock;
u8 notify;
bool is_dying;
bool is_user;
struct tasklet_struct comp_task;
atomic_t num_wq;
};

@@ -104,17 +102,7 @@ struct rxe_srq {
int error;
};

enum rxe_qp_state {
QP_STATE_RESET,
QP_STATE_INIT,
QP_STATE_READY,
QP_STATE_DRAIN, /* req only */
QP_STATE_DRAINED, /* req only */
QP_STATE_ERROR
};

struct rxe_req_info {
enum rxe_qp_state state;
int wqe_index;
u32 psn;
int opcode;

@@ -129,7 +117,6 @@ struct rxe_req_info {
};

struct rxe_comp_info {
enum rxe_qp_state state;
u32 psn;
int opcode;
int timeout;

@@ -175,7 +162,6 @@ struct resp_res {
};

struct rxe_resp_info {
enum rxe_qp_state state;
u32 msn;
u32 psn;
u32 ack_psn;

@@ -437,9 +437,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,

dev_dbg(&netdev->dev, "siw: event %lu\n", event);

if (dev_net(netdev) != &init_net)
return NOTIFY_OK;

base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
if (!base_dev)
return NOTIFY_OK;

@@ -139,7 +139,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
break;

bytes = min(bytes, len);
if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
bytes) {
copied += bytes;
offset += bytes;

@@ -487,7 +487,7 @@ int siw_proc_send(struct siw_qp *qp)
mem_p = *mem;
if (mem_p->mem_obj == NULL)
rv = siw_rx_kva(srx,
(void *)(uintptr_t)(sge->laddr + frx->sge_off),
ib_virt_dma_to_ptr(sge->laddr + frx->sge_off),
sge_bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem,

@@ -852,7 +852,7 @@ int siw_proc_rresp(struct siw_qp *qp)

if (mem_p->mem_obj == NULL)
rv = siw_rx_kva(srx,
(void *)(uintptr_t)(sge->laddr + wqe->processed),
ib_virt_dma_to_ptr(sge->laddr + wqe->processed),
bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,

@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);

if (paddr)
return virt_to_page((void *)(uintptr_t)paddr);
return ib_virt_dma_to_page(paddr);

return NULL;
}

@@ -56,8 +56,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)

if (!mem->mem_obj) {
/* Kernel client using kva */
memcpy(paddr,
(const void *)(uintptr_t)sge->laddr, bytes);
memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);
} else if (c_tx->in_syscall) {
if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
bytes))

@@ -477,7 +476,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
* or memory region with assigned kernel buffer
*/
iov[seg].iov_base =
(void *)(uintptr_t)(sge->laddr + sge_off);
ib_virt_dma_to_ptr(sge->laddr + sge_off);
iov[seg].iov_len = sge_len;

if (do_crc)

@@ -537,19 +536,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
* Cast to an uintptr_t to preserve all 64 bits
* in sge->laddr.
*/
uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
u64 va = sge->laddr + sge_off;

/*
* virt_to_page() takes a (void *) pointer
* so cast to a (void *) meaning it will be 64
* bits on a 64 bit platform and 32 bits on a
* 32 bit platform.
*/
page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
page_array[seg] = ib_virt_dma_to_page(va);
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
(void *)va,
ib_virt_dma_to_ptr(va),
plen);
}

@@ -558,7 +551,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
data_len -= plen;
fp_off = 0;

if (++seg > (int)MAX_ARRAY) {
if (++seg >= (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
siw_unmap_pages(iov, kmap_mask, seg-1);
wqe->processed -= c_tx->bytes_unsent;

@@ -660,7 +660,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
bytes = -EINVAL;
break;
}
memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
core_sge->length);

kbuf += core_sge->length;

@@ -1523,7 +1523,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
}
siw_dbg_mem(mem,
"sge[%d], size %u, addr 0x%p, total %lu\n",
i, pble->size, (void *)(uintptr_t)pble->addr,
i, pble->size, ib_virt_dma_to_ptr(pble->addr),
pbl_size);
}
rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);

@@ -141,10 +141,14 @@ out_err:

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
struct iser_tx_desc *tx_desc)
struct iser_tx_desc *tx_desc, enum iser_desc_type type,
void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
struct iser_device *device = iser_conn->ib_conn.device;

tx_desc->type = type;
tx_desc->cqe.done = done;

ib_dma_sync_single_for_cpu(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

@@ -349,9 +353,8 @@ int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
edtl = ntohl(hdr->data_length);

/* build the tx desc regd header and add it to the tx desc dto */
tx_desc->type = ISCSI_TX_SCSI_COMMAND;
tx_desc->cqe.done = iser_cmd_comp;
iser_create_send_desc(iser_conn, tx_desc);
iser_create_send_desc(iser_conn, tx_desc, ISCSI_TX_SCSI_COMMAND,
iser_cmd_comp);

if (hdr->flags & ISCSI_FLAG_CMD_READ) {
data_buf = &iser_task->data[ISER_DIR_IN];

@@ -457,7 +460,6 @@ int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
itt, buf_offset, data_seg_len);

err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;

@@ -478,9 +480,8 @@ int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
struct iser_device *device;

/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
mdesc->cqe.done = iser_ctrl_comp;
iser_create_send_desc(iser_conn, mdesc);
iser_create_send_desc(iser_conn, mdesc, ISCSI_TX_CONTROL,
iser_ctrl_comp);

device = iser_conn->ib_conn.device;

@@ -37,12 +37,6 @@

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN 8
#define ISER_MAX_RX_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
ISCSI_ISER_MAX_CONN)

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
iser_err("qp event %s (%d)\n",

@@ -549,6 +549,7 @@ static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
*/
static int srpt_refresh_port(struct srpt_port *sport)
{
struct ib_mad_agent *mad_agent;
struct ib_mad_reg_req reg_req;
struct ib_port_modify port_modify;
struct ib_port_attr port_attr;

@@ -593,24 +594,26 @@ static int srpt_refresh_port(struct srpt_port *sport)
set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
sport->port,
IB_QPT_GSI,
&reg_req, 0,
srpt_mad_send_handler,
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(sport->mad_agent)) {
mad_agent = ib_register_mad_agent(sport->sdev->device,
sport->port,
IB_QPT_GSI,
&reg_req, 0,
srpt_mad_send_handler,
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(mad_agent)) {
pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
dev_name(&sport->sdev->device->dev), sport->port,
PTR_ERR(sport->mad_agent));
PTR_ERR(mad_agent));
sport->mad_agent = NULL;
memset(&port_modify, 0, sizeof(port_modify));
port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
ib_modify_port(sport->sdev->device, sport->port, 0,
&port_modify);

return 0;
}

sport->mad_agent = mad_agent;
}

return 0;

@@ -10321,6 +10321,87 @@ struct hwrm_selftest_irq_output {
u8 valid;
};

/* dbc_dbc (size:64b/8B) */
struct dbc_dbc {
u32 index;
#define DBC_DBC_INDEX_MASK 0xffffffUL
#define DBC_DBC_INDEX_SFT 0
#define DBC_DBC_EPOCH 0x1000000UL
#define DBC_DBC_TOGGLE_MASK 0x6000000UL
#define DBC_DBC_TOGGLE_SFT 25
u32 type_path_xid;
#define DBC_DBC_XID_MASK 0xfffffUL
#define DBC_DBC_XID_SFT 0
#define DBC_DBC_PATH_MASK 0x3000000UL
#define DBC_DBC_PATH_SFT 24
#define DBC_DBC_PATH_ROCE (0x0UL << 24)
#define DBC_DBC_PATH_L2 (0x1UL << 24)
#define DBC_DBC_PATH_ENGINE (0x2UL << 24)
#define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
#define DBC_DBC_VALID 0x4000000UL
#define DBC_DBC_DEBUG_TRACE 0x8000000UL
#define DBC_DBC_TYPE_MASK 0xf0000000UL
#define DBC_DBC_TYPE_SFT 28
#define DBC_DBC_TYPE_SQ (0x0UL << 28)
#define DBC_DBC_TYPE_RQ (0x1UL << 28)
#define DBC_DBC_TYPE_SRQ (0x2UL << 28)
#define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
#define DBC_DBC_TYPE_CQ (0x4UL << 28)
#define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
#define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
#define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
#define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
#define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
#define DBC_DBC_TYPE_NQ (0xaUL << 28)
#define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
#define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
#define DBC_DBC_TYPE_NULL (0xfUL << 28)
#define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
};

/* db_push_start (size:64b/8B) */
struct db_push_start {
u64 db;
#define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
#define DB_PUSH_START_DB_INDEX_SFT 0
#define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
#define DB_PUSH_START_DB_PI_LO_SFT 24
#define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
#define DB_PUSH_START_DB_XID_SFT 32
#define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
#define DB_PUSH_START_DB_PI_HI_SFT 52
#define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
#define DB_PUSH_START_DB_TYPE_SFT 60
#define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
#define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
#define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
};

/* db_push_end (size:64b/8B) */
struct db_push_end {
u64 db;
#define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
#define DB_PUSH_END_DB_INDEX_SFT 0
#define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
#define DB_PUSH_END_DB_PI_LO_SFT 24
#define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
#define DB_PUSH_END_DB_XID_SFT 32
#define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
#define DB_PUSH_END_DB_PI_HI_SFT 52
#define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
#define DB_PUSH_END_DB_PATH_SFT 56
#define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
#define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
#define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
#define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
#define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
#define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
#define DB_PUSH_END_DB_TYPE_SFT 60
#define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
#define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
#define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
};

/* db_push_info (size:64b/8B) */
struct db_push_info {
u32 push_size_push_index;

@@ -930,8 +930,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
MLX5_CAP_GEN(mdev, relaxed_ordering_write);
bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);

return ro && lro_en ?
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;