Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (29 commits)
  IB/mthca: Simplify use of size0 in work request posting
  IB/mthca: Factor out setting WQE UD segment entries
  IB/mthca: Factor out setting WQE remote address and atomic segment entries
  IB/mlx4: Factor out setting other WQE segments
  IB/mlx4: Factor out setting WQE data segment entries
  IB/mthca: Factor out setting WQE data segment entries
  IB/mlx4: Return receive queue sizes for userspace QPs from query QP
  IB/mlx4: Increase max outstanding RDMA reads as target
  RDMA/cma: Remove local write permission from QP access flags
  IB/mthca: Use uninitialized_var() for f0
  IB/cm: Make internal function cm_get_ack_delay() static
  IB/ipath: Remove ipath_get_user_pages_nocopy()
  IB/ipath: Make a few functions static
  mlx4_core: Reset device when internal error is detected
  IB/iser: Make a couple of functions static
  IB/mthca: Fix printk format used for firmware version in warning
  IB/mthca: Schedule MSI support for removal
  IB/ehca: Fix warnings issued by checkpatch.pl
  IB/ehca: Restructure ehca_set_pagebuf()
  IB/ehca: MR/MW structure refactoring
  ...
commit d796e641a3
@@ -310,3 +310,13 @@ Why: The arch/powerpc tree is the merged architecture for ppc32 and ppc64
Who: linuxppc-dev@ozlabs.org

---------------------------

What: mthca driver's MSI support
When: January 2008
Files: drivers/infiniband/hw/mthca/*.[ch]
Why: All mthca hardware also supports MSI-X, which provides
     strictly more functionality than MSI. So there is no point in
     having both MSI-X and MSI support in the driver.
Who: Roland Dreier <rolandd@cisco.com>

---------------------------
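As an aside, a minimal sketch (not part of the patch, using a generic PCI driver and the hypothetical names hw_setup_interrupts and NUM_VECTORS) of the probe-time interrupt setup this entry has in mind; since every device that can use MSI-X can fall back to legacy INTx, the separate single-vector MSI branch is the one being scheduled for removal:

#include <linux/pci.h>

#define NUM_VECTORS 3	/* e.g. one vector each for completion, async and command events */

static int hw_setup_interrupts(struct pci_dev *pdev)
{
	struct msix_entry entries[NUM_VECTORS];
	int i;

	for (i = 0; i < NUM_VECTORS; ++i)
		entries[i].entry = i;

	if (!pci_enable_msix(pdev, entries, NUM_VECTORS))
		return 0;		/* MSI-X: one vector per event queue */

	if (!pci_enable_msi(pdev))	/* the branch mthca is dropping */
		return 0;

	return 0;			/* legacy INTx needs no enable call */
}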
@@ -3374,7 +3374,7 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

void cm_get_ack_delay(struct cm_device *cm_dev)
static void cm_get_ack_delay(struct cm_device *cm_dev)
{
struct ib_device_attr attr;
@@ -573,7 +573,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
break;
case RDMA_TRANSPORT_IWARP:
if (!id_priv->cm_id.iw) {
qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr->qp_access_flags = 0;
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
@@ -1914,6 +1914,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
fail3:
cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
cm_id->rem_ref(cm_id);
put_ep(&ep->com);
fail1:
out:
@@ -204,8 +204,8 @@ struct ehca_mr {
spinlock_t mrlock;

enum ehca_mr_flag flags;
u32 num_pages;    /* number of MR pages */
u32 num_4k;       /* number of 4k "page" portions to form MR */
u32 num_kpages;   /* number of kernel pages */
u32 num_hwpages;  /* number of hw pages to form MR */
int acl;          /* ACL (stored here for usage in reregister) */
u64 *start;       /* virtual start address (stored here for */
                  /* usage in reregister) */

@@ -217,9 +217,6 @@ struct ehca_mr {
/* fw specific data */
struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
struct h_galpas galpas;
/* data for userspace bridge */
u32 nr_of_pages;
void *pagearray;
};

struct ehca_mw {

@@ -241,26 +238,29 @@ enum ehca_mr_pgi_type {
struct ehca_mr_pginfo {
enum ehca_mr_pgi_type type;
u64 num_pages;
u64 page_cnt;
u64 num_4k;       /* number of 4k "page" portions */
u64 page_4k_cnt;  /* counter for 4k "page" portions */
u64 next_4k;      /* next 4k "page" portion in buffer/chunk/listelem */
u64 num_kpages;
u64 kpage_cnt;
u64 num_hwpages;  /* number of hw pages */
u64 hwpage_cnt;   /* counter for hw pages */
u64 next_hwpage;  /* next hw page in buffer/chunk/listelem */

/* type EHCA_MR_PGI_PHYS section */
union {
struct { /* type EHCA_MR_PGI_PHYS section */
int num_phys_buf;
struct ib_phys_buf *phys_buf_array;
u64 next_buf;

/* type EHCA_MR_PGI_USER section */
} phy;
struct { /* type EHCA_MR_PGI_USER section */
struct ib_umem *region;
struct ib_umem_chunk *next_chunk;
u64 next_nmap;

/* type EHCA_MR_PGI_FMR section */
} usr;
struct { /* type EHCA_MR_PGI_FMR section */
u64 fmr_pgsize;
u64 *page_list;
u64 next_listelem;
/* next_4k also used within EHCA_MR_PGI_FMR */
} fmr;
} u;
};

/* output parameters for MR/FMR hipz calls */
@@ -96,7 +96,8 @@ int ehca_create_eq(struct ehca_shca *shca,
for (i = 0; i < nr_pages; i++) {
u64 rpage;

if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
if (!vpage) {
ret = H_RESOURCE;
goto create_eq_exit2;
}
@@ -127,6 +127,7 @@ int ehca_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
int ret = 0;
u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                       ib_device);
struct hipz_query_port *rblock;

@@ -137,7 +138,8 @@ int ehca_query_port(struct ib_device *ibdev,
return -ENOMEM;
}

if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_port1;

@@ -197,6 +199,7 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
u8 port, struct ehca_sma_attr *attr)
{
int ret = 0;
u64 h_ret;
struct hipz_query_port *rblock;

rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);

@@ -205,7 +208,8 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
return -ENOMEM;
}

if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_sma_attr1;

@@ -230,9 +234,11 @@ query_sma_attr1:
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
int ret = 0;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
u64 h_ret;
struct ehca_shca *shca;
struct hipz_query_port *rblock;

shca = container_of(ibdev, struct ehca_shca, ib_device);
if (index > 16) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;

@@ -244,7 +250,8 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return -ENOMEM;
}

if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_pkey1;

@@ -262,6 +269,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid)
{
int ret = 0;
u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                       ib_device);
struct hipz_query_port *rblock;

@@ -277,7 +285,8 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
return -ENOMEM;
}

if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_gid1;

@@ -302,11 +311,12 @@ int ehca_modify_port(struct ib_device *ibdev,
struct ib_port_modify *props)
{
int ret = 0;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
struct ehca_shca *shca;
struct hipz_query_port *rblock;
u32 cap;
u64 hret;

shca = container_of(ibdev, struct ehca_shca, ib_device);
if ((props->set_port_cap_mask | props->clr_port_cap_mask)
    & ~allowed_port_caps) {
ehca_err(&shca->ib_device, "Non-changeable bits set in masks "

@@ -325,7 +335,8 @@ int ehca_modify_port(struct ib_device *ibdev,
goto modify_port1;
}

if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto modify_port2;

@@ -337,7 +348,8 @@ int ehca_modify_port(struct ib_device *ibdev,
hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
                          cap, props->init_type, port_modify_mask);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
ehca_err(&shca->ib_device, "Modify port failed hret=%lx",
         hret);
ret = -EINVAL;
}
@@ -81,8 +81,9 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
int num_phys_buf,
int mr_access_flags, u64 *iova_start);

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
                               int mr_access_flags, struct ib_udata *udata);
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt, int mr_access_flags,
                               struct ib_udata *udata);

int ehca_rereg_phys_mr(struct ib_mr *mr,
int mr_rereg_mask,
@@ -107,7 +107,7 @@ static DEFINE_SPINLOCK(shca_list_lock);
static struct timer_list poll_eqs_timer;

#ifdef CONFIG_PPC_64K_PAGES
static struct kmem_cache *ctblk_cache = NULL;
static struct kmem_cache *ctblk_cache;

void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{

@@ -263,22 +263,27 @@ int ehca_sense_attributes(struct ehca_shca *shca)
ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);

if ((hcaaver == 1) && (revid == 0))
shca->hw_level = 0x11;
else if ((hcaaver == 1) && (revid == 1))
shca->hw_level = 0x12;
else if ((hcaaver == 1) && (revid == 2))
shca->hw_level = 0x13;
else if ((hcaaver == 2) && (revid == 0))
if (hcaaver == 1) {
if (revid <= 3)
shca->hw_level = 0x10 | (revid + 1);
else
shca->hw_level = 0x14;
} else if (hcaaver == 2) {
if (revid == 0)
shca->hw_level = 0x21;
else if ((hcaaver == 2) && (revid == 0x10))
else if (revid == 0x10)
shca->hw_level = 0x22;
else {
else if (revid == 0x20 || revid == 0x21)
shca->hw_level = 0x23;
}

if (!shca->hw_level) {
ehca_gen_warn("unknown hardware version"
              " - assuming default level");
shca->hw_level = 0x22;
}
}
} else
shca->hw_level = ehca_hw_level;
ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);

shca->sport[0].rate = IB_RATE_30_GBPS;

@@ -863,18 +868,21 @@ int __init ehca_module_init(void)
printk(KERN_INFO "eHCA Infiniband Device Driver "
       "(Rel.: SVNEHCA_0023)\n");

if ((ret = ehca_create_comp_pool())) {
ret = ehca_create_comp_pool();
if (ret) {
ehca_gen_err("Cannot create comp pool.");
return ret;
}

if ((ret = ehca_create_slab_caches())) {
ret = ehca_create_slab_caches();
if (ret) {
ehca_gen_err("Cannot create SLAB caches");
ret = -ENOMEM;
goto module_init1;
}

if ((ret = ibmebus_register_driver(&ehca_driver))) {
ret = ibmebus_register_driver(&ehca_driver);
if (ret) {
ehca_gen_err("Cannot register eHCA device driver");
ret = -EINVAL;
goto module_init2;
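The restructured ehca_sense_attributes() branches above compress the old per-revision chain into a small mapping. A stand-alone restatement of that mapping (illustrative only, not part of the patch; the function name is hypothetical):

/* hcaaver/revid -> hw_level as computed by the new branches; 0 means
 * "unknown", in which case the driver warns and defaults to 0x22. */
static int ehca_hw_level_of(int hcaaver, int revid)
{
	if (hcaaver == 1)
		return revid <= 3 ? 0x10 | (revid + 1) : 0x14;	/* 0x11..0x14 */
	if (hcaaver == 2) {
		if (revid == 0)
			return 0x21;
		if (revid == 0x10)
			return 0x22;
		if (revid == 0x20 || revid == 0x21)
			return 0x23;
	}
	return 0;
}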
(File diff suppressed because it is too large.)
@@ -101,15 +101,10 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
u64 *page_list,
int list_len);

int ehca_set_pagebuf(struct ehca_mr *e_mr,
                     struct ehca_mr_pginfo *pginfo,
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
                     u32 number,
                     u64 *kpage);

int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
                       struct ehca_mr_pginfo *pginfo,
                       u64 *rpage);

int ehca_mr_is_maxmr(u64 size,
                     u64 *iova_start);

@@ -121,20 +116,6 @@ void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl);
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
                               int *ib_acl);

int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);

int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);

int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);

int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);

int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);

int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);

int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);

void ehca_mr_deletenew(struct ehca_mr *mr);

#endif /*_EHCA_MRMW_H_*/
@@ -602,10 +602,10 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
/* UD circumvention */
parms.act_nr_send_sges -= 2;
parms.act_nr_recv_sges -= 2;
swqe_size = offsetof(struct ehca_wqe,
                     u.ud_av.sg_list[parms.act_nr_send_sges]);
rwqe_size = offsetof(struct ehca_wqe,
                     u.ud_av.sg_list[parms.act_nr_recv_sges]);
swqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
                     parms.act_nr_send_sges]);
rwqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
                     parms.act_nr_recv_sges]);
}

if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {

@@ -690,8 +690,8 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
if (my_qp->send_cq) {
ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
if (ret) {
ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
         ret);
ehca_err(pd->device,
         "Couldn't assign qp to send_cq ret=%x", ret);
goto create_qp_exit4;
}
}

@@ -1019,7 +1019,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
goto modify_qp_exit1;
}

if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state)))
mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
if (mqpcb->qp_state)
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
else {
ret = -EINVAL;
@@ -79,7 +79,8 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
}

if (ehca_debug_level) {
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
             ipz_rqueue);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
}

@@ -534,9 +535,11 @@ poll_cq_one_read_cqe:
cqe_count++;
if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
struct ehca_qp *qp;
int purgeflag;
unsigned long flags;

qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
if (!qp) {
ehca_err(cq->device, "cq_num=%x qp_num=%x "
         "could not find qp -> ignore cqe",

@@ -551,8 +554,8 @@ poll_cq_one_read_cqe:
spin_unlock_irqrestore(&qp->spinlock_s, flags);

if (purgeflag) {
ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
         "src_qp=%x",
ehca_dbg(cq->device,
         "Got CQE with purged bit qp_num=%x src_qp=%x",
         cqe->local_qp_number, cqe->remote_qp_number);
if (ehca_debug_level)
ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
@@ -119,7 +119,7 @@ extern int ehca_debug_level;
unsigned int l = (unsigned int)(len); \
unsigned char *deb = (unsigned char *)(adr); \
for (x = 0; x < l; x += 16) { \
printk("EHCA_DMP:%s " format \
printk(KERN_INFO "EHCA_DMP:%s " format \
       " adr=%p ofs=%04x %016lx %016lx\n", \
       __FUNCTION__, ##args, deb, x, \
       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \

@@ -137,7 +137,7 @@ extern int ehca_debug_level;
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

/* internal function, don't use */
#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))

/**
 * EHCA_BMASK_SET - return value shifted and masked by mask

@@ -161,8 +161,11 @@ static inline int ehca2ib_return_code(u64 ehca_rc)
switch (ehca_rc) {
case H_SUCCESS:
	return 0;
case H_RESOURCE: /* Resource in use */
case H_BUSY:
	return -EBUSY;
case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
case H_CONSTRAINED: /* resource constraint */
case H_NO_MEM:
	return -ENOMEM;
default:
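The EHCA_BMASK_MASK change above only swaps the 16-digit literal for ~0ULL; assuming the usual 64-bit unsigned long long, the two expressions are identical for every valid width. A small user-space check (illustrative, not part of the patch):

#include <assert.h>
#include <stdio.h>

#define OLD_MASK(n) (0xffffffffffffffffULL >> ((64 - (n)) & 0xffff))
#define NEW_MASK(n) (~0ULL >> ((64 - (n)) & 0xffff))

int main(void)
{
	int n;

	for (n = 1; n <= 64; ++n)
		assert(OLD_MASK(n) == NEW_MASK(n));

	printf("NEW_MASK(8)  = 0x%llx\n", NEW_MASK(8));		/* 0xff */
	printf("NEW_MASK(16) = 0x%llx\n", NEW_MASK(16));	/* 0xffff */
	return 0;
}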
@@ -215,7 +215,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
case 2: /* qp rqueue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
         qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
                      &qp->mm_count_rqueue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
         "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",

@@ -227,7 +228,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
case 3: /* qp squeue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
         qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
                      &qp->mm_count_squeue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
         "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
@@ -1889,7 +1889,7 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

void ipath_run_led_override(unsigned long opaque)
static void ipath_run_led_override(unsigned long opaque)
{
struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
int timeoff;
@@ -426,7 +426,7 @@ bail:
 * @buffer: data to write
 * @len: number of bytes to write
 */
int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
                                const void *buffer, int len)
{
u8 single_byte;
@@ -70,7 +70,7 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
 * If rewrite is true, and bits are set in the sendbufferror registers,
 * we'll write to the buffer, for error recovery on parity errors.
 */
void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
{
u32 piobcnt;
unsigned long sbuf[4];
@@ -776,7 +776,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *);
int ipath_update_eeprom_log(struct ipath_devdata *dd);
void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
void ipath_disarm_senderrbufs(struct ipath_devdata *, int);

/*
 * Set LED override, only the two LSBs have "public" meaning, but

@@ -820,7 +819,6 @@ static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
#define IPATH_MDIO_CTRL_8355_REG_10 0x1D

int ipath_get_user_pages(unsigned long, size_t, struct page **);
int ipath_get_user_pages_nocopy(unsigned long, struct page **);
void ipath_release_user_pages(struct page **, size_t);
void ipath_release_user_pages_on_close(struct page **, size_t);
int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
@@ -507,7 +507,7 @@ static int want_buffer(struct ipath_devdata *dd)
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
static void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
unsigned long flags;
@@ -171,32 +171,6 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
return ret;
}

/**
 * ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared
 * @start_page: the page to lock
 * @p: the output page structure
 *
 * This is similar to ipath_get_user_pages, but it's always one page, and we
 * mark the page as locked for I/O, and shared. This is used for the user
 * process page that contains the destination address for the rcvhdrq tail
 * update, so we need to have the vma. If we don't do this, the page can be
 * taken away from us on fork, even if the child never touches it, and then
 * the user process never sees the tail register updates.
 */
int ipath_get_user_pages_nocopy(unsigned long page, struct page **p)
{
struct vm_area_struct *vma;
int ret;

down_write(&current->mm->mmap_sem);

ret = __get_user_pages(page, 1, p, &vma);

up_write(&current->mm->mmap_sem);

return ret;
}

void ipath_release_user_pages(struct page **p, size_t num_pages)
{
down_write(&current->mm->mmap_sem);
|
|||
* This is called from ipath_do_rcv_timer() at interrupt level to check for
|
||||
* QPs which need retransmits and to collect performance numbers.
|
||||
*/
|
||||
void ipath_ib_timer(struct ipath_ibdev *dev)
|
||||
static void ipath_ib_timer(struct ipath_ibdev *dev)
|
||||
{
|
||||
struct ipath_qp *resend = NULL;
|
||||
struct list_head *last;
|
||||
|
|
|
@ -782,8 +782,6 @@ void ipath_update_mmap_info(struct ipath_ibdev *dev,
|
|||
|
||||
int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
|
||||
|
||||
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
|
||||
|
||||
void ipath_insert_rnr_queue(struct ipath_qp *qp);
|
||||
|
||||
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
|
||||
|
@ -807,8 +805,6 @@ void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
|
|||
|
||||
int ipath_ib_piobufavail(struct ipath_ibdev *);
|
||||
|
||||
void ipath_ib_timer(struct ipath_ibdev *);
|
||||
|
||||
unsigned ipath_get_npkeys(struct ipath_devdata *);
|
||||
|
||||
u32 ipath_get_cr_errpkey(struct ipath_devdata *);
|
||||
|
|
|
@ -1183,6 +1183,43 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
|
|||
return cur + nreq >= wq->max_post;
|
||||
}
|
||||
|
||||
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
|
||||
u64 remote_addr, u32 rkey)
|
||||
{
|
||||
rseg->raddr = cpu_to_be64(remote_addr);
|
||||
rseg->rkey = cpu_to_be32(rkey);
|
||||
rseg->reserved = 0;
|
||||
}
|
||||
|
||||
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
|
||||
{
|
||||
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
|
||||
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
|
||||
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
} else {
|
||||
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
aseg->compare = 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
|
||||
struct ib_send_wr *wr)
|
||||
{
|
||||
memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
|
||||
dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
|
||||
dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
|
||||
|
||||
}
|
||||
|
||||
static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
|
||||
struct ib_sge *sg)
|
||||
{
|
||||
dseg->byte_count = cpu_to_be32(sg->length);
|
||||
dseg->lkey = cpu_to_be32(sg->lkey);
|
||||
dseg->addr = cpu_to_be64(sg->addr);
|
||||
}
|
||||
|
||||
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr)
|
||||
{
|
||||
|
@ -1238,26 +1275,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
switch (wr->opcode) {
|
||||
case IB_WR_ATOMIC_CMP_AND_SWP:
|
||||
case IB_WR_ATOMIC_FETCH_AND_ADD:
|
||||
((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.atomic.remote_addr);
|
||||
((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.atomic.rkey);
|
||||
((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
|
||||
|
||||
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
|
||||
wr->wr.atomic.rkey);
|
||||
wqe += sizeof (struct mlx4_wqe_raddr_seg);
|
||||
|
||||
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
|
||||
((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
|
||||
cpu_to_be64(wr->wr.atomic.swap);
|
||||
((struct mlx4_wqe_atomic_seg *) wqe)->compare =
|
||||
cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
} else {
|
||||
((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
|
||||
cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
|
||||
}
|
||||
|
||||
set_atomic_seg(wqe, wr);
|
||||
wqe += sizeof (struct mlx4_wqe_atomic_seg);
|
||||
|
||||
size += (sizeof (struct mlx4_wqe_raddr_seg) +
|
||||
sizeof (struct mlx4_wqe_atomic_seg)) / 16;
|
||||
|
||||
|
@ -1266,15 +1290,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
case IB_WR_RDMA_READ:
|
||||
case IB_WR_RDMA_WRITE:
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.rdma.remote_addr);
|
||||
((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.rdma.rkey);
|
||||
((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
|
||||
|
||||
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
|
||||
wr->wr.rdma.rkey);
|
||||
wqe += sizeof (struct mlx4_wqe_raddr_seg);
|
||||
size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -1284,13 +1303,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
break;
|
||||
|
||||
case IB_QPT_UD:
|
||||
memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av,
|
||||
&to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
|
||||
((struct mlx4_wqe_datagram_seg *) wqe)->dqpn =
|
||||
cpu_to_be32(wr->wr.ud.remote_qpn);
|
||||
((struct mlx4_wqe_datagram_seg *) wqe)->qkey =
|
||||
cpu_to_be32(wr->wr.ud.remote_qkey);
|
||||
|
||||
set_datagram_seg(wqe, wr);
|
||||
wqe += sizeof (struct mlx4_wqe_datagram_seg);
|
||||
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
|
||||
break;
|
||||
|
@ -1313,12 +1326,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
}
|
||||
|
||||
for (i = 0; i < wr->num_sge; ++i) {
|
||||
((struct mlx4_wqe_data_seg *) wqe)->byte_count =
|
||||
cpu_to_be32(wr->sg_list[i].length);
|
||||
((struct mlx4_wqe_data_seg *) wqe)->lkey =
|
||||
cpu_to_be32(wr->sg_list[i].lkey);
|
||||
((struct mlx4_wqe_data_seg *) wqe)->addr =
|
||||
cpu_to_be64(wr->sg_list[i].addr);
|
||||
set_data_seg(wqe, wr->sg_list + i);
|
||||
|
||||
wqe += sizeof (struct mlx4_wqe_data_seg);
|
||||
size += sizeof (struct mlx4_wqe_data_seg) / 16;
|
||||
|
@ -1498,7 +1506,7 @@ static int to_ib_qp_access_flags(int mlx4_flags)
|
|||
static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
|
||||
struct mlx4_qp_path *path)
|
||||
{
|
||||
memset(ib_ah_attr, 0, sizeof *path);
|
||||
memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
|
||||
ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
|
||||
|
||||
if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
|
||||
|
@ -1515,7 +1523,7 @@ static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
|
|||
ib_ah_attr->grh.traffic_class =
|
||||
(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
|
||||
ib_ah_attr->grh.flow_label =
|
||||
be32_to_cpu(path->tclass_flowlabel) & 0xffffff;
|
||||
be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
|
||||
memcpy(ib_ah_attr->grh.dgid.raw,
|
||||
path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
|
||||
}
|
||||
|
@ -1560,6 +1568,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
|
|||
}
|
||||
|
||||
qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
|
||||
if (qp_attr->qp_state == IB_QPS_INIT)
|
||||
qp_attr->port_num = qp->port;
|
||||
else
|
||||
qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
|
||||
|
||||
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
|
||||
|
@ -1578,17 +1589,25 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
|
|||
|
||||
done:
|
||||
qp_attr->cur_qp_state = qp_attr->qp_state;
|
||||
qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
|
||||
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
|
||||
|
||||
if (!ibqp->uobject) {
|
||||
qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
|
||||
qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
|
||||
qp_attr->cap.max_send_sge = qp->sq.max_gs;
|
||||
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
|
||||
qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) -
|
||||
send_wqe_overhead(qp->ibqp.qp_type) -
|
||||
sizeof (struct mlx4_wqe_inline_seg);
|
||||
qp_init_attr->cap = qp_attr->cap;
|
||||
} else {
|
||||
qp_attr->cap.max_send_wr = 0;
|
||||
qp_attr->cap.max_send_sge = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* We don't support inline sends for kernel QPs (yet), and we
|
||||
* don't know what userspace's value should be.
|
||||
*/
|
||||
qp_attr->cap.max_inline_data = 0;
|
||||
|
||||
qp_init_attr->cap = qp_attr->cap;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
|
|||
|
||||
static int msi = 0;
|
||||
module_param(msi, int, 0444);
|
||||
MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
|
||||
MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)");
|
||||
|
||||
#else /* CONFIG_PCI_MSI */
|
||||
|
||||
|
@ -1117,9 +1117,21 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
|
|||
|
||||
if (msi_x && !mthca_enable_msi_x(mdev))
|
||||
mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
|
||||
if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) &&
|
||||
!pci_enable_msi(pdev))
|
||||
else if (msi) {
|
||||
static int warned;
|
||||
|
||||
if (!warned) {
|
||||
printk(KERN_WARNING PFX "WARNING: MSI support will be "
|
||||
"removed from the ib_mthca driver in January 2008.\n");
|
||||
printk(KERN_WARNING " If you are using MSI and cannot "
|
||||
"switch to MSI-X, please tell "
|
||||
"<general@lists.openfabrics.org>.\n");
|
||||
++warned;
|
||||
}
|
||||
|
||||
if (!pci_enable_msi(pdev))
|
||||
mdev->mthca_flags |= MTHCA_FLAG_MSI;
|
||||
}
|
||||
|
||||
if (mthca_cmd_init(mdev)) {
|
||||
mthca_err(mdev, "Failed to init command interface, aborting.\n");
|
||||
|
@ -1135,7 +1147,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
|
|||
goto err_cmd;
|
||||
|
||||
if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
|
||||
mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n",
|
||||
mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
|
||||
(int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
|
||||
(int) (mdev->fw_ver & 0xffff),
|
||||
(int) (mthca_hca_table[hca_type].latest_fw >> 32),
|
||||
|
|
|
@ -1578,6 +1578,45 @@ static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
|
|||
return cur + nreq >= wq->max;
|
||||
}
|
||||
|
||||
static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
|
||||
u64 remote_addr, u32 rkey)
|
||||
{
|
||||
rseg->raddr = cpu_to_be64(remote_addr);
|
||||
rseg->rkey = cpu_to_be32(rkey);
|
||||
rseg->reserved = 0;
|
||||
}
|
||||
|
||||
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
|
||||
struct ib_send_wr *wr)
|
||||
{
|
||||
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
|
||||
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
|
||||
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
} else {
|
||||
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
aseg->compare = 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
|
||||
struct ib_send_wr *wr)
|
||||
{
|
||||
useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
|
||||
useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
|
||||
useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
|
||||
useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
|
||||
|
||||
}
|
||||
|
||||
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
|
||||
struct ib_send_wr *wr)
|
||||
{
|
||||
memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
|
||||
useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
|
||||
useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
|
||||
}
|
||||
|
||||
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr)
|
||||
{
|
||||
|
@ -1590,8 +1629,15 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
int nreq;
|
||||
int i;
|
||||
int size;
|
||||
int size0 = 0;
|
||||
u32 f0 = 0;
|
||||
/*
|
||||
* f0 and size0 are only used if nreq != 0, and they will
|
||||
* always be initialized the first time through the main loop
|
||||
* before nreq is incremented. So nreq cannot become non-zero
|
||||
* without initializing f0 and size0, and they are in fact
|
||||
* never used uninitialized.
|
||||
*/
|
||||
int uninitialized_var(size0);
|
||||
u32 uninitialized_var(f0);
|
||||
int ind;
|
||||
u8 op0 = 0;
|
||||
|
||||
|
@ -1636,25 +1682,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
switch (wr->opcode) {
|
||||
case IB_WR_ATOMIC_CMP_AND_SWP:
|
||||
case IB_WR_ATOMIC_FETCH_AND_ADD:
|
||||
((struct mthca_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.atomic.remote_addr);
|
||||
((struct mthca_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.atomic.rkey);
|
||||
((struct mthca_raddr_seg *) wqe)->reserved = 0;
|
||||
|
||||
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
|
||||
wr->wr.atomic.rkey);
|
||||
wqe += sizeof (struct mthca_raddr_seg);
|
||||
|
||||
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
|
||||
((struct mthca_atomic_seg *) wqe)->swap_add =
|
||||
cpu_to_be64(wr->wr.atomic.swap);
|
||||
((struct mthca_atomic_seg *) wqe)->compare =
|
||||
cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
} else {
|
||||
((struct mthca_atomic_seg *) wqe)->swap_add =
|
||||
cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
((struct mthca_atomic_seg *) wqe)->compare = 0;
|
||||
}
|
||||
|
||||
set_atomic_seg(wqe, wr);
|
||||
wqe += sizeof (struct mthca_atomic_seg);
|
||||
size += (sizeof (struct mthca_raddr_seg) +
|
||||
sizeof (struct mthca_atomic_seg)) / 16;
|
||||
|
@ -1663,11 +1695,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
case IB_WR_RDMA_WRITE:
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
case IB_WR_RDMA_READ:
|
||||
((struct mthca_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.rdma.remote_addr);
|
||||
((struct mthca_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.rdma.rkey);
|
||||
((struct mthca_raddr_seg *) wqe)->reserved = 0;
|
||||
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
|
||||
wr->wr.rdma.rkey);
|
||||
wqe += sizeof (struct mthca_raddr_seg);
|
||||
size += sizeof (struct mthca_raddr_seg) / 16;
|
||||
break;
|
||||
|
@ -1683,11 +1712,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
switch (wr->opcode) {
|
||||
case IB_WR_RDMA_WRITE:
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
((struct mthca_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.rdma.remote_addr);
|
||||
((struct mthca_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.rdma.rkey);
|
||||
((struct mthca_raddr_seg *) wqe)->reserved = 0;
|
||||
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
|
||||
wr->wr.rdma.rkey);
|
||||
wqe += sizeof (struct mthca_raddr_seg);
|
||||
size += sizeof (struct mthca_raddr_seg) / 16;
|
||||
break;
|
||||
|
@ -1700,15 +1726,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
break;
|
||||
|
||||
case UD:
|
||||
((struct mthca_tavor_ud_seg *) wqe)->lkey =
|
||||
cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
|
||||
((struct mthca_tavor_ud_seg *) wqe)->av_addr =
|
||||
cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
|
||||
((struct mthca_tavor_ud_seg *) wqe)->dqpn =
|
||||
cpu_to_be32(wr->wr.ud.remote_qpn);
|
||||
((struct mthca_tavor_ud_seg *) wqe)->qkey =
|
||||
cpu_to_be32(wr->wr.ud.remote_qkey);
|
||||
|
||||
set_tavor_ud_seg(wqe, wr);
|
||||
wqe += sizeof (struct mthca_tavor_ud_seg);
|
||||
size += sizeof (struct mthca_tavor_ud_seg) / 16;
|
||||
break;
|
||||
|
@ -1734,12 +1752,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
}
|
||||
|
||||
for (i = 0; i < wr->num_sge; ++i) {
|
||||
((struct mthca_data_seg *) wqe)->byte_count =
|
||||
cpu_to_be32(wr->sg_list[i].length);
|
||||
((struct mthca_data_seg *) wqe)->lkey =
|
||||
cpu_to_be32(wr->sg_list[i].lkey);
|
||||
((struct mthca_data_seg *) wqe)->addr =
|
||||
cpu_to_be64(wr->sg_list[i].addr);
|
||||
mthca_set_data_seg(wqe, wr->sg_list + i);
|
||||
wqe += sizeof (struct mthca_data_seg);
|
||||
size += sizeof (struct mthca_data_seg) / 16;
|
||||
}
|
||||
|
@ -1768,11 +1781,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
mthca_opcode[wr->opcode]);
|
||||
wmb();
|
||||
((struct mthca_next_seg *) prev_wqe)->ee_nds =
|
||||
cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
|
||||
cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
|
||||
((wr->send_flags & IB_SEND_FENCE) ?
|
||||
MTHCA_NEXT_FENCE : 0));
|
||||
|
||||
if (!size0) {
|
||||
if (!nreq) {
|
||||
size0 = size;
|
||||
op0 = mthca_opcode[wr->opcode];
|
||||
f0 = wr->send_flags & IB_SEND_FENCE ?
|
||||
|
@ -1822,7 +1835,14 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
|||
int nreq;
|
||||
int i;
|
||||
int size;
|
||||
int size0 = 0;
|
||||
/*
|
||||
* size0 is only used if nreq != 0, and it will always be
|
||||
* initialized the first time through the main loop before
|
||||
* nreq is incremented. So nreq cannot become non-zero
|
||||
* without initializing size0, and it is in fact never used
|
||||
* uninitialized.
|
||||
*/
|
||||
int uninitialized_var(size0);
|
||||
int ind;
|
||||
void *wqe;
|
||||
void *prev_wqe;
|
||||
|
@ -1863,12 +1883,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
|||
}
|
||||
|
||||
for (i = 0; i < wr->num_sge; ++i) {
|
||||
((struct mthca_data_seg *) wqe)->byte_count =
|
||||
cpu_to_be32(wr->sg_list[i].length);
|
||||
((struct mthca_data_seg *) wqe)->lkey =
|
||||
cpu_to_be32(wr->sg_list[i].lkey);
|
||||
((struct mthca_data_seg *) wqe)->addr =
|
||||
cpu_to_be64(wr->sg_list[i].addr);
|
||||
mthca_set_data_seg(wqe, wr->sg_list + i);
|
||||
wqe += sizeof (struct mthca_data_seg);
|
||||
size += sizeof (struct mthca_data_seg) / 16;
|
||||
}
|
||||
|
@ -1881,7 +1896,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
|||
((struct mthca_next_seg *) prev_wqe)->ee_nds =
|
||||
cpu_to_be32(MTHCA_NEXT_DBD | size);
|
||||
|
||||
if (!size0)
|
||||
if (!nreq)
|
||||
size0 = size;
|
||||
|
||||
++ind;
|
||||
|
@ -1903,7 +1918,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
|||
|
||||
qp->rq.next_ind = ind;
|
||||
qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
|
||||
size0 = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1945,8 +1959,15 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
int nreq;
|
||||
int i;
|
||||
int size;
|
||||
int size0 = 0;
|
||||
u32 f0 = 0;
|
||||
/*
|
||||
* f0 and size0 are only used if nreq != 0, and they will
|
||||
* always be initialized the first time through the main loop
|
||||
* before nreq is incremented. So nreq cannot become non-zero
|
||||
* without initializing f0 and size0, and they are in fact
|
||||
* never used uninitialized.
|
||||
*/
|
||||
int uninitialized_var(size0);
|
||||
u32 uninitialized_var(f0);
|
||||
int ind;
|
||||
u8 op0 = 0;
|
||||
|
||||
|
@ -1966,7 +1987,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
|
||||
|
||||
qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
|
||||
size0 = 0;
|
||||
|
||||
/*
|
||||
* Make sure that descriptors are written before
|
||||
|
@ -2017,25 +2037,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
switch (wr->opcode) {
|
||||
case IB_WR_ATOMIC_CMP_AND_SWP:
|
||||
case IB_WR_ATOMIC_FETCH_AND_ADD:
|
||||
((struct mthca_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.atomic.remote_addr);
|
||||
((struct mthca_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.atomic.rkey);
|
||||
((struct mthca_raddr_seg *) wqe)->reserved = 0;
|
||||
|
||||
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
|
||||
wr->wr.atomic.rkey);
|
||||
wqe += sizeof (struct mthca_raddr_seg);
|
||||
|
||||
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
|
||||
((struct mthca_atomic_seg *) wqe)->swap_add =
|
||||
cpu_to_be64(wr->wr.atomic.swap);
|
||||
((struct mthca_atomic_seg *) wqe)->compare =
|
||||
cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
} else {
|
||||
((struct mthca_atomic_seg *) wqe)->swap_add =
|
||||
cpu_to_be64(wr->wr.atomic.compare_add);
|
||||
((struct mthca_atomic_seg *) wqe)->compare = 0;
|
||||
}
|
||||
|
||||
set_atomic_seg(wqe, wr);
|
||||
wqe += sizeof (struct mthca_atomic_seg);
|
||||
size += (sizeof (struct mthca_raddr_seg) +
|
||||
sizeof (struct mthca_atomic_seg)) / 16;
|
||||
|
@ -2044,11 +2050,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
case IB_WR_RDMA_READ:
|
||||
case IB_WR_RDMA_WRITE:
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
((struct mthca_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.rdma.remote_addr);
|
||||
((struct mthca_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.rdma.rkey);
|
||||
((struct mthca_raddr_seg *) wqe)->reserved = 0;
|
||||
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
|
||||
wr->wr.rdma.rkey);
|
||||
wqe += sizeof (struct mthca_raddr_seg);
|
||||
size += sizeof (struct mthca_raddr_seg) / 16;
|
||||
break;
|
||||
|
@ -2064,11 +2067,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
switch (wr->opcode) {
|
||||
case IB_WR_RDMA_WRITE:
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
((struct mthca_raddr_seg *) wqe)->raddr =
|
||||
cpu_to_be64(wr->wr.rdma.remote_addr);
|
||||
((struct mthca_raddr_seg *) wqe)->rkey =
|
||||
cpu_to_be32(wr->wr.rdma.rkey);
|
||||
((struct mthca_raddr_seg *) wqe)->reserved = 0;
|
||||
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
|
||||
wr->wr.rdma.rkey);
|
||||
wqe += sizeof (struct mthca_raddr_seg);
|
||||
size += sizeof (struct mthca_raddr_seg) / 16;
|
||||
break;
|
||||
|
@ -2081,13 +2081,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
break;
|
||||
|
||||
case UD:
|
||||
memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
|
||||
to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
|
||||
((struct mthca_arbel_ud_seg *) wqe)->dqpn =
|
||||
cpu_to_be32(wr->wr.ud.remote_qpn);
|
||||
((struct mthca_arbel_ud_seg *) wqe)->qkey =
|
||||
cpu_to_be32(wr->wr.ud.remote_qkey);
|
||||
|
||||
set_arbel_ud_seg(wqe, wr);
|
||||
wqe += sizeof (struct mthca_arbel_ud_seg);
|
||||
size += sizeof (struct mthca_arbel_ud_seg) / 16;
|
||||
break;
|
||||
|
@ -2113,12 +2107,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
}
|
||||
|
||||
for (i = 0; i < wr->num_sge; ++i) {
|
||||
((struct mthca_data_seg *) wqe)->byte_count =
|
||||
cpu_to_be32(wr->sg_list[i].length);
|
||||
((struct mthca_data_seg *) wqe)->lkey =
|
||||
cpu_to_be32(wr->sg_list[i].lkey);
|
||||
((struct mthca_data_seg *) wqe)->addr =
|
||||
cpu_to_be64(wr->sg_list[i].addr);
|
||||
mthca_set_data_seg(wqe, wr->sg_list + i);
|
||||
wqe += sizeof (struct mthca_data_seg);
|
||||
size += sizeof (struct mthca_data_seg) / 16;
|
||||
}
|
||||
|
@ -2151,7 +2140,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
((wr->send_flags & IB_SEND_FENCE) ?
|
||||
MTHCA_NEXT_FENCE : 0));
|
||||
|
||||
if (!size0) {
|
||||
if (!nreq) {
|
||||
size0 = size;
|
||||
op0 = mthca_opcode[wr->opcode];
|
||||
f0 = wr->send_flags & IB_SEND_FENCE ?
|
||||
|
@ -2241,20 +2230,12 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
|||
}
|
||||
|
||||
for (i = 0; i < wr->num_sge; ++i) {
|
||||
((struct mthca_data_seg *) wqe)->byte_count =
|
||||
cpu_to_be32(wr->sg_list[i].length);
|
||||
((struct mthca_data_seg *) wqe)->lkey =
|
||||
cpu_to_be32(wr->sg_list[i].lkey);
|
||||
((struct mthca_data_seg *) wqe)->addr =
|
||||
cpu_to_be64(wr->sg_list[i].addr);
|
||||
mthca_set_data_seg(wqe, wr->sg_list + i);
|
||||
wqe += sizeof (struct mthca_data_seg);
|
||||
}
|
||||
|
||||
if (i < qp->rq.max_gs) {
|
||||
((struct mthca_data_seg *) wqe)->byte_count = 0;
|
||||
((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
|
||||
((struct mthca_data_seg *) wqe)->addr = 0;
|
||||
}
|
||||
if (i < qp->rq.max_gs)
|
||||
mthca_set_data_seg_inval(wqe);
|
||||
|
||||
qp->wrid[ind] = wr->wr_id;
|
||||
|
||||
|
|
|
@@ -543,20 +543,12 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
}

for (i = 0; i < wr->num_sge; ++i) {
	((struct mthca_data_seg *) wqe)->byte_count =
		cpu_to_be32(wr->sg_list[i].length);
	((struct mthca_data_seg *) wqe)->lkey =
		cpu_to_be32(wr->sg_list[i].lkey);
	((struct mthca_data_seg *) wqe)->addr =
		cpu_to_be64(wr->sg_list[i].addr);
	mthca_set_data_seg(wqe, wr->sg_list + i);
	wqe += sizeof (struct mthca_data_seg);
}

if (i < srq->max_gs) {
	((struct mthca_data_seg *) wqe)->byte_count = 0;
	((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	((struct mthca_data_seg *) wqe)->addr = 0;
}
if (i < srq->max_gs)
	mthca_set_data_seg_inval(wqe);

((struct mthca_next_seg *) prev_wqe)->nda_op =
	cpu_to_be32((ind << srq->wqe_shift) | 1);

@@ -662,20 +654,12 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
}

for (i = 0; i < wr->num_sge; ++i) {
	((struct mthca_data_seg *) wqe)->byte_count =
		cpu_to_be32(wr->sg_list[i].length);
	((struct mthca_data_seg *) wqe)->lkey =
		cpu_to_be32(wr->sg_list[i].lkey);
	((struct mthca_data_seg *) wqe)->addr =
		cpu_to_be64(wr->sg_list[i].addr);
	mthca_set_data_seg(wqe, wr->sg_list + i);
	wqe += sizeof (struct mthca_data_seg);
}

if (i < srq->max_gs) {
	((struct mthca_data_seg *) wqe)->byte_count = 0;
	((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	((struct mthca_data_seg *) wqe)->addr = 0;
}
if (i < srq->max_gs)
	mthca_set_data_seg_inval(wqe);

srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
|
@ -113,4 +113,19 @@ struct mthca_mlx_seg {
|
|||
__be16 vcrc;
|
||||
};
|
||||
|
||||
static __always_inline void mthca_set_data_seg(struct mthca_data_seg *dseg,
|
||||
struct ib_sge *sg)
|
||||
{
|
||||
dseg->byte_count = cpu_to_be32(sg->length);
|
||||
dseg->lkey = cpu_to_be32(sg->lkey);
|
||||
dseg->addr = cpu_to_be64(sg->addr);
|
||||
}
|
||||
|
||||
static __always_inline void mthca_set_data_seg_inval(struct mthca_data_seg *dseg)
|
||||
{
|
||||
dseg->byte_count = 0;
|
||||
dseg->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
|
||||
dseg->addr = 0;
|
||||
}
|
||||
|
||||
#endif /* MTHCA_WQE_H */
|
||||
|
|
|
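A minimal usage sketch of the two new mthca_wqe.h helpers above; the function below is hypothetical, but it mirrors the receive-posting loops in the mthca QP/SRQ hunks elsewhere in this merge, which supply the wqe buffer, the work request and the queue's max_gs:

/* Assumes the mthca driver types above; not part of the patch. */
static void build_recv_data_segs(void *wqe, struct ib_recv_wr *wr, int max_gs)
{
	int i;

	for (i = 0; i < wr->num_sge; ++i) {
		mthca_set_data_seg(wqe, wr->sg_list + i);
		wqe += sizeof (struct mthca_data_seg);
	}

	/* Flag the first unused slot (zero byte_count, MTHCA_INVAL_LKEY),
	 * as the posting paths above do for short scatter lists. */
	if (i < max_gs)
		mthca_set_data_seg_inval(wqe);
}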
@@ -310,8 +310,6 @@ int iser_conn_init(struct iser_conn **ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);

void iser_conn_release(struct iser_conn *ib_conn);

void iser_rcv_completion(struct iser_desc *desc,
                         unsigned long dto_xfer_len);

@@ -329,9 +327,6 @@ void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction);

int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
                                 enum iser_data_dir cmd_dir);

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
                                     enum iser_data_dir cmd_dir);
@@ -103,7 +103,7 @@ void iser_reg_single(struct iser_device *device,
/**
 * iser_start_rdma_unaligned_sg
 */
int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                                 enum iser_data_dir cmd_dir)
{
int dma_nents;
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Frees all conn objects and deallocs conn descriptor
|
||||
*/
|
||||
static void iser_conn_release(struct iser_conn *ib_conn)
|
||||
{
|
||||
struct iser_device *device = ib_conn->device;
|
||||
|
||||
BUG_ON(ib_conn->state != ISER_CONN_DOWN);
|
||||
|
||||
mutex_lock(&ig.connlist_mutex);
|
||||
list_del(&ib_conn->conn_list);
|
||||
mutex_unlock(&ig.connlist_mutex);
|
||||
|
||||
iser_free_ib_conn_res(ib_conn);
|
||||
ib_conn->device = NULL;
|
||||
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
|
||||
if (device != NULL)
|
||||
iser_device_try_release(device);
|
||||
if (ib_conn->iser_conn)
|
||||
ib_conn->iser_conn->ib_conn = NULL;
|
||||
kfree(ib_conn);
|
||||
}
|
||||
|
||||
/**
|
||||
* triggers start of the disconnect procedures and wait for them to be done
|
||||
*/
|
||||
|
@ -549,30 +572,6 @@ connect_failure:
|
|||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* Frees all conn objects and deallocs conn descriptor
|
||||
*/
|
||||
void iser_conn_release(struct iser_conn *ib_conn)
|
||||
{
|
||||
struct iser_device *device = ib_conn->device;
|
||||
|
||||
BUG_ON(ib_conn->state != ISER_CONN_DOWN);
|
||||
|
||||
mutex_lock(&ig.connlist_mutex);
|
||||
list_del(&ib_conn->conn_list);
|
||||
mutex_unlock(&ig.connlist_mutex);
|
||||
|
||||
iser_free_ib_conn_res(ib_conn);
|
||||
ib_conn->device = NULL;
|
||||
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
|
||||
if (device != NULL)
|
||||
iser_device_try_release(device);
|
||||
if (ib_conn->iser_conn)
|
||||
ib_conn->iser_conn->ib_conn = NULL;
|
||||
kfree(ib_conn);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* iser_reg_page_vec - Register physical memory
|
||||
*
|
||||
|
|
|
@ -30,41 +30,133 @@
|
|||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
|
||||
void mlx4_handle_catas_err(struct mlx4_dev *dev)
|
||||
enum {
|
||||
MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
|
||||
};
|
||||
|
||||
static DEFINE_SPINLOCK(catas_lock);
|
||||
|
||||
static LIST_HEAD(catas_list);
|
||||
static struct workqueue_struct *catas_wq;
|
||||
static struct work_struct catas_work;
|
||||
|
||||
static int internal_err_reset = 1;
|
||||
module_param(internal_err_reset, int, 0644);
|
||||
MODULE_PARM_DESC(internal_err_reset,
|
||||
"Reset device on internal errors if non-zero (default 1)");
|
||||
|
||||
static void dump_err_buf(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
||||
int i;
|
||||
|
||||
mlx4_err(dev, "Catastrophic error detected:\n");
|
||||
mlx4_err(dev, "Internal error detected:\n");
|
||||
for (i = 0; i < priv->fw.catas_size; ++i)
|
||||
mlx4_err(dev, " buf[%02x]: %08x\n",
|
||||
i, swab32(readl(priv->catas_err.map + i)));
|
||||
|
||||
mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
|
||||
}
|
||||
|
||||
void mlx4_map_catas_buf(struct mlx4_dev *dev)
|
||||
static void poll_catas(unsigned long dev_ptr)
|
||||
{
|
||||
struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
||||
if (readl(priv->catas_err.map)) {
|
||||
dump_err_buf(dev);
|
||||
|
||||
mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
|
||||
|
||||
if (internal_err_reset) {
|
||||
spin_lock(&catas_lock);
|
||||
list_add(&priv->catas_err.list, &catas_list);
|
||||
spin_unlock(&catas_lock);
|
||||
|
||||
queue_work(catas_wq, &catas_work);
|
||||
}
|
||||
} else
|
||||
mod_timer(&priv->catas_err.timer,
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
|
||||
}
|
||||
|
||||
static void catas_reset(struct work_struct *work)
|
||||
{
|
||||
struct mlx4_priv *priv, *tmppriv;
|
||||
struct mlx4_dev *dev;
|
||||
|
||||
LIST_HEAD(tlist);
|
||||
int ret;
|
||||
|
||||
spin_lock_irq(&catas_lock);
|
||||
list_splice_init(&catas_list, &tlist);
|
||||
spin_unlock_irq(&catas_lock);
|
||||
|
||||
list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
|
||||
ret = mlx4_restart_one(priv->dev.pdev);
|
||||
dev = &priv->dev;
|
||||
if (ret)
|
||||
mlx4_err(dev, "Reset failed (%d)\n", ret);
|
||||
else
|
||||
mlx4_dbg(dev, "Reset succeeded\n");
|
||||
}
|
||||
}
|
||||
|
||||
void mlx4_start_catas_poll(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
unsigned long addr;
|
||||
|
||||
INIT_LIST_HEAD(&priv->catas_err.list);
|
||||
init_timer(&priv->catas_err.timer);
|
||||
priv->catas_err.map = NULL;
|
||||
|
||||
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
|
||||
priv->fw.catas_offset;
|
||||
|
||||
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
|
||||
if (!priv->catas_err.map)
|
||||
mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
|
||||
if (!priv->catas_err.map) {
|
||||
mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
|
||||
addr);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
|
||||
priv->catas_err.timer.data = (unsigned long) dev;
|
||||
priv->catas_err.timer.function = poll_catas;
|
||||
priv->catas_err.timer.expires =
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
|
||||
add_timer(&priv->catas_err.timer);
|
||||
}
|
||||
|
||||
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
||||
del_timer_sync(&priv->catas_err.timer);
|
||||
|
||||
if (priv->catas_err.map)
|
||||
iounmap(priv->catas_err.map);
|
||||
|
||||
spin_lock_irq(&catas_lock);
|
||||
list_del(&priv->catas_err.list);
|
||||
spin_unlock_irq(&catas_lock);
|
||||
}
|
||||
|
||||
int __init mlx4_catas_init(void)
|
||||
{
|
||||
INIT_WORK(&catas_work, catas_reset);
|
||||
|
||||
catas_wq = create_singlethread_workqueue("mlx4_err");
|
||||
if (!catas_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_catas_cleanup(void)
|
||||
{
|
||||
destroy_workqueue(catas_wq);
|
||||
}
|
||||
|
|
|
@ -89,14 +89,12 @@ struct mlx4_eq_context {
|
|||
(1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
|
||||
(1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
|
||||
(1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
|
||||
(1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
|
||||
(1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
|
||||
(1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
|
||||
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
|
||||
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
|
||||
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
|
||||
(1ull << MLX4_EVENT_TYPE_CMD))
|
||||
#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
|
||||
|
||||
struct mlx4_eqe {
|
||||
u8 reserved1;
|
||||
|

@@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)

    writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

-   for (i = 0; i < MLX4_EQ_CATAS; ++i)
+   for (i = 0; i < MLX4_NUM_EQ; ++i)
        work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

    return IRQ_RETVAL(work);

@@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
    return IRQ_HANDLED;
}

-static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
-{
-   mlx4_handle_catas_err(dev_ptr);
-
-   /* MSI-X vectors always belong to us */
-   return IRQ_HANDLED;
-}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                       int eq_num)
{

@@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)

    if (eq_table->have_irq)
        free_irq(dev->pdev->irq, dev);
-   for (i = 0; i < MLX4_EQ_CATAS; ++i)
+   for (i = 0; i < MLX4_NUM_EQ; ++i)
        if (eq_table->eq[i].have_irq)
            free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-   if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
-       free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
}

static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)

@@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
    if (dev->flags & MLX4_FLAG_MSI_X) {
        static const char *eq_name[] = {
            [MLX4_EQ_COMP] = DRV_NAME " (comp)",
-           [MLX4_EQ_ASYNC] = DRV_NAME " (async)",
-           [MLX4_EQ_CATAS] = DRV_NAME " (catas)"
+           [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
        };

-       err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
-                            &priv->eq_table.eq[MLX4_EQ_CATAS]);
-       if (err)
-           goto err_out_async;

-       for (i = 0; i < MLX4_EQ_CATAS; ++i) {
+       for (i = 0; i < MLX4_NUM_EQ; ++i) {
            err = request_irq(priv->eq_table.eq[i].irq,
                              mlx4_msi_x_interrupt,
                              0, eq_name[i], priv->eq_table.eq + i);
            if (err)
-               goto err_out_catas;
+               goto err_out_async;

            priv->eq_table.eq[i].have_irq = 1;
        }

-       err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
-                         mlx4_catas_interrupt, 0,
-                         eq_name[MLX4_EQ_CATAS], dev);
-       if (err)
-           goto err_out_catas;

-       priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
    } else {
        err = request_irq(dev->pdev->irq, mlx4_interrupt,
                          IRQF_SHARED, DRV_NAME, dev);

@@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
        mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

-   for (i = 0; i < MLX4_EQ_CATAS; ++i)
+   for (i = 0; i < MLX4_NUM_EQ; ++i)
        eq_set_ci(&priv->eq_table.eq[i], 1);

-   if (dev->flags & MLX4_FLAG_MSI_X) {
-       err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
-                         priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-       if (err)
-           mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
-                     priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
-   }

    return 0;

-err_out_catas:
-   mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);

err_out_async:
    mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

@@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
    struct mlx4_priv *priv = mlx4_priv(dev);
    int i;

-   if (dev->flags & MLX4_FLAG_MSI_X)
-       mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
-                   priv->eq_table.eq[MLX4_EQ_CATAS].eqn);

    mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

    mlx4_free_irqs(dev);

-   for (i = 0; i < MLX4_EQ_CATAS; ++i)
+   for (i = 0; i < MLX4_NUM_EQ; ++i)
        mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-   if (dev->flags & MLX4_FLAG_MSI_X)
-       mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);

    mlx4_unmap_clr_int(dev);

@@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev)
        mlx4_add_device(intf, priv);

    mutex_unlock(&intf_mutex);
+   mlx4_start_catas_poll(dev);

    return 0;
}

@@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_interface *intf;

+   mlx4_stop_catas_poll(dev);
    mutex_lock(&intf_mutex);

    list_for_each_entry(intf, &intf_list, list)

@@ -78,7 +78,7 @@ static const char mlx4_version[] __devinitdata =
static struct mlx4_profile default_profile = {
    .num_qp = 1 << 16,
    .num_srq = 1 << 16,
-   .rdmarc_per_qp = 4,
+   .rdmarc_per_qp = 1 << 4,
    .num_cq = 1 << 16,
    .num_mcg = 1 << 13,
    .num_mpt = 1 << 17,

@@ -583,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
        goto err_pd_table_free;
    }

-   mlx4_map_catas_buf(dev);

    err = mlx4_init_eq_table(dev);
    if (err) {
        mlx4_err(dev, "Failed to initialize "
                 "event queue table, aborting.\n");
-       goto err_catas_buf;
+       goto err_mr_table_free;
    }

    err = mlx4_cmd_use_events(dev);

@@ -659,8 +657,7 @@ err_cmd_poll:
err_eq_table_free:
    mlx4_cleanup_eq_table(dev);

-err_catas_buf:
-   mlx4_unmap_catas_buf(dev);
err_mr_table_free:
    mlx4_cleanup_mr_table(dev);

err_pd_table_free:

@@ -836,9 +833,6 @@ err_cleanup:
    mlx4_cleanup_cq_table(dev);
    mlx4_cmd_use_polling(dev);
    mlx4_cleanup_eq_table(dev);

-   mlx4_unmap_catas_buf(dev);

    mlx4_cleanup_mr_table(dev);
    mlx4_cleanup_pd_table(dev);
    mlx4_cleanup_uar_table(dev);

@@ -885,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
    mlx4_cleanup_cq_table(dev);
    mlx4_cmd_use_polling(dev);
    mlx4_cleanup_eq_table(dev);

-   mlx4_unmap_catas_buf(dev);

    mlx4_cleanup_mr_table(dev);
    mlx4_cleanup_pd_table(dev);

@@ -908,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
    }
}

+int mlx4_restart_one(struct pci_dev *pdev)
+{
+   mlx4_remove_one(pdev);
+   return mlx4_init_one(pdev, NULL);
+}

static struct pci_device_id mlx4_pci_table[] = {
    { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
    { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */

@@ -930,6 +927,10 @@ static int __init mlx4_init(void)
{
    int ret;

+   ret = mlx4_catas_init();
+   if (ret)
+       return ret;

    ret = pci_register_driver(&mlx4_driver);
    return ret < 0 ? ret : 0;
}

@@ -937,6 +938,7 @@ static int __init mlx4_init(void)
static void __exit mlx4_cleanup(void)
{
    pci_unregister_driver(&mlx4_driver);
+   mlx4_catas_cleanup();
}

module_init(mlx4_init);

@@ -39,6 +39,7 @@

#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/timer.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

@@ -67,7 +68,6 @@ enum {
enum {
    MLX4_EQ_ASYNC,
    MLX4_EQ_COMP,
-   MLX4_EQ_CATAS,
    MLX4_NUM_EQ
};

@@ -248,7 +248,8 @@ struct mlx4_mcg_table {

struct mlx4_catas_err {
    u32 __iomem *map;
-   int size;
+   struct timer_list timer;
+   struct list_head list;
};

struct mlx4_priv {

@@ -311,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);

-void mlx4_map_catas_buf(struct mlx4_dev *dev);
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev);

+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+int mlx4_catas_init(void);
+void mlx4_catas_cleanup(void);
+int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,