Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (42 commits)
  IB/qib: Add missing <linux/slab.h> include
  IB/ehca: Drop unnecessary NULL test
  RDMA/nes: Fix confusing if statement indentation
  IB/ehca: Init irq tasklet before irq can happen
  RDMA/nes: Fix misindented code
  RDMA/nes: Fix showing wqm_quanta
  RDMA/nes: Get rid of "set but not used" variables
  RDMA/nes: Read firmware version from correct place
  IB/srp: Export req_lim via sysfs
  IB/srp: Make receive buffer handling more robust
  IB/srp: Use print_hex_dump()
  IB: Rename RAW_ETY to RAW_ETHERTYPE
  RDMA/nes: Fix two sparse warnings
  RDMA/cxgb3: Make needlessly global iwch_l2t_send() static
  IB/iser: Make needlessly global iser_alloc_rx_descriptors() static
  RDMA/cxgb4: Add timeouts when waiting for FW responses
  IB/qib: Fix race between qib_error_qp() and receive packet processing
  IB/qib: Limit the number of packets processed per interrupt
  IB/qib: Allow writes to the diag_counters to be able to clear them
  IB/qib: Set cfgctxts to number of CPUs by default
  ...
Merged by Linus Torvalds, 2010-08-07 17:08:02 -07:00, commit 3cc08fc35d
47 changed files with 580 additions and 499 deletions

View File: drivers/infiniband/core/cm.c

@ -2409,10 +2409,12 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
msg_response = CM_MSG_RESPONSE_OTHER;
break;
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
default:
ret = -EINVAL;
goto error1;
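The new test narrows the ESTABLISHED case: an MRA is only a valid reply there while a received LAP is pending, and every other established connection now falls through to the -EINVAL path. A minimal sketch of that guarded-case-with-fallthrough shape, using hypothetical names:

	switch (state) {
	case ESTABLISHED:
		if (lap_state == LAP_RCVD) {
			/* valid: the MRA answers the pending LAP */
			prepare_mra();
			break;
		}
		/* no LAP pending: fall through to the error path */
	default:
		return -EINVAL;
	}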

View File: drivers/infiniband/core/user_mad.c

@ -1085,7 +1085,6 @@ err_cdev:
static void ib_umad_kill_port(struct ib_umad_port *port)
{
struct ib_umad_file *file;
int already_dead;
int id;
dev_set_drvdata(port->dev, NULL);
@ -1103,7 +1102,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
list_for_each_entry(file, &port->file_list, port_list) {
mutex_lock(&file->mutex);
already_dead = file->agents_dead;
file->agents_dead = 1;
mutex_unlock(&file->mutex);

View File: drivers/infiniband/core/verbs.c

@ -310,8 +310,8 @@ EXPORT_SYMBOL(ib_create_qp);
static const struct {
int valid;
enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETY + 1];
enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETY + 1];
enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1];
enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },

View File: drivers/infiniband/hw/cxgb3/iwch_cm.c

@ -137,7 +137,7 @@ static void stop_ep_timer(struct iwch_ep *ep)
put_ep(&ep->com);
}
int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
int error = 0;
struct cxio_rdev *rdev;

View File: drivers/infiniband/hw/cxgb3/iwch_qp.c

@ -371,7 +371,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
qhp->wq.sq_size_log2);
if (num_wrs <= 0) {
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
err = -ENOMEM;
goto out;
@ -554,7 +554,7 @@ int iwch_bind_mw(struct ib_qp *qp,
}
num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
qhp->wq.sq_size_log2);
if ((num_wrs) <= 0) {
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
return -ENOMEM;
}

View File: drivers/infiniband/hw/cxgb4/cm.c

@ -61,6 +61,10 @@ static char *states[] = {
NULL,
};
static int dack_mode;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
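For reference, module_param(dack_mode, int, 0644) should expose the new knob as /sys/module/iw_cxgb4/parameters/dack_mode, so the delayed-ACK mode can be given at module load time or flipped at runtime; the value is folded into the RX credit word in update_rx_credits() further down.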
@ -469,11 +473,12 @@ static int send_connect(struct c4iw_ep *ep)
__func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
opt0 = KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
@ -780,11 +785,11 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
}
if (ep->com.cm_id) {
PDBG("%s ep %p tid %u status %d\n", __func__, ep,
ep->hwtid, status);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
}
PDBG("%s ep %p tid %u status %d\n", __func__, ep,
ep->hwtid, status);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
if (status < 0) {
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
@ -845,8 +850,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
req->credit_dack = cpu_to_be32(credits);
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
F_RX_DACK_CHANGE |
V_RX_DACK_MODE(dack_mode));
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
}
@ -1264,6 +1271,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
opt0 = KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
@ -1287,7 +1295,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
ep->hwtid));
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
return;
@ -1344,7 +1352,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 rss_qid;
u32 mtu;
int step;
int txq_idx;
int txq_idx, ctrlq_idx;
parent_ep = lookup_stid(t, stid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@ -1376,6 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
txq_idx = cxgb4_port_idx(pdev) * step;
ctrlq_idx = cxgb4_port_idx(pdev);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
dev_put(pdev);
@ -1387,6 +1396,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(dst->neighbour->dev) * step];
@ -1426,6 +1436,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->rss_qid = rss_qid;
child_ep->mtu = mtu;
child_ep->txq_idx = txq_idx;
child_ep->ctrlq_idx = ctrlq_idx;
PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
tx_chan, smac_idx, rss_qid);
@ -1473,8 +1484,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
int closing = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
int start_timer = 0;
int stop_timer = 0;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@ -1511,7 +1520,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
wake_up(&ep->com.waitq);
break;
case FPDU_MODE:
start_timer = 1;
start_ep_timer(ep);
__state_set(&ep->com, CLOSING);
closing = 1;
peer_close_upcall(ep);
@ -1524,7 +1533,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
disconnect = 0;
break;
case MORIBUND:
stop_timer = 1;
stop_ep_timer(ep);
if (ep->com.cm_id && ep->com.qp) {
attrs.next_state = C4IW_QP_STATE_IDLE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@ -1547,10 +1556,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
}
if (start_timer)
start_ep_timer(ep);
if (stop_timer)
stop_ep_timer(ep);
if (disconnect)
c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
if (release)
@ -1579,7 +1584,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned long flags;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
int stop_timer = 0;
ep = lookup_tid(t, tid);
if (is_neg_adv_abort(req->status)) {
@ -1594,10 +1598,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case CONNECTING:
break;
case MPA_REQ_WAIT:
stop_timer = 1;
stop_ep_timer(ep);
break;
case MPA_REQ_SENT:
stop_timer = 1;
stop_ep_timer(ep);
connect_reply_upcall(ep, -ECONNRESET);
break;
case MPA_REP_SENT:
@ -1621,7 +1625,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case MORIBUND:
case CLOSING:
stop_timer = 1;
stop_ep_timer(ep);
/*FALLTHROUGH*/
case FPDU_MODE:
if (ep->com.cm_id && ep->com.qp) {
@ -1667,8 +1671,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
rpl->cmd = CPL_ABORT_NO_RST;
c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
if (stop_timer)
stop_ep_timer(ep);
if (release)
release_ep_resources(ep);
return 0;
@ -1683,7 +1685,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
int release = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
int stop_timer = 0;
ep = lookup_tid(t, tid);
@ -1697,7 +1698,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
__state_set(&ep->com, MORIBUND);
break;
case MORIBUND:
stop_timer = 1;
stop_ep_timer(ep);
if ((ep->com.cm_id) && (ep->com.qp)) {
attrs.next_state = C4IW_QP_STATE_IDLE;
c4iw_modify_qp(ep->com.qp->rhp,
@ -1717,8 +1718,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
break;
}
spin_unlock_irqrestore(&ep->com.lock, flags);
if (stop_timer)
stop_ep_timer(ep);
if (release)
release_ep_resources(ep);
return 0;
@ -1957,6 +1956,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->txq_idx = cxgb4_port_idx(pdev) * step;
step = ep->com.dev->rdev.lldi.nrxq /
ep->com.dev->rdev.lldi.nchan;
ep->ctrlq_idx = cxgb4_port_idx(pdev);
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
dev_put(pdev);
@ -1971,6 +1971,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
step = ep->com.dev->rdev.lldi.ntxq /
ep->com.dev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
step = ep->com.dev->rdev.lldi.nrxq /
ep->com.dev->rdev.lldi.nchan;
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
@ -2049,8 +2050,15 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail3;
/* wait for pass_open_rpl */
wait_event(ep->com.waitq, ep->com.rpl_done);
err = ep->com.rpl_err;
wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
if (ep->com.rpl_done)
err = ep->com.rpl_err;
else {
printk(KERN_ERR MOD "Device %s not responding!\n",
pci_name(ep->com.dev->rdev.lldi.pdev));
ep->com.dev->rdev.flags = T4_FATAL_ERROR;
err = -EIO;
}
if (!err) {
cm_id->provider_data = ep;
goto out;
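The switch from wait_event() to wait_event_timeout() is the heart of this commit: never block forever on a firmware reply, and treat a timeout as a dead device. The shape of the pattern, as a sketch with hypothetical helper names:

	long left = wait_event_timeout(waitq, reply_done, timeout_jiffies);

	if (!left) {                    /* timed out: FW is not responding */
		mark_device_fatal(dev); /* hypothetical: sets T4_FATAL_ERROR */
		return -EIO;
	}
	return reply_status;            /* FW answered; propagate its status */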
@ -2079,10 +2087,17 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
err = listen_stop(ep);
if (err)
goto done;
wait_event(ep->com.waitq, ep->com.rpl_done);
wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
if (ep->com.rpl_done)
err = ep->com.rpl_err;
else {
printk(KERN_ERR MOD "Device %s not responding!\n",
pci_name(ep->com.dev->rdev.lldi.pdev));
ep->com.dev->rdev.flags = T4_FATAL_ERROR;
err = -EIO;
}
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
err = ep->com.rpl_err;
cm_id->rem_ref(cm_id);
c4iw_put_ep(&ep->com);
return err;
@ -2095,8 +2110,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
int close = 0;
int fatal = 0;
struct c4iw_rdev *rdev;
int start_timer = 0;
int stop_timer = 0;
spin_lock_irqsave(&ep->com.lock, flags);
@ -2120,7 +2133,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
ep->com.state = ABORTING;
else {
ep->com.state = CLOSING;
start_timer = 1;
start_ep_timer(ep);
}
set_bit(CLOSE_SENT, &ep->com.flags);
break;
@ -2128,7 +2141,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
close = 1;
if (abrupt) {
stop_timer = 1;
stop_ep_timer(ep);
ep->com.state = ABORTING;
} else
ep->com.state = MORIBUND;
@ -2146,10 +2159,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
}
spin_unlock_irqrestore(&ep->com.lock, flags);
if (start_timer)
start_ep_timer(ep);
if (stop_timer)
stop_ep_timer(ep);
if (close) {
if (abrupt)
ret = abort_connection(ep, NULL, gfp);
@ -2244,7 +2253,7 @@ static void process_work(struct work_struct *work)
{
struct sk_buff *skb = NULL;
struct c4iw_dev *dev;
struct cpl_act_establish *rpl = cplhdr(skb);
struct cpl_act_establish *rpl;
unsigned int opcode;
int ret;

View File: drivers/infiniband/hw/cxgb4/cq.c

@ -43,7 +43,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
int ret;
wr_len = sizeof *res_wr + sizeof *res;
skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@ -118,7 +118,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + sizeof *res;
skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err4;

View File: drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@ -619,6 +619,7 @@ struct c4iw_ep {
u16 plen;
u16 rss_qid;
u16 txq_idx;
u16 ctrlq_idx;
u8 tos;
};

View File: drivers/infiniband/hw/cxgb4/mem.c

@ -59,7 +59,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
wr_len = roundup(sizeof *req + sizeof *sc +
roundup(copy_len, T4_ULPTX_MIN_IO), 16);
skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

View File: drivers/infiniband/hw/cxgb4/qp.c

@ -130,7 +130,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + 2 * sizeof *res;
skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err7;
@ -162,7 +162,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
V_FW_RI_RES_WR_FBMIN(3) |
V_FW_RI_RES_WR_FBMIN(2) |
V_FW_RI_RES_WR_FBMAX(3) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@ -185,7 +185,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
V_FW_RI_RES_WR_FBMIN(3) |
V_FW_RI_RES_WR_FBMIN(2) |
V_FW_RI_RES_WR_FBMAX(3) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@ -235,12 +235,78 @@ err1:
return -ENOMEM;
}
static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
struct ib_send_wr *wr, int max, u32 *plenp)
{
u8 *dstp, *srcp;
u32 plen = 0;
int i;
int rem, len;
dstp = (u8 *)immdp->data;
for (i = 0; i < wr->num_sge; i++) {
if ((plen + wr->sg_list[i].length) > max)
return -EMSGSIZE;
srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
plen += wr->sg_list[i].length;
rem = wr->sg_list[i].length;
while (rem) {
if (dstp == (u8 *)&sq->queue[sq->size])
dstp = (u8 *)sq->queue;
if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
len = rem;
else
len = (u8 *)&sq->queue[sq->size] - dstp;
memcpy(dstp, srcp, len);
dstp += len;
srcp += len;
rem -= len;
}
}
immdp->op = FW_RI_DATA_IMMD;
immdp->r1 = 0;
immdp->r2 = 0;
immdp->immdlen = cpu_to_be32(plen);
*plenp = plen;
return 0;
}
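The inner while loop of build_immd() is a ring-buffer copy: the immediate data is written straight into the SQ memory and may wrap from the end of the queue back to the start. The same idea in isolation, as a standalone sketch with hypothetical names:

	/* Copy len bytes to *dst inside a circular buffer [base, base + size). */
	static void ring_copy(u8 *base, size_t size, u8 **dst,
			      const u8 *src, size_t len)
	{
		while (len) {
			size_t space, chunk;

			if (*dst == base + size)  /* reached the end: wrap */
				*dst = base;
			space = base + size - *dst;  /* bytes before the end */
			chunk = len < space ? len : space;
			memcpy(*dst, src, chunk);
			src += chunk;
			*dst += chunk;
			len -= chunk;
		}
	}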
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
int num_sge, u32 *plenp)
{
int i;
u32 plen = 0;
__be64 *flitp = (__be64 *)isglp->sge;
for (i = 0; i < num_sge; i++) {
if ((plen + sg_list[i].length) < plen)
return -EMSGSIZE;
plen += sg_list[i].length;
*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
sg_list[i].length);
if (++flitp == queue_end)
flitp = queue_start;
*flitp = cpu_to_be64(sg_list[i].addr);
if (++flitp == queue_end)
flitp = queue_start;
}
isglp->op = FW_RI_DATA_ISGL;
isglp->r1 = 0;
isglp->nsge = cpu_to_be16(num_sge);
isglp->r2 = 0;
if (plenp)
*plenp = plen;
return 0;
}
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
struct ib_send_wr *wr, u8 *len16)
{
u32 plen;
int size;
u8 *datap;
int ret;
if (wr->num_sge > T4_MAX_SEND_SGE)
return -EINVAL;
@ -267,43 +333,23 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
default:
return -EINVAL;
}
plen = 0;
if (wr->num_sge) {
if (wr->send_flags & IB_SEND_INLINE) {
datap = (u8 *)wqe->send.u.immd_src[0].data;
for (i = 0; i < wr->num_sge; i++) {
if ((plen + wr->sg_list[i].length) >
T4_MAX_SEND_INLINE) {
return -EMSGSIZE;
}
plen += wr->sg_list[i].length;
memcpy(datap,
(void *)(unsigned long)wr->sg_list[i].addr,
wr->sg_list[i].length);
datap += wr->sg_list[i].length;
}
wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
wqe->send.u.immd_src[0].r1 = 0;
wqe->send.u.immd_src[0].r2 = 0;
wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
ret = build_immd(sq, wqe->send.u.immd_src, wr,
T4_MAX_SEND_INLINE, &plen);
if (ret)
return ret;
size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
plen;
} else {
for (i = 0; i < wr->num_sge; i++) {
if ((plen + wr->sg_list[i].length) < plen)
return -EMSGSIZE;
plen += wr->sg_list[i].length;
wqe->send.u.isgl_src[0].sge[i].stag =
cpu_to_be32(wr->sg_list[i].lkey);
wqe->send.u.isgl_src[0].sge[i].len =
cpu_to_be32(wr->sg_list[i].length);
wqe->send.u.isgl_src[0].sge[i].to =
cpu_to_be64(wr->sg_list[i].addr);
}
wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
wqe->send.u.isgl_src[0].r1 = 0;
wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
wqe->send.u.isgl_src[0].r2 = 0;
ret = build_isgl((__be64 *)sq->queue,
(__be64 *)&sq->queue[sq->size],
wqe->send.u.isgl_src,
wr->sg_list, wr->num_sge, &plen);
if (ret)
return ret;
size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
wr->num_sge * sizeof(struct fw_ri_sge);
}
@ -313,62 +359,40 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
wqe->send.u.immd_src[0].r2 = 0;
wqe->send.u.immd_src[0].immdlen = 0;
size = sizeof wqe->send + sizeof(struct fw_ri_immd);
plen = 0;
}
*len16 = DIV_ROUND_UP(size, 16);
wqe->send.plen = cpu_to_be32(plen);
return 0;
}
static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
struct ib_send_wr *wr, u8 *len16)
{
int i;
u32 plen;
int size;
u8 *datap;
int ret;
if (wr->num_sge > T4_MAX_WRITE_SGE)
if (wr->num_sge > T4_MAX_SEND_SGE)
return -EINVAL;
wqe->write.r2 = 0;
wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
plen = 0;
if (wr->num_sge) {
if (wr->send_flags & IB_SEND_INLINE) {
datap = (u8 *)wqe->write.u.immd_src[0].data;
for (i = 0; i < wr->num_sge; i++) {
if ((plen + wr->sg_list[i].length) >
T4_MAX_WRITE_INLINE) {
return -EMSGSIZE;
}
plen += wr->sg_list[i].length;
memcpy(datap,
(void *)(unsigned long)wr->sg_list[i].addr,
wr->sg_list[i].length);
datap += wr->sg_list[i].length;
}
wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
wqe->write.u.immd_src[0].r1 = 0;
wqe->write.u.immd_src[0].r2 = 0;
wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
ret = build_immd(sq, wqe->write.u.immd_src, wr,
T4_MAX_WRITE_INLINE, &plen);
if (ret)
return ret;
size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
plen;
} else {
for (i = 0; i < wr->num_sge; i++) {
if ((plen + wr->sg_list[i].length) < plen)
return -EMSGSIZE;
plen += wr->sg_list[i].length;
wqe->write.u.isgl_src[0].sge[i].stag =
cpu_to_be32(wr->sg_list[i].lkey);
wqe->write.u.isgl_src[0].sge[i].len =
cpu_to_be32(wr->sg_list[i].length);
wqe->write.u.isgl_src[0].sge[i].to =
cpu_to_be64(wr->sg_list[i].addr);
}
wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
wqe->write.u.isgl_src[0].r1 = 0;
wqe->write.u.isgl_src[0].nsge =
cpu_to_be16(wr->num_sge);
wqe->write.u.isgl_src[0].r2 = 0;
ret = build_isgl((__be64 *)sq->queue,
(__be64 *)&sq->queue[sq->size],
wqe->write.u.isgl_src,
wr->sg_list, wr->num_sge, &plen);
if (ret)
return ret;
size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
wr->num_sge * sizeof(struct fw_ri_sge);
}
@ -378,6 +402,7 @@ static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
wqe->write.u.immd_src[0].r2 = 0;
wqe->write.u.immd_src[0].immdlen = 0;
size = sizeof wqe->write + sizeof(struct fw_ri_immd);
plen = 0;
}
*len16 = DIV_ROUND_UP(size, 16);
wqe->write.plen = cpu_to_be32(plen);
@ -416,29 +441,13 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
struct ib_recv_wr *wr, u8 *len16)
{
int i;
int plen = 0;
int ret;
for (i = 0; i < wr->num_sge; i++) {
if ((plen + wr->sg_list[i].length) < plen)
return -EMSGSIZE;
plen += wr->sg_list[i].length;
wqe->recv.isgl.sge[i].stag =
cpu_to_be32(wr->sg_list[i].lkey);
wqe->recv.isgl.sge[i].len =
cpu_to_be32(wr->sg_list[i].length);
wqe->recv.isgl.sge[i].to =
cpu_to_be64(wr->sg_list[i].addr);
}
for (; i < T4_MAX_RECV_SGE; i++) {
wqe->recv.isgl.sge[i].stag = 0;
wqe->recv.isgl.sge[i].len = 0;
wqe->recv.isgl.sge[i].to = 0;
}
wqe->recv.isgl.op = FW_RI_DATA_ISGL;
wqe->recv.isgl.r1 = 0;
wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
wqe->recv.isgl.r2 = 0;
ret = build_isgl((__be64 *)qhp->wq.rq.queue,
(__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
&wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
if (ret)
return ret;
*len16 = DIV_ROUND_UP(sizeof wqe->recv +
wr->num_sge * sizeof(struct fw_ri_sge), 16);
return 0;
@ -547,7 +556,9 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
break;
}
wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
fw_flags = 0;
if (wr->send_flags & IB_SEND_SOLICITED)
fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
@ -564,12 +575,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->opcode = FW_RI_SEND;
else
swsqe->opcode = FW_RI_SEND_WITH_INV;
err = build_rdma_send(wqe, wr, &len16);
err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
break;
case IB_WR_RDMA_WRITE:
fw_opcode = FW_RI_RDMA_WRITE_WR;
swsqe->opcode = FW_RI_RDMA_WRITE;
err = build_rdma_write(wqe, wr, &len16);
err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
break;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_READ_WITH_INV:
@ -619,8 +630,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->opcode, swsqe->read_len);
wr = wr->next;
num_wrs--;
t4_sq_produce(&qhp->wq);
idx++;
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
if (t4_wq_db_enabled(&qhp->wq))
t4_ring_sq_db(&qhp->wq, idx);
@ -656,7 +667,9 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
*bad_wr = wr;
break;
}
wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
qhp->wq.rq.wq_pidx *
T4_EQ_ENTRY_SIZE);
if (num_wrs)
err = build_rdma_recv(qhp, wqe, wr, &len16);
else
@ -675,15 +688,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wqe->recv.r2[1] = 0;
wqe->recv.r2[2] = 0;
wqe->recv.len16 = len16;
if (len16 < 5)
wqe->flits[8] = 0;
PDBG("%s cookie 0x%llx pidx %u\n", __func__,
(unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
t4_rq_produce(&qhp->wq);
t4_rq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wr = wr->next;
num_wrs--;
idx++;
}
if (t4_wq_db_enabled(&qhp->wq))
t4_ring_rq_db(&qhp->wq, idx);
@ -951,7 +961,8 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
__flush_qp(qhp, rchp, schp, flag);
}
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
struct c4iw_ep *ep)
{
struct fw_ri_wr *wqe;
int ret;
@ -959,12 +970,12 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
struct sk_buff *skb;
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
qhp->ep->hwtid);
ep->hwtid);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
@ -972,7 +983,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_OP(FW_RI_INIT_WR) |
FW_WR_COMPL(1));
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID(qhp->ep->hwtid) |
FW_WR_FLOWID(ep->hwtid) |
FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
wqe->cookie = (u64)&wr_wait;
@ -1035,7 +1046,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
qhp->ep->hwtid);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
@ -1202,17 +1213,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_CLOSING:
BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
qhp->attr.state = C4IW_QP_STATE_CLOSING;
ep = qhp->ep;
if (!internal) {
abort = 0;
disconnect = 1;
ep = qhp->ep;
c4iw_get_ep(&ep->com);
}
spin_unlock_irqrestore(&qhp->lock, flag);
ret = rdma_fini(rhp, qhp);
ret = rdma_fini(rhp, qhp, ep);
spin_lock_irqsave(&qhp->lock, flag);
if (ret) {
ep = qhp->ep;
c4iw_get_ep(&ep->com);
disconnect = abort = 1;
goto err;

View File: drivers/infiniband/hw/cxgb4/t4.h

@ -65,10 +65,10 @@ struct t4_status_page {
u8 db_off;
};
#define T4_EQ_SIZE 64
#define T4_EQ_ENTRY_SIZE 64
#define T4_SQ_NUM_SLOTS 4
#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
@ -84,7 +84,7 @@ struct t4_status_page {
#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
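A quick sanity check on the rename (arithmetic only, not from the patch): with T4_EQ_ENTRY_SIZE = 64, T4_SQ_NUM_BYTES = 64 * 4 = 256 and T4_RQ_NUM_BYTES = 64 * 2 = 128, exactly as before. The old name T4_EQ_SIZE read like a count of entries; the new one makes it explicit that 64 is the byte size of a single hardware slot, the unit the new wq_pidx accounting below is kept in.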
union t4_wr {
@ -97,20 +97,18 @@ union t4_wr {
struct fw_ri_fr_nsmr_wr fr;
struct fw_ri_inv_lstag_wr inv;
struct t4_status_page status;
__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
union t4_recv_wr {
struct fw_ri_recv_wr recv;
struct t4_status_page status;
__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
int slots_used;
wqe->send.opcode = (u8)opcode;
wqe->send.flags = flags;
wqe->send.wrid = wrid;
@ -118,12 +116,6 @@ static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
wqe->send.r1[1] = 0;
wqe->send.r1[2] = 0;
wqe->send.len16 = len16;
slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
while (slots_used < T4_SQ_NUM_SLOTS) {
wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
slots_used++;
}
}
/* CQE/AE status codes */
@ -289,6 +281,7 @@ struct t4_sq {
u16 size;
u16 cidx;
u16 pidx;
u16 wq_pidx;
};
struct t4_swrqe {
@ -310,6 +303,7 @@ struct t4_rq {
u16 size;
u16 cidx;
u16 pidx;
u16 wq_pidx;
};
struct t4_wq {
@ -340,11 +334,14 @@ static inline u32 t4_rq_avail(struct t4_wq *wq)
return wq->rq.size - 1 - wq->rq.in_use;
}
static inline void t4_rq_produce(struct t4_wq *wq)
static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
wq->rq.in_use++;
if (++wq->rq.pidx == wq->rq.size)
wq->rq.pidx = 0;
wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}
static inline void t4_rq_consume(struct t4_wq *wq)
@ -370,11 +367,14 @@ static inline u32 t4_sq_avail(struct t4_wq *wq)
return wq->sq.size - 1 - wq->sq.in_use;
}
static inline void t4_sq_produce(struct t4_wq *wq)
static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
wq->sq.in_use++;
if (++wq->sq.pidx == wq->sq.size)
wq->sq.pidx = 0;
wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}
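A worked example of the two-index scheme, assuming T4_EQ_ENTRY_SIZE = 64: a WR with len16 = 6 occupies 6 * 16 = 96 bytes, so t4_sq_produce() advances wq_pidx by DIV_ROUND_UP(96, 64) = 2 hardware slots while pidx, the per-WR software index, still moves by one; the wrap keeps wq_pidx below size * T4_SQ_NUM_SLOTS. The matching idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE) in c4iw_post_send() means the doorbell is now rung in the same slot units, which is why the inc *= T4_SQ_NUM_SLOTS scaling disappears from t4_ring_sq_db() below.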
static inline void t4_sq_consume(struct t4_wq *wq)
@ -386,14 +386,12 @@ static inline void t4_sq_consume(struct t4_wq *wq)
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
{
inc *= T4_SQ_NUM_SLOTS;
wmb();
writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
{
inc *= T4_RQ_NUM_SLOTS;
wmb();
writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}

View File: drivers/infiniband/hw/cxgb4/t4fw_ri_api.h

@ -826,4 +826,14 @@ struct ulptx_idata {
#define S_ULPTX_NSGE 0
#define M_ULPTX_NSGE 0xFFFF
#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
#define S_RX_DACK_MODE 29
#define M_RX_DACK_MODE 0x3
#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
#define S_RX_DACK_CHANGE 31
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
#endif /* _T4FW_RI_API_H_ */
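Read together with the update_rx_credits() hunk in cm.c above, the new bits compose the CPL_RX_DATA_ACK credit word roughly like this (an illustrative sketch, not driver code):

	u32 word = credits
		 | RX_FORCE_ACK(1)             /* pre-existing flag */
		 | F_RX_DACK_CHANGE            /* bit 31: apply a new dack mode */
		 | V_RX_DACK_MODE(dack_mode);  /* bits 30:29: the mode itself */

	req->credit_dack = cpu_to_be32(word);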

View File: drivers/infiniband/hw/ehca/ehca_eq.c

@ -122,21 +122,21 @@ int ehca_create_eq(struct ehca_shca *shca,
/* register interrupt handlers and initialize work queues */
if (type == EHCA_EQ) {
tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
IRQF_DISABLED, "ehca_eq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
} else if (type == EHCA_NEQ) {
tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
IRQF_DISABLED, "ehca_neq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
}
eq->is_initialized = 1;
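The reordering matters because the interrupt can fire the moment ibmebus_request_irq() returns, and the handler schedules the tasklet; initializing the tasklet afterwards races with that first interrupt. The safe ordering in generic form (a sketch using the plain request_irq() API):

	tasklet_init(&eq->interrupt_task, bottom_half, (unsigned long)data);
	ret = request_irq(irq, top_half, 0, "dev_eq", data);
	/* from this point on the tasklet may already be scheduled */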

View File: drivers/infiniband/hw/ehca/ehca_main.c

@ -360,7 +360,8 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
* a firmware property, so it's valid across all adapters
*/
if (ehca_lock_hcalls == -1)
ehca_lock_hcalls = !(shca->hca_cap & HCA_CAP_H_ALLOC_RES_SYNC);
ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
shca->hca_cap);
/* translate supported MR page sizes; always support 4K */
shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;

View File: drivers/infiniband/hw/ehca/ehca_mrmw.c

@ -933,11 +933,6 @@ int ehca_unmap_fmr(struct list_head *fmr_list)
/* check all FMR belong to same SHCA, and check internal flag */
list_for_each_entry(ib_fmr, fmr_list, list) {
prev_shca = shca;
if (!ib_fmr) {
ehca_gen_err("bad fmr=%p in list", ib_fmr);
ret = -EINVAL;
goto unmap_fmr_exit0;
}
shca = container_of(ib_fmr->device, struct ehca_shca,
ib_device);
e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);

View File: drivers/infiniband/hw/ehca/ehca_qp.c

@ -251,7 +251,7 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
return ST_UD;
case IB_QPT_RAW_IPV6:
return -EINVAL;
case IB_QPT_RAW_ETY:
case IB_QPT_RAW_ETHERTYPE:
return -EINVAL;
default:
ehca_gen_err("Invalid ibqptype=%x", ibqptype);

View File: drivers/infiniband/hw/ehca/hcp_if.c

@ -269,6 +269,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
struct ehca_alloc_cq_parms *param)
{
int rc;
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
@ -283,8 +284,19 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
param->act_nr_of_entries = (u32)outs[3];
param->act_pages = (u32)outs[4];
if (ret == H_SUCCESS)
hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
if (ret == H_SUCCESS) {
rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
if (rc) {
ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
rc, outs[5]);
ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
cq->ipz_cq_handle.handle, /* r5 */
0, 0, 0, 0, 0);
ret = H_NO_MEM;
}
}
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
@ -295,6 +307,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_alloc_qp_parms *parms, int is_user)
{
int rc;
u64 ret;
u64 allocate_controls, max_r10_reg, r11, r12;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
@ -358,8 +371,19 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
parms->rqueue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
if (ret == H_SUCCESS)
hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
if (ret == H_SUCCESS) {
rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
if (rc) {
ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
rc, outs[6]);
ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
parms->qp_handle.handle, /* r5 */
0, 0, 0, 0, 0);
ret = H_NO_MEM;
}
}
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);

View File: drivers/infiniband/hw/ehca/hcp_phyp.c

@ -42,10 +42,9 @@
#include "ehca_classes.h"
#include "hipz_hw.h"
int hcall_map_page(u64 physaddr, u64 *mapaddr)
u64 hcall_map_page(u64 physaddr)
{
*mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
return 0;
return (u64)ioremap(physaddr, EHCA_PAGESIZE);
}
int hcall_unmap_page(u64 mapaddr)
@ -58,9 +57,9 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
u64 paddr_kernel, u64 paddr_user)
{
if (!is_user) {
int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
if (ret)
return ret;
galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
if (!galpas->kernel.fw_handle)
return -ENOMEM;
} else
galpas->kernel.fw_handle = 0;

View File: drivers/infiniband/hw/ehca/hcp_phyp.h

@ -83,7 +83,7 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
int hcp_galpas_dtor(struct h_galpas *galpas);
int hcall_map_page(u64 physaddr, u64 * mapaddr);
u64 hcall_map_page(u64 physaddr);
int hcall_unmap_page(u64 mapaddr);

View File: drivers/infiniband/hw/ipath/ipath_driver.c

@ -390,6 +390,8 @@ done:
ipath_enable_armlaunch(dd);
}
static void cleanup_device(struct ipath_devdata *dd);
static int __devinit ipath_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@ -616,8 +618,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
goto bail;
bail_irqsetup:
if (pdev->irq)
free_irq(pdev->irq, dd);
cleanup_device(dd);
if (dd->ipath_irq)
dd->ipath_f_free_irq(dd);
if (dd->ipath_f_cleanup)
dd->ipath_f_cleanup(dd);
bail_iounmap:
iounmap((volatile void __iomem *) dd->ipath_kregbase);
@ -635,7 +642,7 @@ bail:
return ret;
}
static void __devexit cleanup_device(struct ipath_devdata *dd)
static void cleanup_device(struct ipath_devdata *dd)
{
int port;
struct ipath_portdata **tmp;

View File: drivers/infiniband/hw/mthca/mthca_cmd.c

@ -1817,7 +1817,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
case IB_QPT_RAW_IPV6:
op_mod = 2;
break;
case IB_QPT_RAW_ETY:
case IB_QPT_RAW_ETHERTYPE:
op_mod = 3;
break;
default:

View File: drivers/infiniband/hw/nes/nes.c

@ -110,8 +110,8 @@ static unsigned int sysfs_nonidx_addr;
static unsigned int sysfs_idx_addr;
static struct pci_device_id nes_pci_table[] = {
{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
{ PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), },
{ PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), },
{0}
};
@ -259,13 +259,11 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
unsigned long flags;
struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 qp_id;
atomic_inc(&qps_destroyed);
/* Free the control structures */
qp_id = nesqp->hwqp.qp_id;
if (nesqp->pbl_vbase) {
pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
@ -441,7 +439,6 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
struct net_device *netdev = NULL;
struct nes_device *nesdev = NULL;
int ret = 0;
struct nes_vnic *nesvnic = NULL;
void __iomem *mmio_regs = NULL;
u8 hw_rev;
@ -664,25 +661,21 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
nes_notifiers_registered++;
/* Initialize network devices */
if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) {
goto bail7;
}
if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
goto bail7;
/* Register network device */
ret = register_netdev(netdev);
if (ret) {
printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
nes_netdev_destroy(netdev);
goto bail7;
}
/* Register network device */
ret = register_netdev(netdev);
if (ret) {
printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
nes_netdev_destroy(netdev);
goto bail7;
}
nes_print_macaddr(netdev);
/* create a CM core for this netdev */
nesvnic = netdev_priv(netdev);
nesdev->netdev_count++;
nesdev->nesadapter->netdev_count++;
nes_print_macaddr(netdev);
nesdev->netdev_count++;
nesdev->nesadapter->netdev_count++;
printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
pci_name(pcidev));
@ -1104,7 +1097,7 @@ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
i++;
}
return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta);
return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value);
}

View File: drivers/infiniband/hw/nes/nes.h

@ -262,6 +262,7 @@ struct nes_device {
u16 base_doorbell_index;
u16 currcq_count;
u16 deepcq_count;
u8 iw_status;
u8 msi_enabled;
u8 netdev_count;
u8 napi_isr_ran;
@ -527,6 +528,7 @@ void nes_cm_disconn_worker(void *);
int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
struct nes_ib_device *nes_init_ofa_device(struct net_device *);
void nes_port_ibevent(struct nes_vnic *nesvnic);
void nes_destroy_ofa_device(struct nes_ib_device *);
int nes_register_ofa_device(struct nes_ib_device *);

View File: drivers/infiniband/hw/nes/nes_cm.c

@ -1719,8 +1719,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
{
int datasize = 0;
u32 inc_sequence;
u32 rem_seq_ack;
u32 rem_seq;
int ret = 0;
int optionsize;
optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
@ -1730,8 +1728,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
skb_pull(skb, tcph->doff << 2);
inc_sequence = ntohl(tcph->seq);
rem_seq = ntohl(tcph->seq);
rem_seq_ack = ntohl(tcph->ack_seq);
datasize = skb->len;
switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
@ -2565,7 +2561,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
u16 last_ae;
u8 original_hw_tcp_state;
u8 original_ibqp_state;
enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK;
int issue_disconn = 0;
int issue_close = 0;
int issue_flush = 0;
@ -3128,17 +3124,15 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
struct nes_vnic *nesvnic;
struct nes_cm_listener *cm_node;
struct nes_cm_info cm_info;
struct nes_adapter *adapter;
int err;
nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
cm_id, ntohs(cm_id->local_addr.sin_port));
nesvnic = to_nesvnic(cm_id->device);
if (!nesvnic)
return -EINVAL;
adapter = nesvnic->nesdev->nesadapter;
nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
nesvnic, nesvnic->netdev, nesvnic->netdev->name);

View File: drivers/infiniband/hw/nes/nes_hw.c

@ -1970,7 +1970,7 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
dev_kfree_skb(
nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]);
nesvnic->nic.sq_tail = (++nesvnic->nic.sq_tail)
nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1)
& (nesvnic->nic.sq_size - 1);
}
@ -2737,9 +2737,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
nesnic->sq_tail &= nesnic->sq_size-1;
if (sq_cqes > 128) {
barrier();
/* restart the queue if it had been stopped */
if (netif_queue_stopped(nesvnic->netdev))
netif_wake_queue(nesvnic->netdev);
/* restart the queue if it had been stopped */
if (netif_queue_stopped(nesvnic->netdev))
netif_wake_queue(nesvnic->netdev);
sq_cqes = 0;
}
} else {
@ -2999,11 +2999,8 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
{
u16 pkt_len;
if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
/* skip over ethernet header */
pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
pkt += ETH_HLEN;
/* Skip over IP and TCP headers */
@ -3283,9 +3280,15 @@ static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *n
else
mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
nes_terminate_start_timer(nesqp);
nesqp->term_flags |= NES_TERM_SENT;
nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
if (!nesdev->iw_status) {
nesqp->term_flags = NES_TERM_DONE;
nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0, 0);
nes_cm_disconn(nesqp);
} else {
nes_terminate_start_timer(nesqp);
nesqp->term_flags |= NES_TERM_SENT;
nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
}
}
static void nes_terminate_send_fin(struct nes_device *nesdev,

View File: drivers/infiniband/hw/nes/nes_hw.h

@ -1100,11 +1100,12 @@ struct nes_adapter {
u32 wqm_wat;
u32 core_clock;
u32 firmware_version;
u32 eeprom_version;
u32 nic_rx_eth_route_err;
u32 et_rx_coalesce_usecs;
u32 et_rx_max_coalesced_frames;
u32 et_rx_max_coalesced_frames;
u32 et_rx_coalesce_usecs_irq;
u32 et_rx_max_coalesced_frames_irq;
u32 et_pkt_rate_low;

View File: drivers/infiniband/hw/nes/nes_nic.c

@ -232,6 +232,13 @@ static int nes_netdev_open(struct net_device *netdev)
NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
first_nesvnic = nesvnic;
}
if (nesvnic->of_device_registered) {
nesdev->iw_status = 1;
nesdev->nesadapter->send_term_ok = 1;
nes_port_ibevent(nesvnic);
}
if (first_nesvnic->linkup) {
/* Enable network packets */
nesvnic->linkup = 1;
@ -309,9 +316,9 @@ static int nes_netdev_stop(struct net_device *netdev)
if (nesvnic->of_device_registered) {
nes_destroy_ofa_device(nesvnic->nesibdev);
nesvnic->nesibdev = NULL;
nesvnic->of_device_registered = 0;
nesdev->nesadapter->send_term_ok = 0;
nesdev->iw_status = 0;
nes_port_ibevent(nesvnic);
}
nes_destroy_nic_qp(nesvnic);
@ -463,7 +470,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u16 nhoffset;
u16 wqes_needed;
u16 wqes_available;
u32 old_head;
u32 wqe_misc;
/*
@ -503,7 +509,6 @@ sq_no_longer_full:
if (skb_is_gso(skb)) {
nesvnic->segmented_tso_requests++;
nesvnic->tso_requests++;
old_head = nesnic->sq_head;
/* Basically 4 fragments available per WQE with extended fragments */
wqes_needed = nr_frags >> 2;
wqes_needed += (nr_frags&3)?1:0;

View File: drivers/infiniband/hw/nes/nes_utils.c

@ -190,6 +190,11 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
(u32)((u8)eeprom_data);
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 10);
printk(PFX "EEPROM version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
nesadapter->eeprom_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
(u32)((u8)eeprom_data);
no_fw_rev:
/* eeprom is valid */
eeprom_offset = nesadapter->software_eeprom_offset;

View File: drivers/infiniband/hw/nes/nes_verbs.c

@ -518,7 +518,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
memset(props, 0, sizeof(*props));
memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6);
props->fw_ver = nesdev->nesadapter->fw_ver;
props->fw_ver = nesdev->nesadapter->firmware_version;
props->device_cap_flags = nesdev->nesadapter->device_cap_flags;
props->vendor_id = nesdev->nesadapter->vendor_id;
props->vendor_part_id = nesdev->nesadapter->vendor_part_id;
@ -1941,7 +1941,7 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
u8 use_256_pbls = 0;
u8 use_4k_pbls = 0;
u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
struct nes_root_vpbl new_root = {0, 0, 0};
struct nes_root_vpbl new_root = { 0, NULL, NULL };
u32 opcode = 0;
u16 major_code;
@ -2112,13 +2112,12 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
u32 driver_key = 0;
u32 root_pbl_index = 0;
u32 cur_pbl_index = 0;
int err = 0, pbl_depth = 0;
int err = 0;
int ret = 0;
u16 pbl_count = 0;
u8 single_page = 1;
u8 stag_key = 0;
pbl_depth = 0;
region_length = 0;
vpbl.pbl_vbase = NULL;
root_vpbl.pbl_vbase = NULL;
@ -2931,7 +2930,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int ret;
u16 original_last_aeq;
u8 issue_modify_qp = 0;
u8 issue_disconnect = 0;
u8 dont_wait = 0;
nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u,"
@ -3058,6 +3056,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hte_added = 0;
}
if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
(nesdev->iw_status) &&
(nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
next_iwarp_state |= NES_CQP_QP_RESET;
} else {
@ -3082,7 +3081,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
nesqp->iwarp_state);
issue_disconnect = 1;
} else {
nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
@ -3936,6 +3934,17 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
return nesibdev;
}
void nes_port_ibevent(struct nes_vnic *nesvnic)
{
struct nes_ib_device *nesibdev = nesvnic->nesibdev;
struct nes_device *nesdev = nesvnic->nesdev;
struct ib_event event;
event.device = &nesibdev->ibdev;
event.element.port_num = nesvnic->logical_port + 1;
event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
ib_dispatch_event(&event);
}
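nes_port_ibevent() gives both netdev open and stop a single place to translate the RNIC's administrative state into standard IB port events, so consumers such as the CM see the port transition. A hypothetical caller, for illustration:

	/* ifup path */
	nesdev->iw_status = 1;
	nes_port_ibevent(nesvnic);	/* dispatches IB_EVENT_PORT_ACTIVE */

	/* ifdown path */
	nesdev->iw_status = 0;
	nes_port_ibevent(nesvnic);	/* dispatches IB_EVENT_PORT_ERR */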
/**
* nes_destroy_ofa_device

View File: drivers/infiniband/hw/qib/qib.h

@ -45,6 +45,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
@ -326,6 +327,9 @@ struct qib_verbs_txreq {
#define QIB_DEFAULT_MTU 4096
/* max number of IB ports supported per HCA */
#define QIB_MAX_IB_PORTS 2
/*
* Possible IB config parameters for f_get/set_ib_table()
*/

View File: drivers/infiniband/hw/qib/qib_common.h

@ -279,7 +279,7 @@ struct qib_base_info {
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
#define QIB_USER_SWMINOR 10
#define QIB_USER_SWMINOR 11
#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
@ -301,6 +301,18 @@ struct qib_base_info {
*/
#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
/*
* If the unit is specified via open, HCA choice is fixed. If port is
* specified, it's also fixed. Otherwise we try to spread contexts
* across ports and HCAs, using different algorithms. WITHIN is
* the old default, prior to this mechanism.
*/
#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
* ports; this is the default */
#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
* active ports within), then next HCA */
#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
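The ACROSS policy, implemented by get_a_ctxt() in qib_file_ops.c below, picks the device with the fewest user contexts in use (among devices with a usable port) before choosing a port on it; WITHIN keeps the old behavior of filling one HCA first. The selection loop, reduced to a sketch with hypothetical helpers:

	struct qib_devdata *best = NULL;
	unsigned best_used = ~0U;
	int ndev;

	for (ndev = 0; ndev < devmax; ndev++) {	/* devmax from qib_count_units() */
		struct qib_devdata *dd = qib_lookup(ndev);
		unsigned used, free;

		if (!dd || !has_usable_port(dd))	/* hypothetical test */
			continue;
		count_user_ctxts(dd, &used, &free);	/* hypothetical counter */
		if (free && used < best_used) {
			best = dd;
			best_used = used;
		}
	}
	/* then allocate a context on 'best', or fail with -EBUSY/-ENETDOWN */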
/*
* This structure is passed to qib_userinit() to tell the driver where
* user code buffers are, sizes, etc. The offsets and sizes of the
@ -319,7 +331,7 @@ struct qib_user_info {
/* size of struct base_info to write to */
__u32 spu_base_info_size;
__u32 _spu_unused3;
__u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused user minor < 11 */
/*
* If two or more processes wish to share a context, each process

View File: drivers/infiniband/hw/qib/qib_driver.c

@ -335,7 +335,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
}
for (last = 0, i = 1; !last; i += !last) {
for (last = 0, i = 1; !last && i <= 64; i += !last) {
hdr = dd->f_get_msgheader(dd, rhf_addr);
eflags = qib_hdrget_err_flags(rhf_addr);
etype = qib_hdrget_rcv_type(rhf_addr);
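Capping the loop at 64 packets is the same budgeting idea NAPI uses: handle at most N packets per interrupt so one busy receive queue cannot monopolize the CPU, and let leftovers be picked up on the next interrupt or poll. In generic form:

	int budget = 64;

	while (packet_available(q) && budget--)	/* hypothetical queue test */
		process_one_packet(q);
	/* anything left is handled on the next pass */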

View File: drivers/infiniband/hw/qib/qib_file_ops.c

@ -1294,128 +1294,130 @@ bail:
return ret;
}
static inline int usable(struct qib_pportdata *ppd, int active_only)
static inline int usable(struct qib_pportdata *ppd)
{
struct qib_devdata *dd = ppd->dd;
u32 linkok = active_only ? QIBL_LINKACTIVE :
(QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
(ppd->lflags & linkok);
(ppd->lflags & QIBL_LINKACTIVE);
}
/*
* Select a context on the given device, either using a requested port
* or the port based on the context number.
*/
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
const struct qib_user_info *uinfo)
{
struct qib_pportdata *ppd = NULL;
int ret, ctxt;
if (port) {
if (!usable(dd->pport + port - 1)) {
ret = -ENETDOWN;
goto done;
} else
ppd = dd->pport + port - 1;
}
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
ctxt++)
;
if (ctxt == dd->cfgctxts) {
ret = -EBUSY;
goto done;
}
if (!ppd) {
u32 pidx = ctxt % dd->num_pports;
if (usable(dd->pport + pidx))
ppd = dd->pport + pidx;
else {
for (pidx = 0; pidx < dd->num_pports && !ppd;
pidx++)
if (usable(dd->pport + pidx))
ppd = dd->pport + pidx;
}
}
ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
done:
return ret;
}
static int find_free_ctxt(int unit, struct file *fp,
const struct qib_user_info *uinfo)
{
struct qib_devdata *dd = qib_lookup(unit);
struct qib_pportdata *ppd = NULL;
int ret;
u32 ctxt;
if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
ret = -ENODEV;
goto bail;
}
else
ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
/*
* If users requests specific port, only try that one port, else
* select "best" port below, based on context.
*/
if (uinfo->spu_port) {
ppd = dd->pport + uinfo->spu_port - 1;
if (!usable(ppd, 0)) {
ret = -ENETDOWN;
goto bail;
}
}
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
if (dd->rcd[ctxt])
continue;
/*
* The setting and clearing of user context rcd[x] protected
* by the qib_mutex
*/
if (!ppd) {
/* choose port based on ctxt, if up, else 1st up */
ppd = dd->pport + (ctxt % dd->num_pports);
if (!usable(ppd, 0)) {
int i;
for (i = 0; i < dd->num_pports; i++) {
ppd = dd->pport + i;
if (usable(ppd, 0))
break;
}
if (i == dd->num_pports) {
ret = -ENETDOWN;
goto bail;
}
}
}
ret = setup_ctxt(ppd, ctxt, fp, uinfo);
goto bail;
}
ret = -EBUSY;
bail:
return ret;
}
static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
unsigned alg)
{
struct qib_pportdata *ppd;
int ret = 0, devmax;
int npresent, nup;
int ndev;
struct qib_devdata *udd = NULL;
int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
u32 port = uinfo->spu_port, ctxt;
devmax = qib_count_units(&npresent, &nup);
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
/* device portion of usable() */
if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
continue;
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
if (dd->rcd[ctxt])
continue;
if (port) {
if (port > dd->num_pports)
continue;
ppd = dd->pport + port - 1;
if (!usable(ppd, 0))
continue;
} else {
/*
* choose port based on ctxt, if up, else
* first port that's up for multi-port HCA
*/
ppd = dd->pport + (ctxt % dd->num_pports);
if (!usable(ppd, 0)) {
int j;
ppd = NULL;
for (j = 0; j < dd->num_pports &&
!ppd; j++)
if (usable(dd->pport + j, 0))
ppd = dd->pport + j;
if (!ppd)
continue; /* to next unit */
}
}
ret = setup_ctxt(ppd, ctxt, fp, uinfo);
goto done;
}
if (!npresent) {
ret = -ENXIO;
goto done;
}
if (nup == 0) {
ret = -ENETDOWN;
goto done;
}
if (npresent) {
if (nup == 0)
ret = -ENETDOWN;
else
ret = -EBUSY;
} else
ret = -ENXIO;
if (alg == QIB_PORT_ALG_ACROSS) {
unsigned inuse = ~0U;
/* find device (with ACTIVE ports) with fewest ctxts in use */
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
unsigned cused = 0, cfree = 0;
if (!dd)
continue;
if (port && port <= dd->num_pports &&
usable(dd->pport + port - 1))
dusable = 1;
else
for (i = 0; i < dd->num_pports; i++)
if (usable(dd->pport + i))
dusable++;
if (!dusable)
continue;
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
ctxt++)
if (dd->rcd[ctxt])
cused++;
else
cfree++;
if (cfree && cused < inuse) {
udd = dd;
inuse = cused;
}
}
if (udd) {
ret = choose_port_ctxt(fp, udd, port, uinfo);
goto done;
}
} else {
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
if (dd) {
ret = choose_port_ctxt(fp, dd, port, uinfo);
if (!ret)
goto done;
if (ret == -EBUSY)
dusable++;
}
}
}
ret = dusable ? -EBUSY : -ENETDOWN;
done:
return ret;
@ -1481,7 +1483,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
int ret;
int i_minor;
unsigned swmajor, swminor;
unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
/* Check to be sure we haven't already initialized this file */
if (ctxt_fp(fp)) {
@ -1498,6 +1500,9 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
swminor = uinfo->spu_userversion & 0xffff;
if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
alg = uinfo->spu_port_alg;
mutex_lock(&qib_mutex);
if (qib_compatible_subctxts(swmajor, swminor) &&
@ -1514,7 +1519,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
if (i_minor)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else
ret = get_a_ctxt(fp, uinfo);
ret = get_a_ctxt(fp, uinfo, alg);
done_chk_sdma:
if (!ret) {
@ -1862,7 +1867,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
int ret = 0;
if (!usable(rcd->ppd, 1)) {
if (!usable(rcd->ppd)) {
int i;
/*
* if link is down, or otherwise not usable, delay
@ -1881,7 +1886,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
&rcd->user_event_mask[i]);
}
for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
for (i = 0; !usable(rcd->ppd) && i < 300; i++)
msleep(100);
ret = -ENETDOWN;
}

View File: drivers/infiniband/hw/qib/qib_fs.c

@ -135,8 +135,8 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
}
static const struct file_operations driver_ops[] = {
{ .read = driver_stats_read, },
{ .read = driver_names_read, },
{ .read = driver_stats_read, .llseek = generic_file_llseek, },
{ .read = driver_names_read, .llseek = generic_file_llseek, },
};
/* read the per-device counters */
@ -164,8 +164,8 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
}
static const struct file_operations cntr_ops[] = {
{ .read = dev_counters_read, },
{ .read = dev_names_read, },
{ .read = dev_counters_read, .llseek = generic_file_llseek, },
{ .read = dev_names_read, .llseek = generic_file_llseek, },
};
/*
@ -210,9 +210,9 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
}
static const struct file_operations portcntr_ops[] = {
{ .read = portnames_read, },
{ .read = portcntrs_1_read, },
{ .read = portcntrs_2_read, },
{ .read = portnames_read, .llseek = generic_file_llseek, },
{ .read = portcntrs_1_read, .llseek = generic_file_llseek, },
{ .read = portcntrs_2_read, .llseek = generic_file_llseek, },
};
/*
@ -261,8 +261,8 @@ static ssize_t qsfp_2_read(struct file *file, char __user *buf,
}
static const struct file_operations qsfp_ops[] = {
{ .read = qsfp_1_read, },
{ .read = qsfp_2_read, },
{ .read = qsfp_1_read, .llseek = generic_file_llseek, },
{ .read = qsfp_2_read, .llseek = generic_file_llseek, },
};
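Each of these file_operations now names an explicit .llseek; with the BKL being removed from the default llseek path around this time, spelling it out keeps seeking on these counter files well defined rather than implicit. The pattern in isolation (hypothetical reader):

	static const struct file_operations stats_ops = {
		.read   = stats_read,		/* hypothetical */
		.llseek = generic_file_llseek,	/* seek f_pos without the BKL */
	};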
static ssize_t flash_read(struct file *file, char __user *buf,

View File: drivers/infiniband/hw/qib/qib_iba7322.c

@ -5864,7 +5864,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
* Doesn't clear any of the error bits that might be set.
*/
val = TIDFLOW_ERRBITS; /* these are W1C */
for (i = 0; i < dd->ctxtcnt; i++) {
for (i = 0; i < dd->cfgctxts; i++) {
int flow;
for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
@ -7271,6 +7271,8 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
data = qib_read_kreg_port(ppd, krp_serdesctrl);
/* Turn off IB latency mode */
data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
qib_write_kreg_port(ppd, krp_serdesctrl, data |
SYM_MASK(IBSerdesCtrl_0, RXLOSEN));

View File: drivers/infiniband/hw/qib/qib_init.c

@ -93,7 +93,7 @@ unsigned long *qib_cpulist;
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
if (!qib_cfgctxts)
dd->cfgctxts = dd->ctxtcnt;
dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
else if (qib_cfgctxts < dd->num_pports)
dd->cfgctxts = dd->ctxtcnt;
else if (qib_cfgctxts <= dd->ctxtcnt)

View File: drivers/infiniband/hw/qib/qib_qp.c

@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
*
* Flushes both send and receive work queues.
* Returns true if last WQE event should be generated.
* The QP s_lock should be held and interrupts disabled.
* The QP r_lock and s_lock should be held and interrupts disabled.
* If we are already in error state, just return.
*/
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)

View File

@ -868,7 +868,7 @@ done:
/*
* Back up requester to resend the last un-ACKed request.
* The QP s_lock should be held and interrupts disabled.
* The QP r_lock and s_lock should be held and interrupts disabled.
*/
static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
{
@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg)
struct qib_ibport *ibp;
unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
spin_lock_irqsave(&qp->r_lock, flags);
spin_lock(&qp->s_lock);
if (qp->s_flags & QIB_S_TIMER) {
ibp = to_iport(qp->ibqp.device, qp->port_num);
ibp->n_rc_timeouts++;
@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg)
qib_restart_rc(qp, qp->s_last_psn + 1, 1);
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
spin_unlock(&qp->s_lock);
spin_unlock_irqrestore(&qp->r_lock, flags);
}
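rc_timeout() previously took only s_lock; it now takes r_lock first with interrupts disabled, nests s_lock inside it, and releases in reverse order, so the timeout path can no longer race with receive packet processing (which holds r_lock, per the qib_qp_rcv hunk further down). A minimal sketch of the ordering this imposes on every dual-lock path, with example_locked_update as a hypothetical caller:

static void example_locked_update(struct qib_qp *qp)
{
	unsigned long flags;

	/* Outer lock: serializes against receive packet processing. */
	spin_lock_irqsave(&qp->r_lock, flags);
	/* Inner lock: protects send-side state; plain spin_lock suffices
	 * because interrupts are already disabled by the outer lock. */
	spin_lock(&qp->s_lock);

	/* ... modify state shared between send and receive paths ... */

	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}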
/*
@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
spin_lock_irqsave(&qp->s_lock, flags);
/* Double check we can process this now that we hold the s_lock. */
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
goto ack_done;
/* Ignore invalid responses. */
if (qib_cmp24(psn, qp->s_next_psn) >= 0)
goto ack_done;
@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
ibp->n_rc_dupreq++;
spin_lock_irqsave(&qp->s_lock, flags);
/* Double check we can process this now that we hold the s_lock. */
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
goto unlock_done;
for (i = qp->r_head_ack_queue; ; i = prev) {
if (i == qp->s_tail_ack_queue)
@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
/* Prevent simultaneous processing after APM on different CPUs */
spin_lock(&qp->r_lock);
/*
* Process responses (ACKs) before anything else. Note that the
* packet sequence number will be for something in the send work
@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
hdrsize, pmtu, rcd);
goto runlock;
return;
}
/* Compute 24 bits worth of difference. */
diff = qib_cmp24(psn, qp->r_psn);
if (unlikely(diff)) {
if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
goto runlock;
return;
goto send_ack;
}
@ -2090,9 +2082,6 @@ send_last:
if (next > QIB_MAX_RDMA_ATOMIC)
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
/* Double check we can process this while holding the s_lock. */
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
goto srunlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
@ -2146,7 +2135,7 @@ send_last:
qp->s_flags |= QIB_S_RESP_PENDING;
qib_schedule_send(qp);
goto srunlock;
goto sunlock;
}
case OP(COMPARE_SWAP):
@ -2165,9 +2154,6 @@ send_last:
if (next > QIB_MAX_RDMA_ATOMIC)
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
/* Double check we can process this while holding the s_lock. */
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
goto srunlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
@ -2213,7 +2199,7 @@ send_last:
qp->s_flags |= QIB_S_RESP_PENDING;
qib_schedule_send(qp);
goto srunlock;
goto sunlock;
}
default:
@ -2227,7 +2213,7 @@ send_last:
/* Send an ACK if requested or required. */
if (psn & (1 << 31))
goto send_ack;
goto runlock;
return;
rnr_nak:
qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@ -2238,7 +2224,7 @@ rnr_nak:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
goto runlock;
return;
nack_op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@ -2250,7 +2236,7 @@ nack_op_err:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
goto runlock;
return;
nack_inv_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
@ -2264,7 +2250,7 @@ nack_inv:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
goto runlock;
return;
nack_acc_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
@ -2274,13 +2260,6 @@ nack_acc:
qp->r_ack_psn = qp->r_psn;
send_ack:
qib_send_rc_ack(qp);
runlock:
spin_unlock(&qp->r_lock);
return;
srunlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
spin_unlock(&qp->r_lock);
return;
sunlock:

View File

@ -656,6 +656,7 @@ unmap:
}
qp = tx->qp;
qib_put_txreq(tx);
spin_lock(&qp->r_lock);
spin_lock(&qp->s_lock);
if (qp->ibqp.qp_type == IB_QPT_RC) {
/* XXX what about error sending RDMA read responses? */
@ -664,6 +665,7 @@ unmap:
} else if (qp->s_wqe)
qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->r_lock);
/* return zero to process the next send work request */
goto unlock;

View File

@ -347,7 +347,7 @@ static struct kobj_type qib_sl2vl_ktype = {
#define QIB_DIAGC_ATTR(N) \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = { .name = __stringify(N), .mode = 0444 }, \
.attr = { .name = __stringify(N), .mode = 0664 }, \
.counter = offsetof(struct qib_ibport, n_##N) \
}
@ -403,8 +403,27 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
}
static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t size)
{
struct qib_diagc_attr *dattr =
container_of(attr, struct qib_diagc_attr, attr);
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, diagc_kobj);
struct qib_ibport *qibp = &ppd->ibport_data;
char *endp;
long val = simple_strtol(buf, &endp, 0);
if (val < 0 || endp == buf)
return -EINVAL;
*(u32 *)((char *) qibp + dattr->counter) = val;
return size;
}
static const struct sysfs_ops qib_diagc_ops = {
.show = diagc_attr_show,
.store = diagc_attr_store,
};
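With the mode change to 0664 and this store hook wired into qib_diagc_ops, the per-port diagnostic counters become writable, so a user can reset one (most usefully to zero) rather than only read it. A small userspace sketch, assuming a hypothetical sysfs path and counter name; the real path depends on where the diag-counter kobject is registered for a given device and port:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; substitute the actual device and port. */
	const char *path =
		"/sys/class/infiniband/qib0/ports/1/diag_counters/rc_timeouts";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* diagc_attr_store() parses this with simple_strtol() and
	 * rejects negative values. */
	fputs("0\n", f);
	fclose(f);
	return 0;
}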
static struct kobj_type qib_diagc_ktype = {

View File

@ -170,7 +170,7 @@ static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
unsigned cnt)
{
struct qib_pportdata *ppd, *pppd[dd->num_pports];
struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
unsigned i;
unsigned long flags;
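Here a variable-length array sized by dd->num_pports is replaced with one sized by the compile-time constant QIB_MAX_IB_PORTS, so the function's stack footprint no longer depends on runtime data. A standalone sketch of the trade-off, with EXAMPLE_MAX_PORTS standing in for the real bound:

#define EXAMPLE_MAX_PORTS 2	/* stand-in for QIB_MAX_IB_PORTS */

static void example_per_port_walk(void *ports[], unsigned num_pports)
{
	/* Fixed-size scratch array: worst-case stack use is known at
	 * compile time; num_pports only needs to stay within the bound. */
	void *scratch[EXAMPLE_MAX_PORTS];
	unsigned i;

	for (i = 0; i < num_pports && i < EXAMPLE_MAX_PORTS; i++)
		scratch[i] = ports[i];	/* ... collect per-port state ... */
}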

View File

@ -272,9 +272,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
opcode >>= 24;
memset(&wc, 0, sizeof wc);
/* Prevent simultaneous processing after APM on different CPUs */
spin_lock(&qp->r_lock);
/* Compare the PSN against the expected PSN. */
if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
/*
@ -534,7 +531,6 @@ rdma_last:
}
qp->r_psn++;
qp->r_state = opcode;
spin_unlock(&qp->r_lock);
return;
rewind:
@ -542,12 +538,10 @@ rewind:
qp->r_sge.num_sge = 0;
drop:
ibp->n_pkt_drops++;
spin_unlock(&qp->r_lock);
return;
op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
spin_unlock(&qp->r_lock);
return;
sunlock:

View File

@ -534,13 +534,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
*/
wc.byte_len = tlen + sizeof(struct ib_grh);
/*
* We need to serialize getting a receive work queue entry and
* generating a completion for it against QPs sending to this QP
* locally.
*/
spin_lock(&qp->r_lock);
/*
* Get the next work request entry to find where to put the data.
*/
@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
ret = qib_get_rwqe(qp, 0);
if (ret < 0) {
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
return;
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
ibp->n_vl15_dropped++;
goto bail_unlock;
return;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE;
ibp->n_pkt_drops++;
goto bail_unlock;
return;
}
if (has_grh) {
qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock;
return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
bail_unlock:
spin_unlock(&qp->r_lock);
bail:;
}

View File

@ -550,10 +550,12 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
spin_lock(&qp->r_lock);
/* Check for valid receive state. */
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
return;
goto unlock;
}
switch (qp->ibqp.qp_type) {
@ -577,6 +579,9 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
default:
break;
}
unlock:
spin_unlock(&qp->r_lock);
}
/**

View File

@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
}
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
{
int i, j;
u64 dma_addr;

View File

@ -811,6 +811,38 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return len;
}
static int srp_post_recv(struct srp_target_port *target)
{
unsigned long flags;
struct srp_iu *iu;
struct ib_sge list;
struct ib_recv_wr wr, *bad_wr;
unsigned int next;
int ret;
spin_lock_irqsave(target->scsi_host->host_lock, flags);
next = target->rx_head & (SRP_RQ_SIZE - 1);
wr.wr_id = next;
iu = target->rx_ring[next];
list.addr = iu->dma;
list.length = iu->size;
list.lkey = target->srp_host->srp_dev->mr->lkey;
wr.next = NULL;
wr.sg_list = &list;
wr.num_sge = 1;
ret = ib_post_recv(target->qp, &wr, &bad_wr);
if (!ret)
++target->rx_head;
spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
return ret;
}
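srp_post_recv() now takes host_lock itself; the old split into a locked wrapper plus __srp_post_recv() is deleted further down. Note the slot computation rx_head & (SRP_RQ_SIZE - 1): it relies on SRP_RQ_SIZE being a power of two, letting a free-running head counter wrap cleanly into ring indices. A runnable sketch of that masking, with RQ_SIZE as a stand-in value:

#include <stdio.h>

#define RQ_SIZE 64	/* stand-in; must be a power of two */

int main(void)
{
	unsigned int head;

	/* head increments forever; the mask folds it into [0, RQ_SIZE). */
	for (head = 62; head < 67; head++)
		printf("rx_head %u -> ring slot %u\n",
		       head, head & (RQ_SIZE - 1));
	return 0;
}

This matters in the srp_cm_handler hunk below, where connection establishment now posts all SRP_RQ_SIZE receive buffers up front instead of a single one.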
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
struct srp_request *req;
@ -868,6 +900,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
struct ib_device *dev;
struct srp_iu *iu;
int res;
u8 opcode;
iu = target->rx_ring[wc->wr_id];
@ -879,21 +912,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
opcode = *(u8 *) iu->buf;
if (0) {
int i;
shost_printk(KERN_ERR, target->scsi_host,
PFX "recv completion, opcode 0x%02x\n", opcode);
for (i = 0; i < wc->byte_len; ++i) {
if (i % 8 == 0)
printk(KERN_ERR " [%02x] ", i);
printk(" %02x", ((u8 *) iu->buf)[i]);
if ((i + 1) % 8 == 0)
printk("\n");
}
if (wc->byte_len % 8)
printk("\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
iu->buf, wc->byte_len, true);
}
switch (opcode) {
@ -915,6 +937,11 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
DMA_FROM_DEVICE);
res = srp_post_recv(target);
if (res != 0)
shost_printk(KERN_ERR, target->scsi_host,
PFX "Recv failed with error code %d\n", res);
}
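Two changes land in this function: the open-coded debug hex loop collapses into a single print_hex_dump() call, and the receive buffer is reposted from the completion handler with the return code checked and logged. For reference, a minimal sketch of the print_hex_dump() arguments as used above, wrapped in a hypothetical helper:

static void example_dump(const void *buf, size_t len)
{
	/* Rows of 8 bytes in 1-byte groups, each row prefixed with its
	 * byte offset, ASCII column enabled; matches the call above. */
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
		       buf, len, true);
}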
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
@ -954,45 +981,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
}
}
static int __srp_post_recv(struct srp_target_port *target)
{
struct srp_iu *iu;
struct ib_sge list;
struct ib_recv_wr wr, *bad_wr;
unsigned int next;
int ret;
next = target->rx_head & (SRP_RQ_SIZE - 1);
wr.wr_id = next;
iu = target->rx_ring[next];
list.addr = iu->dma;
list.length = iu->size;
list.lkey = target->srp_host->srp_dev->mr->lkey;
wr.next = NULL;
wr.sg_list = &list;
wr.num_sge = 1;
ret = ib_post_recv(target->qp, &wr, &bad_wr);
if (!ret)
++target->rx_head;
return ret;
}
static int srp_post_recv(struct srp_target_port *target)
{
unsigned long flags;
int ret;
spin_lock_irqsave(target->scsi_host->host_lock, flags);
ret = __srp_post_recv(target);
spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
return ret;
}
/*
* Must be called with target->scsi_host->host_lock held to protect
* req_lim and tx_head. Lock cannot be dropped between call here and
@ -1102,11 +1090,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
goto err;
}
if (__srp_post_recv(target)) {
shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n");
goto err_unmap;
}
ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
@ -1249,6 +1232,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
int attr_mask = 0;
int comp = 0;
int opcode = 0;
int i;
switch (event->event) {
case IB_CM_REQ_ERROR:
@ -1298,7 +1282,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
if (target->status)
break;
target->status = srp_post_recv(target);
for (i = 0; i < SRP_RQ_SIZE; i++) {
target->status = srp_post_recv(target);
if (target->status)
break;
}
if (target->status)
break;
@ -1564,6 +1552,18 @@ static ssize_t show_orig_dgid(struct device *dev,
return sprintf(buf, "%pI6\n", target->orig_dgid);
}
static ssize_t show_req_lim(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
if (target->state == SRP_TARGET_DEAD ||
target->state == SRP_TARGET_REMOVED)
return -ENODEV;
return sprintf(buf, "%d\n", target->req_lim);
}
static ssize_t show_zero_req_lim(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -1598,6 +1598,7 @@ static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
@ -1609,6 +1610,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_pkey,
&dev_attr_dgid,
&dev_attr_orig_dgid,
&dev_attr_req_lim,
&dev_attr_zero_req_lim,
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,

View File

@ -555,7 +555,7 @@ enum ib_qp_type {
IB_QPT_UC,
IB_QPT_UD,
IB_QPT_RAW_IPV6,
IB_QPT_RAW_ETY
IB_QPT_RAW_ETHERTYPE
};
enum ib_qp_create_flags {