Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: SRQ fixes to enable IPoIB CM
  IB/ehca: Fix Small QP regressions
Linus Torvalds 2007-08-31 20:40:37 -07:00
commit 6db602d447
4 changed files with 46 additions and 26 deletions


@@ -93,9 +93,13 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	props->max_pd      = min_t(int, rblock->max_pd, INT_MAX);
 	props->max_ah      = min_t(int, rblock->max_ah, INT_MAX);
 	props->max_fmr     = min_t(int, rblock->max_mr, INT_MAX);
-	props->max_srq     = 0;
-	props->max_srq_wr  = 0;
-	props->max_srq_sge = 0;
+
+	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+		props->max_srq     = props->max_qp;
+		props->max_srq_wr  = props->max_qp_wr;
+		props->max_srq_sge = 3;
+	}
+
 	props->max_pkeys   = 16;
 	props->local_ca_ack_delay
 		= rblock->local_ca_ack_delay;
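
Why this hunk matters for the IPoIB CM use case named in the merge: a kernel consumer decides whether SRQs are usable from the attributes ehca_query_device() fills in, so reporting zero here effectively disabled the SRQ path. A minimal, hypothetical consumer-side sketch (helper name and the chosen limits are illustrative, not part of this patch):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: create an SRQ only if the HCA reports SRQ support. */
static struct ib_srq *example_create_srq(struct ib_device *device,
					 struct ib_pd *pd)
{
	struct ib_device_attr attr;
	struct ib_srq_init_attr srq_init_attr;

	/* With the old code this saw max_srq == 0 on eHCA and bailed out. */
	if (ib_query_device(device, &attr) || !attr.max_srq)
		return ERR_PTR(-ENOSYS);

	memset(&srq_init_attr, 0, sizeof(srq_init_attr));
	srq_init_attr.attr.max_wr  = min_t(u32, 4096, attr.max_srq_wr);
	srq_init_attr.attr.max_sge = min_t(u32, 4, attr.max_srq_sge);

	return ib_create_srq(pd, &srq_init_attr);
}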


@@ -175,10 +175,32 @@ error_data1:
 	return;
 }
 
+static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
+			      enum ib_event_type event_type)
+{
+	struct ib_event event;
+
+	event.device = &shca->ib_device;
+	event.event = event_type;
+
+	if (qp->ext_type == EQPT_SRQ) {
+		if (!qp->ib_srq.event_handler)
+			return;
+
+		event.element.srq = &qp->ib_srq;
+		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
+	} else {
+		if (!qp->ib_qp.event_handler)
+			return;
+
+		event.element.qp = &qp->ib_qp;
+		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+	}
+}
+
 static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
 			      enum ib_event_type event_type, int fatal)
 {
-	struct ib_event event;
 	struct ehca_qp *qp;
 	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
 
@@ -186,30 +208,22 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
 	qp = idr_find(&ehca_qp_idr, token);
 	read_unlock(&ehca_qp_idr_lock);
 
 	if (!qp)
 		return;
 
 	if (fatal)
 		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
 
-	event.device = &shca->ib_device;
-
-	if (qp->ext_type == EQPT_SRQ) {
-		if (!qp->ib_srq.event_handler)
-			return;
-
-		event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
-		event.element.srq = &qp->ib_srq;
-		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
-	} else {
-		if (!qp->ib_qp.event_handler)
-			return;
-
-		event.event = event_type;
-		event.element.qp = &qp->ib_qp;
-		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
-	}
+	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
+			  IB_EVENT_SRQ_ERR : event_type);
+
+	/*
+	 * eHCA only processes one WQE at a time for SRQ base QPs,
+	 * so the last WQE has been processed as soon as the QP enters
+	 * error state.
+	 */
+	if (fatal && qp->ext_type == EQPT_SRQBASE)
+		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
 
 	return;
 }
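
For reference, the handlers that dispatch_qp_event() invokes are the ones a consumer registers through ib_qp_init_attr.event_handler and ib_srq_init_attr.event_handler when creating the QP or SRQ. A rough, hypothetical sketch of such consumer-side handlers (function names and log messages are illustrative only, not taken from any driver):

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical SRQ async event handler, registered via ib_srq_init_attr. */
static void example_srq_event_handler(struct ib_event *event, void *ctx)
{
	if (event->event == IB_EVENT_SRQ_ERR)
		printk(KERN_ERR "SRQ %p entered the error state\n",
		       event->element.srq);
}

/* Hypothetical QP async event handler, registered via ib_qp_init_attr. */
static void example_qp_event_handler(struct ib_event *event, void *ctx)
{
	/*
	 * Consumers such as IPoIB CM wait for IB_EVENT_QP_LAST_WQE_REACHED
	 * before tearing down an SRQ-attached QP; the hunk above synthesizes
	 * that event for SRQ base QPs as soon as they enter the error state.
	 */
	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
		printk(KERN_INFO "last WQE reached on QP %p\n",
		       event->element.qp);
}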


@@ -600,10 +600,12 @@ static struct ehca_qp *internal_create_qp(
 	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
 	    && !(context && udata)) { /* no small QP support in userspace ATM */
-		ehca_determine_small_queue(
-			&parms.squeue, max_send_sge, is_llqp);
-		ehca_determine_small_queue(
-			&parms.rqueue, max_recv_sge, is_llqp);
+		if (HAS_SQ(my_qp))
+			ehca_determine_small_queue(
+				&parms.squeue, max_send_sge, is_llqp);
+		if (HAS_RQ(my_qp))
+			ehca_determine_small_queue(
+				&parms.rqueue, max_recv_sge, is_llqp);
 		parms.qp_storage =
 			(parms.squeue.is_small || parms.rqueue.is_small);
 	}
 


@@ -172,7 +172,7 @@ static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
 	unsigned long bit;
 	int free_page = 0;
 
-	bit = ((unsigned long)queue->queue_pages[0] & PAGE_MASK)
+	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
 		>> (order + 9);
 
 	mutex_lock(&pd->lock);
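
The one-character fix in this last hunk is easiest to check with numbers: PAGE_MASK keeps the page-aligned part of the address, while ~PAGE_MASK keeps the offset inside the page, and it is that offset, divided by the small-queue chunk size (512 << order bytes), that indexes the per-page allocation bitmap. A stand-alone sketch of the arithmetic (the 4 KiB page size and the sample address are assumptions for illustration):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xc0012e00UL;	/* made-up small-queue address */
	int order = 1;				/* 512 << 1 = 1024-byte chunks */

	unsigned long wrong = (addr & PAGE_MASK) >> (order + 9);
	unsigned long right = (addr & ~PAGE_MASK) >> (order + 9);

	/* wrong == 0x300048 (page-frame bits), right == 3 (chunk index in page) */
	printf("wrong bit = %#lx, right bit = %lu\n", wrong, right);
	return 0;
}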