IB/ipath: Add support for 7220 receive queue changes
Newer HCAs have a HW option to write a sequence number to each receive queue entry and avoid a separate DMA of the tail register to memory. This patch adds support for these changes.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 2ba3f56eb4
commit 9355fb6a06
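The mechanism this patch implements (see ipath_hdrget_seq() and ipath_get_hdrqtail() in the diff below) can be summarized with a minimal C sketch. The struct and helper names here are illustrative only, not the driver's exact API; only the RHF sequence field and the 1..13 wrap are taken from the patch itself:

/*
 * Sketch of sequence-number-based tail detection, used when the chip
 * does not DMA the tail register to memory (IPATH_NODMA_RTAIL).
 * Names below are illustrative, not the driver's exact API.
 */
#include <stdint.h>

#define RHF_SEQ_SHIFT 0
#define RHF_SEQ_MASK  0xF

struct port_state {
	uint32_t head;     /* next rcvhdrq entry to process */
	uint32_t seq_cnt;  /* expected sequence number, wraps 13 -> 1 */
};

static uint32_t rhf_seq(uint32_t rhf_word1)
{
	return (rhf_word1 >> RHF_SEQ_SHIFT) & RHF_SEQ_MASK;
}

/* An entry is ready when its RHF sequence matches the expected count. */
static int entry_ready(const struct port_state *ps, uint32_t rhf_word1)
{
	return rhf_seq(rhf_word1) == ps->seq_cnt;
}

/* After consuming an entry, advance the expected sequence number. */
static void advance_seq(struct port_state *ps)
{
	if (++ps->seq_cnt > 13)
		ps->seq_cnt = 1;
}

On older chips (no IPATH_NODMA_RTAIL), the driver still compares port_head against the DMA'd tail value, as in ipath_get_rcvhdrtail().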
@@ -198,7 +198,7 @@ typedef enum _ipath_ureg {
#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
#define IPATH_RUNTIME_RCVHDR_COPY 0x8
#define IPATH_RUNTIME_MASTER 0x10
/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */
#define IPATH_RUNTIME_NODMA_RTAIL 0x80
#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800

@@ -662,8 +662,12 @@ struct infinipath_counters {
#define INFINIPATH_RHF_LENGTH_SHIFT 0
#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF
#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
#define INFINIPATH_RHF_SEQ_MASK 0xF
#define INFINIPATH_RHF_SEQ_SHIFT 0
#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
#define INFINIPATH_RHF_H_ICRCERR 0x80000000
#define INFINIPATH_RHF_H_VCRCERR 0x40000000
#define INFINIPATH_RHF_H_PARITYERR 0x20000000

@@ -673,6 +677,8 @@ struct infinipath_counters {
#define INFINIPATH_RHF_H_TIDERR 0x02000000
#define INFINIPATH_RHF_H_MKERR 0x01000000
#define INFINIPATH_RHF_H_IBERR 0x00800000
#define INFINIPATH_RHF_H_ERR_MASK 0xFF800000
#define INFINIPATH_RHF_L_USE_EGR 0x80000000
#define INFINIPATH_RHF_L_SWA 0x00008000
#define INFINIPATH_RHF_L_SWB 0x00004000

@@ -696,6 +702,7 @@ struct infinipath_counters {
/* SendPIO per-buffer control */
#define INFINIPATH_SP_TEST 0x40
#define INFINIPATH_SP_TESTEBP 0x20
#define INFINIPATH_SP_TRIGGER_SHIFT 15

/* SendPIOAvail bits */
#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1

@@ -762,6 +769,7 @@ struct ether_header {
#define IPATH_MSN_MASK 0xFFFFFF
#define IPATH_QPN_MASK 0xFFFFFF
#define IPATH_MULTICAST_LID_BASE 0xC000
#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
#define IPATH_MULTICAST_QPN 0xFFFFFF

/* Receive Header Queue: receive type (from infinipath) */

@@ -781,7 +789,7 @@ struct ether_header {
*/
static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
{
return __le32_to_cpu(rbuf[1]);
return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
}

static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)

@@ -802,6 +810,23 @@ static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
& INFINIPATH_RHF_EGRINDEX_MASK;
}

static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
{
return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
& INFINIPATH_RHF_SEQ_MASK;
}

static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
{
return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
& INFINIPATH_RHF_HDRQ_OFFSET_MASK;
}

static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
{
return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
}

static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
{
return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)

@@ -41,7 +41,6 @@
#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

@@ -720,6 +719,8 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
tmpp = dd->ipath_pageshadow;
dd->ipath_pageshadow = NULL;
vfree(tmpp);

dd->ipath_egrtidbase = NULL;
}

/*

@@ -1078,18 +1079,17 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
u32 eflags,
u32 l,
u32 etail,
u64 *rc)
__le32 *rhf_addr,
struct ipath_message_header *hdr)
{
char emsg[128];
struct ipath_message_header *hdr;

get_rhf_errstring(eflags, emsg, sizeof emsg);
hdr = (struct ipath_message_header *)&rc[1];
ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
"tlen=%x opcode=%x egridx=%x: %s\n",
eflags, l,
ipath_hdrget_rcv_type((__le32 *) rc),
ipath_hdrget_length_in_bytes((__le32 *) rc),
ipath_hdrget_rcv_type(rhf_addr),
ipath_hdrget_length_in_bytes(rhf_addr),
be32_to_cpu(hdr->bth[0]) >> 24,
etail, emsg);

@@ -1114,8 +1114,8 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
*/
void ipath_kreceive(struct ipath_portdata *pd)
{
u64 *rc;
struct ipath_devdata *dd = pd->port_dd;
__le32 *rhf_addr;
void *ebuf;
const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */

@@ -1123,46 +1123,43 @@ void ipath_kreceive(struct ipath_portdata *pd)
struct ipath_message_header *hdr;
u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
static u64 totcalls; /* stats, may eventually remove */

if (!dd->ipath_hdrqtailptr) {
ipath_dev_err(dd,
"hdrqtailptr not set, can't do receives\n");
goto bail;
}
int last;

l = pd->port_head;
rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
u32 seq = ipath_hdrget_seq(rhf_addr);

if (seq != pd->port_seq_cnt)
goto bail;
hdrqtail = 0;
} else {
hdrqtail = ipath_get_rcvhdrtail(pd);
if (l == hdrqtail)
goto bail;
smp_rmb();
}

reloop:
for (i = 0; l != hdrqtail; i++) {
u32 qp;
u8 *bthbytes;

rc = (u64 *) (pd->port_rcvhdrq + (l << 2));
hdr = (struct ipath_message_header *)&rc[1];
/*
* could make a network order version of IPATH_KD_QP, and
* do the obvious shift before masking to speed this up.
*/
qp = ntohl(hdr->bth[1]) & 0xffffff;
bthbytes = (u8 *) hdr->bth;

eflags = ipath_hdrget_err_flags((__le32 *) rc);
etype = ipath_hdrget_rcv_type((__le32 *) rc);
for (last = 0, i = 1; !last; i++) {
hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
eflags = ipath_hdrget_err_flags(rhf_addr);
etype = ipath_hdrget_rcv_type(rhf_addr);
/* total length */
tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
tlen = ipath_hdrget_length_in_bytes(rhf_addr);
ebuf = NULL;
if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
ipath_hdrget_use_egr_buf(rhf_addr) :
(etype != RCVHQ_RCV_TYPE_EXPECTED)) {
/*
* it turns out that the chips uses an eager buffer
* It turns out that the chip uses an eager buffer
* for all non-expected packets, whether it "needs"
* one or not. So always get the index, but don't
* set ebuf (so we try to copy data) unless the
* length requires it.
*/
etail = ipath_hdrget_index((__le32 *) rc);
etail = ipath_hdrget_index(rhf_addr);
updegr = 1;
if (tlen > sizeof(*hdr) ||
etype == RCVHQ_RCV_TYPE_NON_KD)
ebuf = ipath_get_egrbuf(dd, etail);

@@ -1173,75 +1170,91 @@ reloop:
* packets; only ipathhdrerr should be set.
*/

if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
hdr->iph.ver_port_tid_offset) !=
IPS_PROTO_VERSION) {
if (etype != RCVHQ_RCV_TYPE_NON_KD &&
etype != RCVHQ_RCV_TYPE_ERROR &&
ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
IPS_PROTO_VERSION)
ipath_cdbg(PKT, "Bad InfiniPath protocol version "
"%x\n", etype);
}

if (unlikely(eflags))
ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
if (dd->ipath_lli_counter)
dd->ipath_lli_counter--;
} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
"qp=%x), len %x; ignored\n",
etype, bthbytes[0], qp, tlen);
etype, opcode, qp, tlen);
}
else if (etype == RCVHQ_RCV_TYPE_EAGER)
ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
"qp=%x), len %x; ignored\n",
etype, bthbytes[0], qp, tlen);
else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
be32_to_cpu(hdr->bth[0]) & 0xff);
be32_to_cpu(hdr->bth[0]) >> 24);
else {
/*
* error packet, type of error unknown.
* Probably type 3, but we don't know, so don't
* even try to print the opcode, etc.
* Usually caused by a "bad packet", that has no
* BTH, when the LRH says it should.
*/
ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
"len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
"hdr %llx %llx %llx %llx %llx\n",
etail, tlen, (unsigned long) rc, l,
(unsigned long long) rc[0],
(unsigned long long) rc[1],
(unsigned long long) rc[2],
(unsigned long long) rc[3],
(unsigned long long) rc[4],
(unsigned long long) rc[5]);
ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
" %x, len %x hdrq+%x rhf: %Lx\n",
etail, tlen, l,
le64_to_cpu(*(__le64 *) rhf_addr));
if (ipath_debug & __IPATH_ERRPKTDBG) {
u32 j, *d, dw = rsize-2;
if (rsize > (tlen>>2))
dw = tlen>>2;
d = (u32 *)hdr;
printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
dw);
for (j = 0; j < dw; j++)
printk(KERN_DEBUG "%8x%s", d[j],
(j%8) == 7 ? "\n" : " ");
printk(KERN_DEBUG ".\n");
}
}
l += rsize;
if (l >= maxcnt)
l = 0;
if (etype != RCVHQ_RCV_TYPE_EXPECTED)
updegr = 1;
rhf_addr = (__le32 *) pd->port_rcvhdrq +
l + dd->ipath_rhf_offset;
if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
u32 seq = ipath_hdrget_seq(rhf_addr);

if (++pd->port_seq_cnt > 13)
pd->port_seq_cnt = 1;
if (seq != pd->port_seq_cnt)
last = 1;
} else if (l == hdrqtail)
last = 1;
/*
* update head regs on last packet, and every 16 packets.
* Reduce bus traffic, while still trying to prevent
* rcvhdrq overflows, for when the queue is nearly full
*/
if (l == hdrqtail || (i && !(i&0xf))) {
u64 lval;
if (l == hdrqtail)
/* request IBA6120 interrupt only on last */
lval = dd->ipath_rhdrhead_intr_off | l;
else
lval = l;
ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
if (last || !(i & 0xf)) {
u64 lval = l;

/* request IBA6120 and 7220 interrupt only on last */
if (last)
lval |= dd->ipath_rhdrhead_intr_off;
ipath_write_ureg(dd, ur_rcvhdrhead, lval,
pd->port_port);
if (updegr) {
ipath_write_ureg(dd, ur_rcvegrindexhead,
etail, 0);
etail, pd->port_port);
updegr = 0;
}
}
}

if (!dd->ipath_rhdrhead_intr_off && !reloop) {
if (!dd->ipath_rhdrhead_intr_off && !reloop &&
!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
/* IBA6110 workaround; we can have a race clearing chip
* interrupt with another interrupt about to be delivered,
* and can clear it before it is delivered on the GPIO

@@ -1638,19 +1651,27 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
ret = -ENOMEM;
goto bail;
}

if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
GFP_KERNEL);
if (!pd->port_rcvhdrtail_kvaddr) {
ipath_dev_err(dd, "attempt to allocate 1 page "
"for port %u rcvhdrqtailaddr failed\n",
pd->port_port);
"for port %u rcvhdrqtailaddr "
"failed\n", pd->port_port);
ret = -ENOMEM;
dma_free_coherent(&dd->pcidev->dev, amt,
pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
pd->port_rcvhdrq,
pd->port_rcvhdrq_phys);
pd->port_rcvhdrq = NULL;
goto bail;
}
pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
"physical\n", pd->port_port,
(unsigned long long) phys_hdrqtail);
}

pd->port_rcvhdrq_size = amt;

@@ -1660,10 +1681,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
(unsigned long) pd->port_rcvhdrq_phys,
(unsigned long) pd->port_rcvhdrq_size,
pd->port_port);

ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
pd->port_port,
(unsigned long long) phys_hdrqtail);
}
else
ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "

@@ -1687,7 +1704,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
pd->port_port, pd->port_rcvhdrq_phys);

ret = 0;
bail:
return ret;
}

@@ -2222,7 +2238,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
ipath_cdbg(VERBOSE, "free closed port %d "
"ipath_port0_skbinfo @ %p\n", pd->port_port,
skbinfo);
for (e = 0; e < dd->ipath_rcvegrcnt; e++)
for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
if (skbinfo[e].skb) {
pci_unmap_single(dd->pcidev, skbinfo[e].phys,
dd->ipath_ibmaxlen,

@@ -1930,22 +1930,25 @@ static int ipath_do_user_init(struct file *fp,
pd->port_hdrqfull_poll = pd->port_hdrqfull;

/*
* now enable the port; the tail registers will be written to memory
* by the chip as soon as it sees the write to
* dd->ipath_kregs->kr_rcvctrl. The update only happens on
* transition from 0 to 1, so clear it first, then set it as part of
* enabling the port. This will (very briefly) affect any other
* open ports, but it shouldn't be long enough to be an issue.
* We explictly set the in-memory copy to 0 beforehand, so we don't
* have to wait to be sure the DMA update has happened.
* Now enable the port for receive.
* For chips that are set to DMA the tail register to memory
* when they change (and when the update bit transitions from
* 0 to 1. So for those chips, we turn it off and then back on.
* This will (very briefly) affect any other open ports, but the
* duration is very short, and therefore isn't an issue. We
* explictly set the in-memory tail copy to 0 beforehand, so we
* don't have to wait to be sure the DMA update has happened
* (chip resets head/tail to 0 on transition to enable).
*/
if (pd->port_rcvhdrtail_kvaddr)
ipath_clear_rcvhdrtail(pd);
set_bit(dd->ipath_r_portenable_shift + pd->port_port,
&dd->ipath_rcvctrl);
if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
if (pd->port_rcvhdrtail_kvaddr)
ipath_clear_rcvhdrtail(pd);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl &
~(1ULL << dd->ipath_r_tailupd_shift));
}
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
/* Notify any waiting slaves */

@@ -1973,14 +1976,15 @@ static void unlock_expected_tids(struct ipath_portdata *pd)
ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
pd->port_port);
for (i = port_tidbase; i < maxtid; i++) {
if (!dd->ipath_pageshadow[i])
struct page *ps = dd->ipath_pageshadow[i];

if (!ps)
continue;

dd->ipath_pageshadow[i] = NULL;
pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
PAGE_SIZE, PCI_DMA_FROMDEVICE);
ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
1);
dd->ipath_pageshadow[i] = NULL;
ipath_release_user_pages_on_close(&ps, 1);
cnt++;
ipath_stats.sps_pageunlocks++;
}

@@ -306,7 +306,9 @@ static const struct ipath_cregs ipath_ht_cregs = {

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
#define INFINIPATH_I_RCVURG_SHIFT 0
#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
#define INFINIPATH_I_RCVAVAIL_SHIFT 12

/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0

@@ -316,7 +316,9 @@ static const struct ipath_cregs ipath_pe_cregs = {

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
#define INFINIPATH_I_RCVURG_SHIFT 0
#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
#define INFINIPATH_I_RCVAVAIL_SHIFT 12

/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL

@@ -219,14 +219,14 @@ static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
pd->port_cnt = 1;
/* The port 0 pkey table is used by the layer interface. */
pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
pd->port_seq_cnt = 1;
}
return pd;
}

static int init_chip_first(struct ipath_devdata *dd,
struct ipath_portdata **pdp)
static int init_chip_first(struct ipath_devdata *dd)
{
struct ipath_portdata *pd = NULL;
struct ipath_portdata *pd;
int ret = 0;
u64 val;

@@ -242,12 +242,14 @@ static int init_chip_first(struct ipath_devdata *dd,
else if (ipath_cfgports <= dd->ipath_portcnt) {
dd->ipath_cfgports = ipath_cfgports;
ipath_dbg("Configured to use %u ports out of %u in chip\n",
dd->ipath_cfgports, dd->ipath_portcnt);
dd->ipath_cfgports, ipath_read_kreg32(dd,
dd->ipath_kregs->kr_portcnt));
} else {
dd->ipath_cfgports = dd->ipath_portcnt;
ipath_dbg("Tried to configured to use %u ports; chip "
"only supports %u\n", ipath_cfgports,
dd->ipath_portcnt);
ipath_read_kreg32(dd,
dd->ipath_kregs->kr_portcnt));
}
/*
* Allocate full portcnt array, rather than just cfgports, because

@@ -324,36 +326,39 @@ static int init_chip_first(struct ipath_devdata *dd,
mutex_init(&dd->ipath_eep_lock);

done:
*pdp = pd;
return ret;
}

/**
* init_chip_reset - re-initialize after a reset, or enable
* @dd: the infinipath device
* @pdp: output for port data
*
* sanity check at least some of the values after reset, and
* ensure no receive or transmit (explictly, in case reset
* failed
*/
static int init_chip_reset(struct ipath_devdata *dd,
struct ipath_portdata **pdp)
static int init_chip_reset(struct ipath_devdata *dd)
{
u32 rtmp;
int i;

/*
* ensure chip does no sends or receives, tail updates, or
* pioavail updates while we re-initialize
*/
dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
for (i = 0; i < dd->ipath_portcnt; i++) {
clear_bit(dd->ipath_r_portenable_shift + i,
&dd->ipath_rcvctrl);
clear_bit(dd->ipath_r_intravail_shift + i,
&dd->ipath_rcvctrl);
}
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);

*pdp = dd->ipath_pd[0];
/* ensure chip does no sends or receives while we re-initialize */
dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);

rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
if (dd->ipath_portcnt != rtmp)
dev_info(&dd->pcidev->dev, "portcnt was %u before "
"reset, now %u, using original\n",
dd->ipath_portcnt, rtmp);
rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
if (rtmp != dd->ipath_rcvtidcnt)
dev_info(&dd->pcidev->dev, "tidcnt was %u before "

@@ -456,10 +461,10 @@ static void init_shadow_tids(struct ipath_devdata *dd)
dd->ipath_physshadow = addrs;
}

static void enable_chip(struct ipath_devdata *dd,
struct ipath_portdata *pd, int reinit)
static void enable_chip(struct ipath_devdata *dd, int reinit)
{
u32 val;
u64 rcvmask;
unsigned long flags;
int i;

@@ -478,12 +483,15 @@ static void enable_chip(struct ipath_devdata *dd,
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

/*
* enable port 0 receive, and receive interrupt. other ports
* done as user opens and inits them.
* Enable kernel ports' receive and receive interrupt.
* Other ports done as user opens and inits them.
*/
dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
(1ULL << dd->ipath_r_portenable_shift) |
(1ULL << dd->ipath_r_intravail_shift);
rcvmask = 1ULL;
dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
(rcvmask << dd->ipath_r_intravail_shift);
if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);

ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);

@@ -494,8 +502,8 @@ static void enable_chip(struct ipath_devdata *dd,
dd->ipath_flags |= IPATH_INITTED;

/*
* init our shadow copies of head from tail values, and write
* head values to match.
* Init our shadow copies of head from tail values,
* and write head values to match.
*/
val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);

@@ -529,8 +537,7 @@ static void enable_chip(struct ipath_devdata *dd,
dd->ipath_flags |= IPATH_PRESENT;
}

static int init_housekeeping(struct ipath_devdata *dd,
struct ipath_portdata **pdp, int reinit)
static int init_housekeeping(struct ipath_devdata *dd, int reinit)
{
char boardn[32];
int ret = 0;

@@ -591,18 +598,9 @@ static int init_housekeeping(struct ipath_devdata *dd,
ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
INFINIPATH_E_RESET);

if (reinit)
ret = init_chip_reset(dd, pdp);
else
ret = init_chip_first(dd, pdp);

if (ret)
goto done;

ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
"%u egrtids\n", (unsigned long long) dd->ipath_revision,
dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
dd->ipath_rcvegrcnt);
ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
(unsigned long long) dd->ipath_revision,
dd->ipath_pcirev);

if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {

@@ -641,6 +639,14 @@ static int init_housekeeping(struct ipath_devdata *dd,

ipath_dbg("%s", dd->ipath_boardversion);

if (ret)
goto done;

if (reinit)
ret = init_chip_reset(dd);
else
ret = init_chip_first(dd);

done:
return ret;
}

@@ -666,11 +672,11 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
u32 val32, kpiobufs;
u32 piobufs, uports;
u64 val;
struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
struct ipath_portdata *pd;
gfp_t gfp_flags = GFP_USER | __GFP_COMP;
unsigned long flags;

ret = init_housekeeping(dd, &pd, reinit);
ret = init_housekeeping(dd, reinit);
if (ret)
goto done;

@@ -690,7 +696,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
* we now use routines that backend onto __get_free_pages, the
* rest would be wasted.
*/
dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
dd->ipath_rcvhdrcnt);

@@ -721,8 +727,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
int i = (int) piobufs -
(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
if (i < 0)
i = 0;
if (i < 1)
i = 1;
dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
"%d for kernel leaves too few for %d user ports "
"(%d each); using %u\n", kpiobufs,

@@ -741,6 +747,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
ipath_dbg("allocating %u pbufs/port leaves %u unused, "
"add to kernel\n", dd->ipath_pbufsport, val32);
dd->ipath_lastport_piobuf -= val32;
kpiobufs += val32;
ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
dd->ipath_pbufsport, val32);
}

@@ -759,8 +766,10 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
*/
ipath_cancel_sends(dd, 0);

/* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
* done after early_init */
/*
* Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
* done after early_init.
*/
dd->ipath_hdrqlast =
dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,

@@ -835,58 +844,65 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
/* enable errors that are masked, at least this first time. */
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
~dd->ipath_maskederrs);
dd->ipath_errormask = ipath_read_kreg64(dd,
dd->ipath_kregs->kr_errormask);
dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
dd->ipath_errormask =
ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
/* clear any interrupts up to this point (ints still not enabled) */
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);

dd->ipath_f_tidtemplate(dd);

/*
* Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
* re-init, the simplest way to handle this is to free
* existing, and re-allocate.
* Need to re-create rest of port 0 portdata as well.
*/
pd = dd->ipath_pd[0];
if (reinit) {
/* Alloc and init new ipath_portdata for port0,
struct ipath_portdata *npd;

/*
* Alloc and init new ipath_portdata for port0,
* Then free old pd. Could lead to fragmentation, but also
* makes later support for hot-swap easier.
*/
struct ipath_portdata *npd;
npd = create_portdata0(dd);
if (npd) {
ipath_free_pddata(dd, pd);
dd->ipath_pd[0] = pd = npd;
dd->ipath_pd[0] = npd;
pd = npd;
} else {
ipath_dev_err(dd, "Unable to allocate portdata for"
" port 0, failing\n");
ipath_dev_err(dd, "Unable to allocate portdata"
" for port 0, failing\n");
ret = -ENOMEM;
goto done;
}
}
dd->ipath_f_tidtemplate(dd);
ret = ipath_create_rcvhdrq(dd, pd);
if (!ret) {
dd->ipath_hdrqtailptr =
(volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
if (!ret)
ret = create_port0_egr(dd);
}
if (ret)
ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
if (ret) {
ipath_dev_err(dd, "failed to allocate kernel port's "
"rcvhdrq and/or egr bufs\n");
goto done;
}
else
enable_chip(dd, pd, reinit);
enable_chip(dd, reinit);

if (!ret && !reinit) {
/* used when we close a port, for DMA already in flight at close */
if (!reinit) {
/*
* Used when we close a port, for DMA already in flight
* at close.
*/
dd->ipath_dummy_hdrq = dma_alloc_coherent(
&dd->pcidev->dev, pd->port_rcvhdrq_size,
&dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
&dd->ipath_dummy_hdrq_phys,
gfp_flags);
if (!dd->ipath_dummy_hdrq) {
dev_info(&dd->pcidev->dev,
"Couldn't allocate 0x%lx bytes for dummy hdrq\n",
pd->port_rcvhdrq_size);
dd->ipath_pd[0]->port_rcvhdrq_size);
/* fallback to just 0'ing */
dd->ipath_dummy_hdrq_phys = 0UL;
}

@@ -695,8 +695,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
struct ipath_portdata *pd = dd->ipath_pd[i];
if (i == 0) {
hd = pd->port_head;
tl = (u32) le64_to_cpu(
*dd->ipath_hdrqtailptr);
tl = ipath_get_hdrqtail(pd);
} else if (pd && pd->port_cnt &&
pd->port_rcvhdrtail_kvaddr) {
/*

@@ -732,8 +731,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* vs user)
*/
ipath_stats.sps_etidfull++;
if (pd->port_head !=
(u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
if (pd->port_head != ipath_get_hdrqtail(pd))
chkerrpkts = 1;
}

@@ -952,7 +950,7 @@ set:
* process was waiting for a packet to arrive, and didn't want
* to poll
*/
static void handle_urcv(struct ipath_devdata *dd, u32 istat)
static void handle_urcv(struct ipath_devdata *dd, u64 istat)
{
u64 portr;
int i;

@@ -968,9 +966,9 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
* and ipath_poll_next()...
*/
rmb();
portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
dd->ipath_i_rcvavail_mask)
| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
portr = ((istat >> dd->ipath_i_rcvavail_shift) &
dd->ipath_i_rcvavail_mask) |
((istat >> dd->ipath_i_rcvurg_shift) &
dd->ipath_i_rcvurg_mask);
for (i = 1; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i];

@@ -991,7 +989,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
}
if (rcvdint) {
/* only want to take one interrupt, so turn off the rcv
* interrupt for all the ports that we did the wakeup on
* interrupt for all the ports that we set the rcv_waiting
* (but never for kernel port)
*/
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,

@@ -1006,8 +1004,7 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_err_t estat = 0;
irqreturn_t ret;
static unsigned unexpected = 0;
static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
(1U<<INFINIPATH_I_RCVURG_SHIFT);
u64 kportrbits;

ipath_stats.sps_ints++;

@@ -1076,9 +1073,7 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_dev_err(dd, "Read of error status failed "
"(all bits set); ignoring\n");
else
if (handle_errors(dd, estat))
/* force calling ipath_kreceive() */
chk0rcv = 1;
chk0rcv |= handle_errors(dd, estat);
}

if (istat & INFINIPATH_I_GPIO) {

@@ -1158,7 +1153,6 @@ irqreturn_t ipath_intr(int irq, void *data)
(u64) to_clear);
}
}
chk0rcv |= istat & port0rbits;

/*
* Clear the interrupt bits we found set, unless they are receive

@@ -1171,20 +1165,20 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);

/*
* handle port0 receive before checking for pio buffers available,
* since receives can overflow; piobuf waiters can afford a few
* extra cycles, since they were waiting anyway, and user's waiting
* for receive are at the bottom.
* Handle kernel receive queues before checking for pio buffers
* available since receives can overflow; piobuf waiters can afford
* a few extra cycles, since they were waiting anyway, and user's
* waiting for receive are at the bottom.
*/
if (chk0rcv) {
kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
(1ULL << dd->ipath_i_rcvurg_shift);
if (chk0rcv || (istat & kportrbits)) {
istat &= ~kportrbits;
ipath_kreceive(dd->ipath_pd[0]);
istat &= ~port0rbits;
}

if (istat & ((dd->ipath_i_rcvavail_mask <<
INFINIPATH_I_RCVAVAIL_SHIFT)
| (dd->ipath_i_rcvurg_mask <<
INFINIPATH_I_RCVURG_SHIFT)))
if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
(dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
handle_urcv(dd, istat);

if (istat & INFINIPATH_I_SPIOBUFAVAIL) {

@@ -175,6 +175,8 @@ struct ipath_portdata {
u16 poll_type;
/* port rcvhdrq head offset */
u32 port_head;
/* receive packet sequence counter */
u32 port_seq_cnt;
};

struct sk_buff;

@@ -224,11 +226,6 @@ struct ipath_devdata {
unsigned long ipath_physaddr;
/* base of memory alloced for ipath_kregbase, for free */
u64 *ipath_kregalloc;
/*
* virtual address where port0 rcvhdrqtail updated for this unit.
* only written to by the chip, not the driver.
*/
volatile __le64 *ipath_hdrqtailptr;
/* ipath_cfgports pointers */
struct ipath_portdata **ipath_pd;
/* sk_buffs used by port 0 eager receive queue */

@@ -286,6 +283,7 @@ struct ipath_devdata {
/* per chip actions needed for IB Link up/down changes */
int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);

unsigned ipath_lastegr_idx;
struct ipath_ibdev *verbs_dev;
struct timer_list verbs_timer;
/* total dwords sent (summed from counter) */

@@ -593,14 +591,6 @@ struct ipath_devdata {
u8 ipath_minrev;
/* board rev, from ipath_revision */
u8 ipath_boardrev;

u8 ipath_r_portenable_shift;
u8 ipath_r_intravail_shift;
u8 ipath_r_tailupd_shift;
u8 ipath_r_portcfg_shift;

/* unit # of this chip, if present */
int ipath_unit;
/* saved for restore after reset */
u8 ipath_pci_cacheline;
/* LID mask control */

@@ -616,6 +606,14 @@ struct ipath_devdata {
/* Rx Polarity inversion (compensate for ~tx on partner) */
u8 ipath_rx_pol_inv;

u8 ipath_r_portenable_shift;
u8 ipath_r_intravail_shift;
u8 ipath_r_tailupd_shift;
u8 ipath_r_portcfg_shift;

/* unit # of this chip, if present */
int ipath_unit;

/* local link integrity counter */
u32 ipath_lli_counter;
/* local link integrity errors */

@@ -645,8 +643,8 @@ struct ipath_devdata {
* Below should be computable from number of ports,
* since they are never modified.
*/
u32 ipath_i_rcvavail_mask;
u32 ipath_i_rcvurg_mask;
u64 ipath_i_rcvavail_mask;
u64 ipath_i_rcvurg_mask;
u16 ipath_i_rcvurg_shift;
u16 ipath_i_rcvavail_shift;

@@ -835,6 +833,8 @@ void ipath_hol_event(unsigned long);
#define IPATH_LINKUNK 0x400
/* Write combining flush needed for PIO */
#define IPATH_PIO_FLUSH_WC 0x1000
/* DMA Receive tail pointer */
#define IPATH_NODMA_RTAIL 0x2000
/* no IB cable, or no device on IB cable */
#define IPATH_NOCABLE 0x4000
/* Supports port zero per packet receive interrupts via

@@ -845,9 +845,9 @@ void ipath_hol_event(unsigned long);
/* packet/word counters are 32 bit, else those 4 counters
* are 64bit */
#define IPATH_32BITCOUNTERS 0x20000
/* can miss port0 rx interrupts */
/* Interrupt register is 64 bits */
#define IPATH_INTREG_64 0x40000
/* can miss port0 rx interrupts */
#define IPATH_DISABLED 0x80000 /* administratively disabled */
/* Use GPIO interrupts for new counters */
#define IPATH_GPIO_ERRINTRS 0x100000

@@ -1035,6 +1035,27 @@ static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
pd->port_rcvhdrtail_kvaddr));
}

static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
{
const struct ipath_devdata *dd = pd->port_dd;
u32 hdrqtail;

if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
__le32 *rhf_addr;
u32 seq;

rhf_addr = (__le32 *) pd->port_rcvhdrq +
pd->port_head + dd->ipath_rhf_offset;
seq = ipath_hdrget_seq(rhf_addr);
hdrqtail = pd->port_head;
if (seq == pd->port_seq_cnt)
hdrqtail++;
} else
hdrqtail = ipath_get_rcvhdrtail(pd);

return hdrqtail;
}

static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
{
return (dd->ipath_flags & IPATH_INTREG_64) ?

@@ -86,8 +86,6 @@
#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define INFINIPATH_I_RCVURG_SHIFT 0
#define INFINIPATH_I_RCVAVAIL_SHIFT 12
#define INFINIPATH_I_ERROR 0x80000000
#define INFINIPATH_I_SPIOSENT 0x40000000
#define INFINIPATH_I_SPIOBUFAVAIL 0x20000000

@@ -136,6 +136,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
struct ipath_portdata *pd = dd->ipath_pd[0];
size_t blen = 0;
char buf[128];
u32 hdrqtail;

*buf = 0;
if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {

@@ -174,17 +175,18 @@ static void ipath_qcheck(struct ipath_devdata *dd)
if (blen)
ipath_dbg("%s\n", buf);

if (pd->port_head != (u32)
le64_to_cpu(*dd->ipath_hdrqtailptr)) {
hdrqtail = ipath_get_hdrqtail(pd);
if (pd->port_head != hdrqtail) {
if (dd->ipath_lastport0rcv_cnt ==
ipath_stats.sps_port0pkts) {
ipath_cdbg(PKT, "missing rcv interrupts? "
"port0 hd=%llx tl=%x; port0pkts %llx\n",
(unsigned long long)
le64_to_cpu(*dd->ipath_hdrqtailptr),
pd->port_head,
"port0 hd=%x tl=%x; port0pkts %llx; write"
" hd (w/intr)\n",
pd->port_head, hdrqtail,
(unsigned long long)
ipath_stats.sps_port0pkts);
ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
dd->ipath_rhdrhead_intr_off, pd->port_port);
}
dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
}