Merge branch 'cxgb4-next'
Hariprasad Shenai says:

====================
RDMA/cxgb4/cxgb4vf/csiostor: Cleanup register defines

This series continues the cleanup of the macros/register defines related to
SGE, PCIE, MC, MA, TCAM, MAC, etc. that are defined in t4_regs.h, and of the
files affected by them. One or two more series will follow so that all the
macros are covered and follow the same, consistent style.

The patch series is created against the 'net-next' tree and includes patches
for the cxgb4, cxgb4vf, iw_cxgb4 and csiostor drivers. We have included all
the maintainers of the respective drivers. Kindly review the changes and let
us know in case of any review comments.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a515abd777
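The rename pattern applied throughout the series is mechanical: OR-style
spellings such as FOO() or V_FOO() become FOO_V(), FOO_GET() or G_FOO()
become FOO_G(), FOO_MASK or M_FOO becomes FOO_M for multi-bit fields
(FOO_F for single-bit flags), and register addresses gain an _A suffix.
As a rough sketch (the field FOO, its offset and width are hypothetical),
each register field ends up described by defines of this shape:

	#define FOO_S    4                                /* _S: bit offset of the field */
	#define FOO_M    0x7U                             /* _M: unshifted field mask */
	#define FOO_V(x) ((x) << FOO_S)                   /* _V: place a value into the field */
	#define FOO_G(x) (((x) >> FOO_S) & FOO_M)         /* _G: extract the field's value */
	#define FOO_F    FOO_V(1U)                        /* _F: flag form for single-bit fields */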
@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
 	} else {
 		PDBG("%s: DB wq->sq.pidx = %d\n",
 		     __func__, wq->sq.pidx);
-		writel(PIDX_T5(inc), wq->sq.udb);
+		writel(PIDX_T5_V(inc), wq->sq.udb);
 	}

 	/* Flush user doorbell area writes. */
 	wmb();
 	return;
 }
-	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+	writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
 }

 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
 	} else {
 		PDBG("%s: DB wq->rq.pidx = %d\n",
 		     __func__, wq->rq.pidx);
-		writel(PIDX_T5(inc), wq->rq.udb);
+		writel(PIDX_T5_V(inc), wq->rq.udb);
 	}

 	/* Flush user doorbell area writes. */
 	wmb();
 	return;
 }
-	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+	writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
 }

 static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
 	u32 val;

 	set_bit(CQ_ARMED, &cq->flags);
-	while (cq->cidx_inc > CIDXINC_MASK) {
-		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
-		      INGRESSQID(cq->cqid);
+	while (cq->cidx_inc > CIDXINC_M) {
+		val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
+		      INGRESSQID_V(cq->cqid);
 		writel(val, cq->gts);
-		cq->cidx_inc -= CIDXINC_MASK;
+		cq->cidx_inc -= CIDXINC_M;
 	}
-	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-	      INGRESSQID(cq->cqid);
+	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
+	      INGRESSQID_V(cq->cqid);
 	writel(val, cq->gts);
 	cq->cidx_inc = 0;
 	return 0;
@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
 	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
+	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
 		u32 val;

-		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
-		      INGRESSQID(cq->cqid);
+		val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
+		      INGRESSQID_V(cq->cqid);
 		writel(val, cq->gts);
 		cq->cidx_inc = 0;
 	}
@@ -66,6 +66,7 @@

 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
 #include "cxgb4_dcb.h"
@@ -358,8 +359,8 @@ MODULE_PARM_DESC(select_queue,
  */
 enum {
 	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
-	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
-	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
+	TP_VLAN_PRI_MAP_FIRST = FCOE_S,
+	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_S,
 };

 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
@@ -833,11 +834,11 @@ static void disable_msi(struct adapter *adapter)
 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
 {
 	struct adapter *adap = cookie;
-	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
+	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

-	if (v & PFSW) {
+	if (v & PFSW_F) {
 		adap->swintr = 1;
-		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
 	}
 	t4_slow_intr_handler(adap);
 	return IRQ_HANDLED;
@@ -1050,9 +1051,9 @@ static void enable_rx(struct adapter *adap)
 		if (q->handler)
 			napi_enable(&q->napi);
 		/* 0-increment GTS to start the timer and enable interrupts */
-		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
-			     SEINTARM(q->intr_params) |
-			     INGRESSQID(q->cntxt_id));
+		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+			     SEINTARM_V(q->intr_params) |
+			     INGRESSQID_V(q->cntxt_id));
 	}
 }
@@ -1176,10 +1177,10 @@ freeout: t4_free_sge_resources(adap);
 	}

 	t4_write_reg(adap, is_t4(adap->params.chip) ?
-		     MPS_TRC_RSS_CONTROL :
-		     MPS_T5_TRC_RSS_CONTROL,
-		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
-		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+		     MPS_TRC_RSS_CONTROL_A :
+		     MPS_T5_TRC_RSS_CONTROL_A,
+		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
+		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
 	return 0;
 }
@@ -1589,9 +1590,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
 	data += sizeof(struct queue_port_stats) / sizeof(u64);
 	if (!is_t4(adapter->params.chip)) {
-		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
-		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
-		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
+		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
+		val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
 		*data = val1 - val2;
 		data++;
 		*data = val2;
@@ -3600,14 +3601,14 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
 	struct adapter *adap = netdev2adap(dev);
 	u32 v1, v2, lp_count, hp_count;

-	v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
-	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
 	if (is_t4(adap->params.chip)) {
-		lp_count = G_LP_COUNT(v1);
-		hp_count = G_HP_COUNT(v1);
+		lp_count = LP_COUNT_G(v1);
+		hp_count = HP_COUNT_G(v1);
 	} else {
-		lp_count = G_LP_COUNT_T5(v1);
-		hp_count = G_HP_COUNT_T5(v2);
+		lp_count = LP_COUNT_T5_G(v1);
+		hp_count = HP_COUNT_T5_G(v2);
 	}
 	return lpfifo ? lp_count : hp_count;
 }
@@ -3653,10 +3654,10 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
 {
 	struct adapter *adap = netdev2adap(dev);

-	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
-	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
-		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
-		     HPZ3(pgsz_order[3]));
+	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
+	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
+		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
+		     HPZ3_V(pgsz_order[3]));
 }
 EXPORT_SYMBOL(cxgb4_iscsi_init);
@@ -3666,14 +3667,14 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
 	int ret;

 	ret = t4_fwaddrspace_write(adap, adap->mbox,
-				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
+				   0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
 	return ret;
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);

 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
 {
-	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
+	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
 	__be64 indices;
 	int ret;

@@ -3702,14 +3703,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,

 	if (pidx != hw_pidx) {
 		u16 delta;
+		u32 val;

 		if (pidx >= hw_pidx)
 			delta = pidx - hw_pidx;
 		else
 			delta = size - hw_pidx + pidx;
+
+		if (is_t4(adap->params.chip))
+			val = PIDX_V(delta);
+		else
+			val = PIDX_T5_V(delta);
 		wmb();
-		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-			     QID(qid) | PIDX(delta));
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+			     QID_V(qid) | val);
 	}
 out:
 	return ret;
@@ -3721,8 +3728,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
 	struct adapter *adap;

 	adap = netdev2adap(dev);
-	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
-			 F_NOCOALESCE);
+	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
+			 NOCOALESCE_F);
 }
 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);

@@ -3731,7 +3738,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
 	struct adapter *adap;

 	adap = netdev2adap(dev);
-	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
 }
 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);

@@ -3809,8 +3816,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
 	struct adapter *adap;

 	adap = netdev2adap(dev);
-	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
-	hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
+	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

 	return ((u64)hi << 32) | (u64)lo;
 }
@@ -3870,14 +3877,14 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
 	u32 v1, v2, lp_count, hp_count;

 	do {
-		v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
-		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
 		if (is_t4(adap->params.chip)) {
-			lp_count = G_LP_COUNT(v1);
-			hp_count = G_HP_COUNT(v1);
+			lp_count = LP_COUNT_G(v1);
+			hp_count = HP_COUNT_G(v1);
 		} else {
-			lp_count = G_LP_COUNT_T5(v1);
-			hp_count = G_HP_COUNT_T5(v2);
+			lp_count = LP_COUNT_T5_G(v1);
+			hp_count = HP_COUNT_T5_G(v2);
 		}

 		if (lp_count == 0 && hp_count == 0)
@@ -3904,8 +3911,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
 		 * are committed before we tell HW about them.
 		 */
 		wmb();
-		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-			     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
 		q->db_pidx_inc = 0;
 	}
 	q->db_disabled = 0;
@@ -3952,9 +3959,9 @@ static void process_db_full(struct work_struct *work)
 	drain_db_fifo(adap, dbfifo_drain_delay);
 	enable_dbs(adap);
 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-	t4_set_reg_field(adap, SGE_INT_ENABLE3,
-			 DBFIFO_HP_INT | DBFIFO_LP_INT,
-			 DBFIFO_HP_INT | DBFIFO_LP_INT);
+	t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
 }

 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3968,14 +3975,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
 		goto out;
 	if (q->db_pidx != hw_pidx) {
 		u16 delta;
+		u32 val;

 		if (q->db_pidx >= hw_pidx)
 			delta = q->db_pidx - hw_pidx;
 		else
 			delta = q->size - hw_pidx + q->db_pidx;
+
+		if (is_t4(adap->params.chip))
+			val = PIDX_V(delta);
+		else
+			val = PIDX_T5_V(delta);
 		wmb();
-		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-			     QID(q->cntxt_id) | PIDX(delta));
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+			     QID_V(q->cntxt_id) | val);
 	}
 out:
 	q->db_disabled = 0;
@@ -4024,14 +4037,14 @@ static void process_db_drop(struct work_struct *work)
 			dev_err(adap->pdev_dev, "doorbell drop recovery: "
 				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
 		else
-			writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
+			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
 			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

 		/* Re-enable BAR2 WC */
 		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
 	}

-	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
+	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
 }

 void t4_db_full(struct adapter *adap)
@@ -4039,8 +4052,8 @@ void t4_db_full(struct adapter *adap)
 	if (is_t4(adap->params.chip)) {
 		disable_dbs(adap);
 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
-		t4_set_reg_field(adap, SGE_INT_ENABLE3,
-				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
 		queue_work(adap->workq, &adap->db_full_task);
 	}
 }
@@ -4081,7 +4094,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.nports = adap->params.nports;
 	lli.wr_cred = adap->params.ofldq_wr_cred;
 	lli.adapter_type = adap->params.chip;
-	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
 	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
 	lli.udb_density = 1 << adap->params.sge.eq_qpp;
 	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4089,8 +4102,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
 	for (i = 0; i < NCHAN; i++)
 		lli.tx_modq[i] = i;
-	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
-	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
 	lli.fw_vers = adap->params.fw_vers;
 	lli.dbfifo_int_thresh = dbfifo_int_thresh;
 	lli.sge_ingpadboundary = adap->sge.fl_align;
@@ -4567,13 +4580,13 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
 			f->fs.val.lip[i] = val[i];
 			f->fs.mask.lip[i] = ~0;
 		}
-		if (adap->params.tp.vlan_pri_map & F_PORT) {
+		if (adap->params.tp.vlan_pri_map & PORT_F) {
 			f->fs.val.iport = port;
 			f->fs.mask.iport = mask;
 		}
 	}

-	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
 		f->fs.val.proto = IPPROTO_TCP;
 		f->fs.mask.proto = ~0;
 	}
@@ -4783,7 +4796,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {

 void t4_fatal_err(struct adapter *adap)
 {
-	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
 	t4_intr_disable(adap);
 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
@@ -4858,16 +4871,16 @@ static void setup_memwin(struct adapter *adap)
 		mem_win2_base = MEMWIN2_BASE_T5;
 		mem_win2_aperture = MEMWIN2_APERTURE_T5;
 	}
-	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
-		     mem_win0_base | BIR(0) |
-		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
-	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
-		     mem_win1_base | BIR(0) |
-		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
-	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
-		     mem_win2_base | BIR(0) |
-		     WINDOW(ilog2(mem_win2_aperture) - 10));
-	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
+		     mem_win0_base | BIR_V(0) |
+		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
+	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
+		     mem_win1_base | BIR_V(0) |
+		     WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
+	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
+		     mem_win2_base | BIR_V(0) |
+		     WINDOW_V(ilog2(mem_win2_aperture) - 10));
+	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
 }

 static void setup_memwin_rdma(struct adapter *adap)
@@ -4881,13 +4894,13 @@ static void setup_memwin_rdma(struct adapter *adap)
 		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
 		t4_write_reg(adap,
-			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
-			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
+			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
+			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
 		t4_write_reg(adap,
-			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
 			     adap->vres.ocq.start);
 		t4_read_reg(adap,
-			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
 	}
 }

@@ -4936,38 +4949,38 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
 	t4_sge_init(adap);

 	/* tweak some settings */
-	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
-	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
-	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
-	v = t4_read_reg(adap, TP_PIO_DATA);
-	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
+	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
+	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
+	v = t4_read_reg(adap, TP_PIO_DATA_A);
+	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

 	/* first 4 Tx modulation queues point to consecutive Tx channels */
 	adap->params.tp.tx_modq_map = 0xE4;
-	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
-		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
+		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

 	/* associate each Tx modulation queue with consecutive Tx channels */
 	v = 0x84218421;
-	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-			  &v, 1, A_TP_TX_SCHED_HDR);
-	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-			  &v, 1, A_TP_TX_SCHED_FIFO);
-	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-			  &v, 1, A_TP_TX_SCHED_PCMD);
+	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+			  &v, 1, TP_TX_SCHED_HDR_A);
+	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+			  &v, 1, TP_TX_SCHED_FIFO_A);
+	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+			  &v, 1, TP_TX_SCHED_PCMD_A);

 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
 	if (is_offload(adap)) {
-		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
-			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
-		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
-			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
+			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
+			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
 	}

 	/* get basic stuff going */
@@ -5013,16 +5026,16 @@ static int adap_init0_tweaks(struct adapter *adapter)
 			rx_dma_offset);
 		rx_dma_offset = 2;
 	}
-	t4_set_reg_field(adapter, SGE_CONTROL,
-			 PKTSHIFT_MASK,
-			 PKTSHIFT(rx_dma_offset));
+	t4_set_reg_field(adapter, SGE_CONTROL_A,
+			 PKTSHIFT_V(PKTSHIFT_M),
+			 PKTSHIFT_V(rx_dma_offset));

 	/*
 	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
 	 * adds the pseudo header itself.
 	 */
-	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
-			       CSUM_HAS_PSEUDO_HDR, 0);
+	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
+			       CSUM_HAS_PSEUDO_HDR_F, 0);

 	return 0;
 }
@@ -5046,7 +5059,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
 	 */
 	if (reset) {
 		ret = t4_fw_reset(adapter, adapter->mbox,
-				  PIORSTMODE | PIORST);
+				  PIORSTMODE_F | PIORST_F);
 		if (ret < 0)
 			goto bye;
 	}
@@ -5251,7 +5264,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 	 */
 	if (reset) {
 		ret = t4_fw_reset(adapter, adapter->mbox,
-				  PIORSTMODE | PIORST);
+				  PIORSTMODE_F | PIORST_F);
 		if (ret < 0)
 			goto bye;
 	}
@@ -5332,8 +5345,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
 	s->counter_val[0] = 1;
 	for (i = 1; i < SGE_NCOUNTERS; i++)
-		s->counter_val[i] = min(intr_cnt[i - 1],
-					THRESHOLD_0_GET(THRESHOLD_0_MASK));
+		s->counter_val[i] = min(intr_cnt[i - 1], THRESHOLD_0_M);
 	t4_sge_init(adapter);

 #ifdef CONFIG_PCI_IOV
@@ -5389,34 +5401,34 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 		case 0:
 			/* compressed filter field not enabled */
 			break;
-		case FCOE_MASK:
+		case FCOE_F:
 			bits += 1;
 			break;
-		case PORT_MASK:
+		case PORT_F:
 			bits += 3;
 			break;
-		case VNIC_ID_MASK:
+		case VNIC_F:
 			bits += 17;
 			break;
-		case VLAN_MASK:
+		case VLAN_F:
 			bits += 17;
 			break;
-		case TOS_MASK:
+		case TOS_F:
 			bits += 8;
 			break;
-		case PROTOCOL_MASK:
+		case PROTOCOL_F:
 			bits += 8;
 			break;
-		case ETHERTYPE_MASK:
+		case ETHERTYPE_F:
 			bits += 16;
 			break;
-		case MACMATCH_MASK:
+		case MACMATCH_F:
 			bits += 9;
 			break;
-		case MPSHITTYPE_MASK:
+		case MPSHITTYPE_F:
 			bits += 3;
 			break;
-		case FRAGMENTATION_MASK:
+		case FRAGMENTATION_F:
 			bits += 1;
 			break;
 		}
@@ -5430,8 +5442,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 		}
 	}
 	v = tp_vlan_pri_map;
-	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
-			  &v, 1, TP_VLAN_PRI_MAP);
+	t4_write_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+			  &v, 1, TP_VLAN_PRI_MAP_A);

 	/*
 	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
@@ -5444,17 +5456,17 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 	 * performance impact).
 	 */
 	if (tp_vlan_pri_map)
-		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
-				 FIVETUPLELOOKUP_MASK,
-				 FIVETUPLELOOKUP_MASK);
+		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG_A,
+				 FIVETUPLELOOKUP_V(FIVETUPLELOOKUP_M),
+				 FIVETUPLELOOKUP_V(FIVETUPLELOOKUP_M));

 	/*
 	 * Tweak some settings.
 	 */
-	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
-		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
-		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
-		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
+	t4_write_reg(adapter, TP_SHIFT_CNT_A, SYNSHIFTMAX_V(6) |
+		     RXTSHIFTMAXR1_V(4) | RXTSHIFTMAXR2_V(15) |
+		     PERSHIFTBACKOFFMAX_V(8) | PERSHIFTMAX_V(8) |
+		     KEEPALIVEMAXR1_V(4) | KEEPALIVEMAXR2_V(9));

 	/*
 	 * Get basic stuff going by issuing the Firmware Initialize command.
@@ -6401,7 +6413,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_unmap_bar0;

 	/* We control everything through one PF */
-	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+	func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
 	if (func != ent->driver_data) {
 		iounmap(regs);
 		pci_disable_device(pdev);
@@ -6467,9 +6479,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)


 	if (!is_t4(adapter->params.chip)) {
-		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
-		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
-		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+		s_qpp = (QUEUESPERPAGEPF0_S +
+			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
+			adapter->fn);
+		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
+		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
 		num_seg = PAGE_SIZE / SEGMENT_SIZE;

 		/* Each segment size is 128B. Write coalescing is enabled only
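Many of the conversions above funnel through t4_set_reg_field(). Its
semantics, paraphrased here as a sketch rather than quoted from the driver,
are a read-modify-write that replaces only the bits selected by mask:

	u32 v = t4_read_reg(adap, addr) & ~mask;	/* clear the field */
	t4_write_reg(adap, addr, v | val);		/* write the new value */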
@@ -46,6 +46,7 @@
 #include "t4_msg.h"
 #include "t4fw_api.h"
 #include "t4_regs.h"
+#include "t4_values.h"

 #define VLAN_NONE 0xfff

@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 	 * in the Compressed Filter Tuple.
 	 */
 	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
-		ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+		ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

 	if (tp->port_shift >= 0)
 		ntuple |= (u64)l2t->lport << tp->port_shift;
@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 		u32 pf = FW_VIID_PFN_G(viid);
 		u32 vld = FW_VIID_VIVLD_G(viid);

-		ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
-				V_FT_VNID_ID_PF(pf) |
-				V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
+				FT_VNID_ID_PF_V(pf) |
+				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
 	}

 	return ntuple;
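For reference, the FT_VNID_ID_* helpers used above (introduced in
t4_values.h later in this diff) pack the VNIC field the same way the old
V_FT_VNID_ID_* macros did: VF at bit 0, PF at bit 7, valid bit at bit 16.
A worked example with hypothetical values pf = 4, vf = 10, vld = 1:

	u32 vnid = FT_VNID_ID_VF_V(10) |	/* 10 << 0  = 0x0000a */
		   FT_VNID_ID_PF_V(4)  |	/*  4 << 7  = 0x00200 */
		   FT_VNID_ID_VLD_V(1);		/*  1 << 16 = 0x10000, so vnid = 0x1020a */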
@@ -45,6 +45,7 @@
 #include <net/tcp.h>
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"

@@ -521,10 +522,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 {
 	u32 val;
 	if (q->pend_cred >= 8) {
-		val = PIDX(q->pend_cred / 8);
-		if (!is_t4(adap->params.chip))
-			val |= DBTYPE(1);
-		val |= DBPRIO(1);
+		if (is_t4(adap->params.chip))
+			val = PIDX_V(q->pend_cred / 8);
+		else
+			val = PIDX_T5_V(q->pend_cred / 8) |
+			      DBTYPE_F;
+		val |= DBPRIO_F;
 		wmb();

 		/* If we don't have access to the new User Doorbell (T5+), use
@@ -532,10 +535,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 		 * mechanism.
 		 */
 		if (unlikely(q->bar2_addr == NULL)) {
-			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-				     val | QID(q->cntxt_id));
+			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+				     val | QID_V(q->cntxt_id));
 		} else {
-			writel(val | QID(q->bar2_qid),
+			writel(val | QID_V(q->bar2_qid),
 			       q->bar2_addr + SGE_UDB_KDOORBELL);

 			/* This Write memory Barrier will force the write to
@@ -884,7 +887,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
 	 */
 	if (unlikely(q->bar2_addr == NULL)) {
-		u32 val = PIDX(n);
+		u32 val = PIDX_V(n);
 		unsigned long flags;

 		/* For T4 we need to participate in the Doorbell Recovery
@@ -892,14 +895,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 		 */
 		spin_lock_irqsave(&q->db_lock, flags);
 		if (!q->db_disabled)
-			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-				     QID(q->cntxt_id) | val);
+			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+				     QID_V(q->cntxt_id) | val);
 		else
 			q->db_pidx_inc += n;
 		q->db_pidx = q->pidx;
 		spin_unlock_irqrestore(&q->db_lock, flags);
 	} else {
-		u32 val = PIDX_T5(n);
+		u32 val = PIDX_T5_V(n);

 		/* T4 and later chips share the same PIDX field offset within
 		 * the doorbell, but T5 and later shrank the field in order to
@@ -907,7 +910,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 		 * large in the first place (14 bits) so we just use the T5
 		 * and later limits and warn if a Queue ID is too large.
 		 */
-		WARN_ON(val & DBPRIO(1));
+		WARN_ON(val & DBPRIO_F);

 		/* If we're only writing a single TX Descriptor and we can use
 		 * Inferred QID registers, we can use the Write Combining
@@ -923,7 +926,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 			       (q->bar2_addr + SGE_UDB_WCDOORBELL),
 			       wr);
 		} else {
-			writel(val | QID(q->bar2_qid),
+			writel(val | QID_V(q->bar2_qid),
 			       q->bar2_addr + SGE_UDB_KDOORBELL);
 		}

@@ -2001,16 +2004,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 	} else
 		params = QINTR_TIMER_IDX(7);

-	val = CIDXINC(work_done) | SEINTARM(params);
+	val = CIDXINC_V(work_done) | SEINTARM_V(params);

 	/* If we don't have access to the new User GTS (T5+), use the old
 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
 	 */
 	if (unlikely(q->bar2_addr == NULL)) {
-		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
-			     val | INGRESSQID((u32)q->cntxt_id));
+		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+			     val | INGRESSQID_V((u32)q->cntxt_id));
 	} else {
-		writel(val | INGRESSQID(q->bar2_qid),
+		writel(val | INGRESSQID_V(q->bar2_qid),
 		       q->bar2_addr + SGE_UDB_GTS);
 		wmb();
 	}
@@ -2056,16 +2059,16 @@ static unsigned int process_intrq(struct adapter *adap)
 		rspq_next(q);
 	}

-	val = CIDXINC(credits) | SEINTARM(q->intr_params);
+	val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);

 	/* If we don't have access to the new User GTS (T5+), use the old
 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
 	 */
 	if (unlikely(q->bar2_addr == NULL)) {
-		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
-			     val | INGRESSQID(q->cntxt_id));
+		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+			     val | INGRESSQID_V(q->cntxt_id));
 	} else {
-		writel(val | INGRESSQID(q->bar2_qid),
+		writel(val | INGRESSQID_V(q->bar2_qid),
 		       q->bar2_addr + SGE_UDB_GTS);
 		wmb();
 	}
@@ -2095,7 +2098,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
 {
 	struct adapter *adap = cookie;

-	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
 	if (t4_slow_intr_handler(adap) | process_intrq(adap))
 		return IRQ_HANDLED;
 	return IRQ_NONE; /* probably shared interrupt */
@@ -2142,9 +2145,9 @@ static void sge_rx_timer_cb(unsigned long data)
 		}
 	}

-	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
-	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
-	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+	t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
+	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
+	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);

 	for (i = 0; i < 2; i++) {
 		u32 debug0, debug11;
@@ -2188,12 +2191,12 @@ static void sge_rx_timer_cb(unsigned long data)
 		/* Read and save the SGE IDMA State and Queue ID information.
 		 * We do this every time in case it changes across time ...
 		 */
-		t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
-		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
+		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
 		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

-		t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
-		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
+		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
 		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

 		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
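The SGE_DEBUG_* accesses in sge_rx_timer_cb() above follow an
indirect-register idiom: select a debug row by writing its index, then read
the 64 bits of data back through the paired data registers. A condensed
sketch using the renamed defines:

	t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);	/* select debug row 13 */
	hi = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);	/* upper 32 bits of the row */
	lo = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);	/* lower 32 bits of the row */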
@@ -2770,8 +2773,8 @@ static int t4_sge_init_soft(struct adapter *adap)
 	 * process_responses() and that only packet data is going to the
 	 * Free Lists.
 	 */
-	if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
-	    RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
+	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
 		return -EINVAL;
 	}
@@ -2785,7 +2788,7 @@ static int t4_sge_init_soft(struct adapter *adap)
 	 * XXX meet our needs!
 	 */
 	#define READ_FL_BUF(x) \
-		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))

 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
@@ -2823,27 +2826,27 @@ static int t4_sge_init_soft(struct adapter *adap)
 	 * Retrieve our RX interrupt holdoff timer values and counter
 	 * threshold values from the SGE parameters.
 	 */
-	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
-	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
-	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
+	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
+	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
 	s->timer_val[0] = core_ticks_to_us(adap,
-					   TIMERVALUE0_GET(timer_value_0_and_1));
+					   TIMERVALUE0_G(timer_value_0_and_1));
 	s->timer_val[1] = core_ticks_to_us(adap,
-					   TIMERVALUE1_GET(timer_value_0_and_1));
+					   TIMERVALUE1_G(timer_value_0_and_1));
 	s->timer_val[2] = core_ticks_to_us(adap,
-					   TIMERVALUE2_GET(timer_value_2_and_3));
+					   TIMERVALUE2_G(timer_value_2_and_3));
 	s->timer_val[3] = core_ticks_to_us(adap,
-					   TIMERVALUE3_GET(timer_value_2_and_3));
+					   TIMERVALUE3_G(timer_value_2_and_3));
 	s->timer_val[4] = core_ticks_to_us(adap,
-					   TIMERVALUE4_GET(timer_value_4_and_5));
+					   TIMERVALUE4_G(timer_value_4_and_5));
 	s->timer_val[5] = core_ticks_to_us(adap,
-					   TIMERVALUE5_GET(timer_value_4_and_5));
+					   TIMERVALUE5_G(timer_value_4_and_5));

-	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
-	s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
-	s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
-	s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
-	s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
+	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

 	return 0;
 }
@@ -2856,29 +2859,28 @@ static int t4_sge_init_hard(struct adapter *adap)
 	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
 	 * Queue and Packet Date to the Free List.
 	 */
-	t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
-			 RXPKTCPLMODE_MASK);
+	t4_set_reg_field(adap, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);

 	/*
 	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
 	 * and generate an interrupt when this occurs so we can recover.
 	 */
 	if (is_t4(adap->params.chip)) {
-		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
-				 V_HP_INT_THRESH(M_HP_INT_THRESH) |
-				 V_LP_INT_THRESH(M_LP_INT_THRESH),
-				 V_HP_INT_THRESH(dbfifo_int_thresh) |
-				 V_LP_INT_THRESH(dbfifo_int_thresh));
+		t4_set_reg_field(adap, SGE_DBFIFO_STATUS_A,
+				 HP_INT_THRESH_V(HP_INT_THRESH_M) |
+				 LP_INT_THRESH_V(LP_INT_THRESH_M),
+				 HP_INT_THRESH_V(dbfifo_int_thresh) |
+				 LP_INT_THRESH_V(dbfifo_int_thresh));
 	} else {
-		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
-				 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
-				 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
-		t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
-				 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
-				 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
+		t4_set_reg_field(adap, SGE_DBFIFO_STATUS_A,
+				 LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
+				 LP_INT_THRESH_T5_V(dbfifo_int_thresh));
+		t4_set_reg_field(adap, SGE_DBFIFO_STATUS2_A,
+				 HP_INT_THRESH_T5_V(HP_INT_THRESH_T5_M),
+				 HP_INT_THRESH_T5_V(dbfifo_int_thresh));
 	}
-	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
-			 F_ENABLE_DROP);
+	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
+			 ENABLE_DROP_F);

 	/*
 	 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
@@ -2887,31 +2889,31 @@ static int t4_sge_init_hard(struct adapter *adap)
 	s->fl_pg_order = FL_PG_ORDER;
 	if (s->fl_pg_order)
 		t4_write_reg(adap,
-			     SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
+			     SGE_FL_BUFFER_SIZE0_A+RX_LARGE_PG_BUF*sizeof(u32),
 			     PAGE_SIZE << FL_PG_ORDER);
-	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A+RX_SMALL_MTU_BUF*sizeof(u32),
 		     FL_MTU_SMALL_BUFSIZE(adap));
-	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A+RX_LARGE_MTU_BUF*sizeof(u32),
 		     FL_MTU_LARGE_BUFSIZE(adap));

 	/*
 	 * Note that the SGE Ingress Packet Count Interrupt Threshold and
 	 * Timer Holdoff values must be supplied by our caller.
 	 */
-	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
-		     THRESHOLD_0(s->counter_val[0]) |
-		     THRESHOLD_1(s->counter_val[1]) |
-		     THRESHOLD_2(s->counter_val[2]) |
-		     THRESHOLD_3(s->counter_val[3]));
-	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
-		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
-		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
-	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
-		     TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
-		     TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
-	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
-		     TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
-		     TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD_A,
+		     THRESHOLD_0_V(s->counter_val[0]) |
+		     THRESHOLD_1_V(s->counter_val[1]) |
+		     THRESHOLD_2_V(s->counter_val[2]) |
+		     THRESHOLD_3_V(s->counter_val[3]));
+	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1_A,
+		     TIMERVALUE0_V(us_to_core_ticks(adap, s->timer_val[0])) |
+		     TIMERVALUE1_V(us_to_core_ticks(adap, s->timer_val[1])));
+	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3_A,
+		     TIMERVALUE2_V(us_to_core_ticks(adap, s->timer_val[2])) |
+		     TIMERVALUE3_V(us_to_core_ticks(adap, s->timer_val[3])));
+	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5_A,
+		     TIMERVALUE4_V(us_to_core_ticks(adap, s->timer_val[4])) |
+		     TIMERVALUE5_V(us_to_core_ticks(adap, s->timer_val[5])));

 	return 0;
 }
@@ -2927,9 +2929,9 @@ int t4_sge_init(struct adapter *adap)
 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
 	 * t4_fixup_host_params().
 	 */
-	sge_control = t4_read_reg(adap, SGE_CONTROL);
-	s->pktshift = PKTSHIFT_GET(sge_control);
-	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+	s->pktshift = PKTSHIFT_G(sge_control);
+	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;

 	/* T4 uses a single control field to specify both the PCIe Padding and
 	 * Packing Boundary. T5 introduced the ability to specify these
@@ -2937,8 +2939,8 @@ int t4_sge_init(struct adapter *adap)
 	 * within Packed Buffer Mode is the maximum of these two
 	 * specifications.
 	 */
-	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
-			       X_INGPADBOUNDARY_SHIFT);
+	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
+			       INGPADBOUNDARY_SHIFT_X);
 	if (is_t4(adap->params.chip)) {
 		s->fl_align = ingpadboundary;
 	} else {
@@ -2975,11 +2977,11 @@ int t4_sge_init(struct adapter *adap)
 	 * buffers and a new field which only applies to Packed Mode Free List
 	 * buffers.
 	 */
-	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
 	if (is_t4(adap->params.chip))
-		egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
 	else
-		egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
 	s->fl_starve_thres = 2*egress_threshold + 1;

 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
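Condensing the doorbell pattern that recurs in the hunks above: when a
queue has no BAR2 mapping the driver falls back to the legacy kernel
doorbell register, otherwise it writes the BAR2 Simple Doorbell. A minimal
sketch with the new macro names (identifiers as used in the diff):

	if (unlikely(q->bar2_addr == NULL)) {		/* legacy (T4) path */
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(n));
	} else {					/* BAR2 path (T5 and later) */
		writel(PIDX_T5_V(n) | QID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_KDOORBELL);
		wmb();					/* force the doorbell write out */
	}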
(Two file diffs suppressed because they are too large.)
@@ -0,0 +1,118 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_VALUES_H__
+#define __T4_VALUES_H__
+
+/* This file contains definitions for various T4 register value hardware
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/* SGE register field values.
+ */
+
+/* CONTROL1 register */
+#define RXPKTCPLMODE_SPLIT_X		1
+
+#define INGPCIEBOUNDARY_SHIFT_X		5
+#define INGPCIEBOUNDARY_32B_X		0
+
+#define INGPADBOUNDARY_SHIFT_X		5
+
+/* CONTROL2 register */
+#define INGPACKBOUNDARY_SHIFT_X		5
+#define INGPACKBOUNDARY_16B_X		0
+
+/* GTS register */
+#define SGE_TIMERREGS			6
+
+/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE		128
+#define SGE_UDB_KDOORBELL	8
+#define SGE_UDB_GTS		20
+#define SGE_UDB_WCDOORBELL	64
+
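In other words, each queue owns a 128-byte window of BAR2 doorbell space
with the Simple Doorbell at offset 8 and the Going To Sleep register at
offset 20. A sketch of locating them; the qid * SGE_UDB_SIZE layout is an
assumption for illustration only, since the driver derives its BAR2 offsets
from the queues-per-page parameters:

	void __iomem *udb = bar2 + qid * SGE_UDB_SIZE;	/* this queue's window */
	void __iomem *db  = udb + SGE_UDB_KDOORBELL;	/* Simple Doorbell */
	void __iomem *gts = udb + SGE_UDB_GTS;		/* Going To Sleep */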
+/* PCI-E definitions */
+#define WINDOW_SHIFT_X		10
+#define PCIEOFST_SHIFT_X	10
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define FT_FCOE_W		1
+#define FT_PORT_W		3
+#define FT_VNIC_ID_W		17
+#define FT_VLAN_W		17
+#define FT_TOS_W		8
+#define FT_PROTOCOL_W		8
+#define FT_ETHERTYPE_W		16
+#define FT_MACMATCH_W		9
+#define FT_MPSHITTYPE_W		3
+#define FT_FRAGMENTATION_W	1
+
+/* Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define FT_VLAN_VLD_S		16
+#define FT_VLAN_VLD_V(x)	((x) << FT_VLAN_VLD_S)
+#define FT_VLAN_VLD_F		FT_VLAN_VLD_V(1U)
+
+#define FT_VNID_ID_VF_S		0
+#define FT_VNID_ID_VF_V(x)	((x) << FT_VNID_ID_VF_S)
+
+#define FT_VNID_ID_PF_S		7
+#define FT_VNID_ID_PF_V(x)	((x) << FT_VNID_ID_PF_S)
+
+#define FT_VNID_ID_VLD_S	16
+#define FT_VNID_ID_VLD_V(x)	((x) << FT_VNID_ID_VLD_S)
+
+#endif /* __T4_VALUES_H__ */
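These FT_*_W widths pair with the switch statement seen earlier in
adap_init0_no_config(): each bit set in TP_VLAN_PRI_MAP contributes its
field's width to the Compressed Filter Tuple. An illustrative helper,
hypothetical and not part of the patch, that totals the width for a few of
the fields:

	static unsigned int filter_tuple_width(u32 vlan_pri_map)
	{
		unsigned int bits = 0;

		if (vlan_pri_map & FCOE_F)
			bits += FT_FCOE_W;		/* 1 bit */
		if (vlan_pri_map & PORT_F)
			bits += FT_PORT_W;		/* 3 bits */
		if (vlan_pri_map & VNIC_F)
			bits += FT_VNIC_ID_W;		/* 17 bits */
		if (vlan_pri_map & VLAN_F)
			bits += FT_VLAN_W;		/* 17 bits */
		if (vlan_pri_map & PROTOCOL_F)
			bits += FT_PROTOCOL_W;		/* 8 bits */
		return bits;				/* remaining fields are analogous */
	}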
@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
 	 * enable interrupts.
 	 */
 	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-		     CIDXINC(0) |
-		     SEINTARM(rspq->intr_params) |
-		     INGRESSQID(rspq->cntxt_id));
+		     CIDXINC_V(0) |
+		     SEINTARM_V(rspq->intr_params) |
+		     INGRESSQID_V(rspq->cntxt_id));
 }

 /*
@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
 	 */
 	if (adapter->flags & USING_MSI)
 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-			     CIDXINC(0) |
-			     SEINTARM(s->intrq.intr_params) |
-			     INGRESSQID(s->intrq.cntxt_id));
+			     CIDXINC_V(0) |
+			     SEINTARM_V(s->intrq.intr_params) |
+			     INGRESSQID_V(s->intrq.cntxt_id));

 }

@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
 	reg_block_dump(adapter, regbuf,
 		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
 		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
-		       ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
+		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
 	reg_block_dump(adapter, regbuf,
 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
 	 * threshold values from the SGE parameters.
 	 */
 	s->timer_val[0] = core_ticks_to_us(adapter,
-		TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
 	s->timer_val[1] = core_ticks_to_us(adapter,
-		TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
 	s->timer_val[2] = core_ticks_to_us(adapter,
-		TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
 	s->timer_val[3] = core_ticks_to_us(adapter,
-		TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
 	s->timer_val[4] = core_ticks_to_us(adapter,
-		TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
 	s->timer_val[5] = core_ticks_to_us(adapter,
-		TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
+		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));

-	s->counter_val[0] =
-		THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
-	s->counter_val[1] =
-		THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
-	s->counter_val[2] =
-		THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
-	s->counter_val[3] =
-		THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);

 	/*
 	 * Grab our Virtual Interface resource allocation, extract the
@ -47,6 +47,7 @@
|
|||
#include "t4vf_defs.h"
|
||||
|
||||
#include "../cxgb4/t4_regs.h"
|
||||
#include "../cxgb4/t4_values.h"
|
||||
#include "../cxgb4/t4fw_api.h"
|
||||
#include "../cxgb4/t4_msg.h"
|
||||
|
||||
|
@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
|
|||
*/
|
||||
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
|
||||
if (is_t4(adapter->params.chip))
|
||||
val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
|
||||
val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
|
||||
else
|
||||
val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
|
||||
DBTYPE(1);
|
||||
val |= DBPRIO(1);
|
||||
val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
|
||||
DBTYPE_F;
|
||||
val |= DBPRIO_F;
|
||||
|
||||
/* Make sure all memory writes to the Free List queue are
|
||||
* committed before we tell the hardware about them.
|
||||
|
@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
|
|||
if (unlikely(fl->bar2_addr == NULL)) {
|
||||
t4_write_reg(adapter,
|
||||
T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
|
||||
QID(fl->cntxt_id) | val);
|
||||
QID_V(fl->cntxt_id) | val);
|
||||
} else {
|
||||
writel(val | QID(fl->bar2_qid),
|
||||
writel(val | QID_V(fl->bar2_qid),
|
||||
fl->bar2_addr + SGE_UDB_KDOORBELL);
|
||||
|
||||
/* This Write memory Barrier will force the write to
|
||||
|
@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
|
|||
* doorbell mechanism; otherwise use the new BAR2 mechanism.
|
||||
*/
|
||||
if (unlikely(tq->bar2_addr == NULL)) {
|
||||
u32 val = PIDX(n);
|
||||
u32 val = PIDX_V(n);
|
||||
|
||||
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
|
||||
QID(tq->cntxt_id) | val);
|
||||
QID_V(tq->cntxt_id) | val);
|
||||
} else {
|
||||
u32 val = PIDX_T5(n);
|
||||
u32 val = PIDX_T5_V(n);
|
||||
|
||||
/* T4 and later chips share the same PIDX field offset within
|
||||
* the doorbell, but T5 and later shrank the field in order to
|
||||
|
@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
|
|||
* large in the first place (14 bits) so we just use the T5
|
||||
* and later limits and warn if a Queue ID is too large.
|
||||
*/
|
||||
WARN_ON(val & DBPRIO(1));
|
||||
WARN_ON(val & DBPRIO_F);
|
||||
|
||||
/* If we're only writing a single Egress Unit and the BAR2
|
||||
* Queue ID is 0, we can use the Write Combining Doorbell
|
||||
|
@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
|
|||
count--;
|
||||
}
|
||||
} else
|
||||
writel(val | QID(tq->bar2_qid),
|
||||
writel(val | QID_V(tq->bar2_qid),
|
||||
tq->bar2_addr + SGE_UDB_KDOORBELL);
|
||||
|
||||
/* This Write Memory Barrier will force the write to the User
|
||||
|
@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        if (unlikely(work_done == 0))
                rspq->unhandled_irqs++;

-        val = CIDXINC(work_done) | SEINTARM(intr_params);
+        val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
        if (is_t4(rspq->adapter->params.chip)) {
                t4_write_reg(rspq->adapter,
                             T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                             val | INGRESSQID((u32)rspq->cntxt_id));
+                             val | INGRESSQID_V((u32)rspq->cntxt_id));
        } else {
-                writel(val | INGRESSQID(rspq->bar2_qid),
+                writel(val | INGRESSQID_V(rspq->bar2_qid),
                       rspq->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
                rspq_next(intrq);
        }

-        val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
+        val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
        if (is_t4(adapter->params.chip))
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                             val | INGRESSQID(intrq->cntxt_id));
+                             val | INGRESSQID_V(intrq->cntxt_id));
        else {
-                writel(val | INGRESSQID(intrq->bar2_qid),
+                writel(val | INGRESSQID_V(intrq->bar2_qid),
                       intrq->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
                        fl0, fl1);
                return -EINVAL;
        }
-        if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
+        if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
                dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }
@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
         */
        if (fl1)
                s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
-        s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+        s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
                        ? 128 : 64);
-        s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+        s->pktshift = PKTSHIFT_G(sge_params->sge_control);

        /* T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary.  T5 introduced the ability to specify these
@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
         * end doing this because it would initialize the Padding Boundary and
         * leave the Packing Boundary initialized to 0 (16 bytes).)
         */
-        ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-                               X_INGPADBOUNDARY_SHIFT);
+        ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
+                               INGPADBOUNDARY_SHIFT_X);
        if (is_t4(adapter->params.chip)) {
                s->fl_align = ingpadboundary;
        } else {
@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
         * Congestion Threshold is in units of 2 Free List pointers.)
         */
        s->fl_starve_thres
-                = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
+                = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;

        /*
         * Set up tasklet timers.
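A worked example of the _G() extraction arithmetic in t4vf_sge_init() above, with illustrative field values rather than ones read from real hardware: if the INGPADBOUNDARY field of sge_control holds 2 and INGPADBOUNDARY_SHIFT_X is 5, then

        ingpadboundary = 1 << (2 + 5);          /* 128-byte padding boundary */

and an EGRTHRESHOLD field of 16 yields a free-list starvation threshold of 16 * 2 + 1 = 33 pointers, since the hardware expresses the congestion threshold in units of 2 free-list pointers.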
@@ -64,8 +64,8 @@
 * Mailbox Data in the fixed CIM PF map and the programmable VF map must
 * match.  However, it's a useful convention ...
 */
-#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
-#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
 #endif

 /*
@@ -39,6 +39,7 @@
 #include "t4vf_defs.h"

 #include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
 #include "../cxgb4/t4fw_api.h"

 /*
@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
         * Loop trying to get ownership of the mailbox.  Return an error
         * if we can't gain ownership.
         */
-        v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+        v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
-                v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+                v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        if (v != MBOX_OWNER_DRV)
                return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;

@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
        t4_read_reg(adapter, mbox_data);          /* flush write */

        t4_write_reg(adapter, mbox_ctl,
-                     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+                     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
        t4_read_reg(adapter, mbox_ctl);           /* flush write */

        /*
@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                 * If we're the owner, see if this is the reply we wanted.
                 */
                v = t4_read_reg(adapter, mbox_ctl);
-                if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+                if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
                        /*
                         * If the Message Valid bit isn't on, revoke ownership
                         * of the mailbox and continue waiting for our reply.
                         */
-                        if ((v & MBMSGVALID) == 0) {
+                        if ((v & MBMSGVALID_F) == 0) {
                                t4_write_reg(adapter, mbox_ctl,
-                                             MBOWNER(MBOX_OWNER_NONE));
+                                             MBOWNER_V(MBOX_OWNER_NONE));
                                continue;
                        }

@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                                 & FW_CMD_REQUEST_F) != 0);
                        }
                        t4_write_reg(adapter, mbox_ctl,
-                                     MBOWNER(MBOX_OWNER_NONE));
+                                     MBOWNER_V(MBOX_OWNER_NONE));
                        return -FW_CMD_RETVAL_G(v);
                }
        }
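Condensed sketch of the mailbox handshake the t4vf_wr_mbox_core() hunks above touch (simplified from the surrounding code; the real function loops with delays and copies the command word by word):

        u32 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        if (v != MBOX_OWNER_DRV)                /* failed to win ownership */
                return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
        /* ... write the command into mbox_data ... */
        t4_write_reg(adapter, mbox_ctl,
                     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));  /* hand off to FW */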
@@ -528,19 +529,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
        int v;

        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
        params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
        params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
        params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
        params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
        params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
        params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
        v = t4vf_query_params(adapter, 7, params, vals);
        if (v)
                return v;
@@ -576,9 +577,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
        }

        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
        params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
+                     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
        v = t4vf_query_params(adapter, 2, params, vals);
        if (v)
                return v;
@@ -615,8 +616,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
                 * the driver can just use it.
                 */
                whoami = t4_read_reg(adapter,
-                                     T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
-                pf = SOURCEPF_GET(whoami);
+                                     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+                pf = SOURCEPF_G(whoami);

                s_hps = (HOSTPAGESIZEPF0_S +
                         (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
@@ -628,10 +629,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
                         (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
                sge_params->sge_vf_eq_qpp =
                        ((sge_params->sge_egress_queues_per_page >> s_qpp)
-                         & QUEUESPERPAGEPF0_MASK);
+                         & QUEUESPERPAGEPF0_M);
                sge_params->sge_vf_iq_qpp =
                        ((sge_params->sge_ingress_queues_per_page >> s_qpp)
-                         & QUEUESPERPAGEPF0_MASK);
+                         & QUEUESPERPAGEPF0_M);
        }

        return 0;
@@ -1590,7 +1591,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
                break;

        case CHELSIO_T5:
-                chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+                chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
                break;
        }
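The s_hps/s_qpp computations above use a per-PF field stride: the field for PF n sits at shift S0 + n * (S1 - S0). With hypothetical shifts QUEUESPERPAGEPF0_S = 0 and QUEUESPERPAGEPF1_S = 4, PF 3's value would be extracted as

        qpp = (reg >> (0 + 3 * 4)) & QUEUESPERPAGEPF0_M;   /* shift 12, same-width mask */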
@@ -188,9 +188,9 @@ void
 csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
                             unsigned int mask, unsigned int val)
 {
-        csio_wr_reg32(hw, addr, TP_PIO_ADDR);
-        val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
-        csio_wr_reg32(hw, val, TP_PIO_DATA);
+        csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
+        val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
+        csio_wr_reg32(hw, val, TP_PIO_DATA_A);
 }

 void
@@ -421,17 +421,15 @@ csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-        if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+        if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;

-        cont = cont ? SF_CONT : 0;
-        lock = lock ? SF_LOCK : 0;

-        csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
-        ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
-                                       10, NULL);
+        csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
+                      BYTECNT_V(byte_cnt - 1), SF_OP_A);
+        ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
+                                       10, NULL);
        if (!ret)
-                *valp = csio_rd_reg32(hw, SF_DATA);
+                *valp = csio_rd_reg32(hw, SF_DATA_A);
        return ret;
 }

@@ -453,16 +451,14 @@ csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
 {
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-        if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+        if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;

-        cont = cont ? SF_CONT : 0;
-        lock = lock ? SF_LOCK : 0;
+        csio_wr_reg32(hw, val, SF_DATA_A);
+        csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
+                      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

-        csio_wr_reg32(hw, val, SF_DATA);
-        csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);

-        return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+        return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
                                        10, NULL);
 }

@@ -533,7 +529,7 @@ csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
        for ( ; nwords; nwords--, data++) {
                ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
-                        csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+                        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
@@ -586,7 +582,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
        if (ret)
                goto unlock;

-        csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */

        /* Read the page to verify the write succeeded */
        ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -603,7 +599,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
        return 0;

 unlock:
-        csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
        return ret;
 }

@@ -641,7 +637,7 @@ out:
        if (ret)
                csio_err(hw, "erase of flash sector %d failed, error %d\n",
                         start, ret);
-        csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
        return 0;
 }

@@ -833,7 +829,7 @@ csio_hw_get_flash_params(struct csio_hw *hw)
        ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
-        csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+        csio_wr_reg32(hw, 0, SF_OP_A);  /* unlock SF */
        if (ret != 0)
                return ret;

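For reference, the serial-flash command register written by the sf1 helpers above is now composed entirely from new-style value macros; a hedged sketch of issuing a 4-byte locked, continued read request (values illustrative):

        csio_wr_reg32(hw, SF_LOCK_V(1) | SF_CONT_V(1) |
                      BYTECNT_V(4 - 1), SF_OP_A);        /* byte count is N-1 */
        /* then poll SF_OP_A until SF_BUSY_F clears and read SF_DATA_A */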
@@ -861,17 +857,17 @@ csio_hw_dev_ready(struct csio_hw *hw)
        uint32_t reg;
        int cnt = 6;

-        while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
-               (--cnt != 0))
+        while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
+               (--cnt != 0))
                mdelay(100);

-        if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
-                           (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+        if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
+                           (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
                csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
                return -EIO;
        }

-        hw->pfn = SOURCEPF_GET(reg);
+        hw->pfn = SOURCEPF_G(reg);

        return 0;
 }
@@ -959,8 +955,8 @@ retry:
                 * timeout ... and then retry if we haven't exhausted
                 * our retries ...
                 */
-                pcie_fw = csio_rd_reg32(hw, PCIE_FW);
-                if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+                pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
+                if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
                        if (waiting <= 0) {
                                if (retries-- > 0)
                                        goto retry;
@@ -976,10 +972,10 @@ retry:
                 * report errors preferentially.
                 */
                if (state) {
-                        if (pcie_fw & PCIE_FW_ERR) {
+                        if (pcie_fw & PCIE_FW_ERR_F) {
                                *state = CSIO_DEV_STATE_ERR;
                                rv = -ETIMEDOUT;
-                        } else if (pcie_fw & PCIE_FW_INIT)
+                        } else if (pcie_fw & PCIE_FW_INIT_F)
                                *state = CSIO_DEV_STATE_INIT;
                }

@@ -988,9 +984,9 @@ retry:
                 * there's not a valid Master PF, grab its identity
                 * for our caller.
                 */
-                if (mpfn == PCIE_FW_MASTER_MASK &&
-                    (pcie_fw & PCIE_FW_MASTER_VLD))
-                        mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+                if (mpfn == PCIE_FW_MASTER_M &&
+                    (pcie_fw & PCIE_FW_MASTER_VLD_F))
+                        mpfn = PCIE_FW_MASTER_G(pcie_fw);
                break;
        }
        hw->flags &= ~CSIO_HWF_MASTER;
@@ -1078,7 +1074,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)

        if (!fw_rst) {
                /* PIO reset */
-                csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+                csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                mdelay(2000);
                return 0;
        }
@@ -1090,7 +1086,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
        }

        csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                      PIORSTMODE | PIORST, 0, NULL);
+                      PIORSTMODE_F | PIORST_F, 0, NULL);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of RESET command failed.\n");
@@ -1156,7 +1152,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
         * If a legitimate mailbox is provided, issue a RESET command
         * with a HALT indication.
         */
-        if (mbox <= PCIE_FW_MASTER_MASK) {
+        if (mbox <= PCIE_FW_MASTER_M) {
                struct csio_mb *mbp;

                mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
@@ -1166,7 +1162,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
                }

                csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                              PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
+                              PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
                              NULL);

                if (csio_mb_issue(hw, mbp)) {
@@ -1193,8 +1189,9 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
         * rather than a RESET ... if it's new enough to understand that ...
         */
        if (retval == 0 || force) {
-                csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
-                csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+                csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+                csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
+                                   PCIE_FW_HALT_F);
        }

        /*
@@ -1234,7 +1231,7 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
         * doing it automatically, we need to clear the PCIE_FW.HALT
         * bit.
         */
-        csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+        csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

        /*
         * If we've been given a valid mailbox, first try to get the
@@ -1243,21 +1240,21 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
         * valid mailbox or the RESET command failed, fall back to
         * hitting the chip with a hammer.
         */
-        if (mbox <= PCIE_FW_MASTER_MASK) {
-                csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+        if (mbox <= PCIE_FW_MASTER_M) {
+                csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
                msleep(100);
                if (csio_do_reset(hw, true) == 0)
                        return 0;
        }

-        csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+        csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
        msleep(2000);
 } else {
        int ms;

-        csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+        csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
        for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
-                if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+                if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
                        return 0;
                msleep(100);
                ms += 100;
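The halt/restart hunks above amount to a small protocol on two registers; a schematic summary (not a literal function from the driver):

        /* halt: stall the uP and advertise that the firmware is halted */
        csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
        csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, PCIE_FW_HALT_F);

        /* restart: clear the halt flag, release the uP, wait on PCIE_FW_A */
        csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
        csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);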
@@ -2039,7 +2036,7 @@ csio_hw_configure(struct csio_hw *hw)
        }

        /* HW version */
-        hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+        hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);

        /* Needed for FW download */
        rv = csio_hw_get_flash_params(hw);
@@ -2217,7 +2214,7 @@ out:
        return;
 }

-#define PF_INTR_MASK (PFSW | PFCIM)
+#define PF_INTR_MASK (PFSW_F | PFCIM_F)

 /*
 * csio_hw_intr_enable - Enable HW interrupts
@@ -2229,21 +2226,21 @@ static void
 csio_hw_intr_enable(struct csio_hw *hw)
 {
        uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
-        uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
-        uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+        uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+        uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);

        /*
         * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
         * by FW, so do nothing for INTX.
         */
        if (hw->intr_mode == CSIO_IM_MSIX)
-                csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
-                                   AIVEC(AIVEC_MASK), vec);
+                csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+                                   AIVEC_V(AIVEC_M), vec);
        else if (hw->intr_mode == CSIO_IM_MSI)
-                csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
-                                   AIVEC(AIVEC_MASK), 0);
+                csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+                                   AIVEC_V(AIVEC_M), 0);

-        csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+        csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));

        /* Turn on MB interrupts - this will internally flush PIO as well */
        csio_mb_intr_enable(hw);
@@ -2253,19 +2250,19 @@ csio_hw_intr_enable(struct csio_hw *hw)
                /*
                 * Disable the Serial FLASH interrupt, if enabled!
                 */
-                pl &= (~SF);
-                csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+                pl &= (~SF_F);
+                csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);

-                csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
-                              EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
-                              ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
-                              ERR_DATA_CPL_ON_HIGH_QID1 |
-                              ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
-                              ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
-                              ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
-                              ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
-                              SGE_INT_ENABLE3);
-                csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+                csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
+                              EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
+                              ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
+                              ERR_DATA_CPL_ON_HIGH_QID1_F |
+                              ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+                              ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+                              ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+                              ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
+                              SGE_INT_ENABLE3_A);
+                csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
        }

        hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
@@ -2281,16 +2278,16 @@ csio_hw_intr_enable(struct csio_hw *hw)
 void
 csio_hw_intr_disable(struct csio_hw *hw)
 {
-        uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+        uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

        if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
                return;

        hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

-        csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+        csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
        if (csio_is_hw_master(hw))
-                csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+                csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);

        /* Turn off MB interrupts */
        csio_mb_intr_disable(hw);
@@ -2300,7 +2297,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
 void
 csio_hw_fatal_err(struct csio_hw *hw)
 {
-        csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+        csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
        csio_hw_intr_disable(hw);

        /* Do not reset HW, we may need FW state for debugging */
@@ -2594,7 +2591,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
                 * register directly.
                 */
                csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
-                csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+                csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                mdelay(2000);
                break;

@@ -2682,11 +2679,11 @@ static void csio_tp_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info tp_intr_info[] = {
                { 0x3fffffff, "TP parity error", -1, 1 },
-                { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+                { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+        if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
                csio_hw_fatal_err(hw);
 }

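All of the handlers below feed csio_handle_intr_status() a { mask, message, stat index, fatal } table. A simplified sketch of that table-driven decode (the driver's real helper also bumps statistics counters):

        struct intr_info {
                unsigned int mask;       /* bits to check in interrupt status */
                const char *msg;         /* message to print or NULL */
                short stat_idx;          /* stat counter to increment or -1 */
                unsigned short fatal;    /* whether the condition is fatal */
        };

        static int handle_intr_status(struct csio_hw *hw, unsigned int reg,
                                      const struct intr_info *acts)
        {
                int fatal = 0;
                unsigned int status = csio_rd_reg32(hw, reg);

                for (; acts->msg; ++acts) {
                        if (!(status & acts->mask))
                                continue;
                        fatal |= acts->fatal;
                        csio_err(hw, "%s (0x%x)\n", acts->msg,
                                 status & acts->mask);
                }
                if (status)
                        csio_wr_reg32(hw, status, reg); /* clear what we saw */
                return fatal;
        }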
@@ -2698,52 +2695,52 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
        uint64_t v;

        static struct intr_info sge_intr_info[] = {
-                { ERR_CPL_EXCEED_IQE_SIZE,
+                { ERR_CPL_EXCEED_IQE_SIZE_F,
                  "SGE received CPL exceeding IQE size", -1, 1 },
-                { ERR_INVALID_CIDX_INC,
+                { ERR_INVALID_CIDX_INC_F,
                  "SGE GTS CIDX increment too large", -1, 0 },
-                { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-                { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
-                { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+                { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+                { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
+                { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
-                { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+                { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
                  0 },
-                { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+                { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
                  0 },
-                { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+                { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
                  0 },
-                { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+                { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
                  0 },
-                { ERR_ING_CTXT_PRIO,
+                { ERR_ING_CTXT_PRIO_F,
                  "SGE too many priority ingress contexts", -1, 0 },
-                { ERR_EGR_CTXT_PRIO,
+                { ERR_EGR_CTXT_PRIO_F,
                  "SGE too many priority egress contexts", -1, 0 },
-                { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
-                { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+                { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+                { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
                { 0, NULL, 0, 0 }
        };

-        v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
-            ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+        v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
+            ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
        if (v) {
                csio_fatal(hw, "SGE parity error (%#llx)\n",
                           (unsigned long long)v);
                csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
-                              SGE_INT_CAUSE1);
-                csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+                              SGE_INT_CAUSE1_A);
+                csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
        }

-        v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+        v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);

-        if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||
+        if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
            v != 0)
                csio_hw_fatal_err(hw);
 }

-#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
-                      OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
-#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
-                      IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+                      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+                      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

 /*
 * CIM interrupt handler.
@@ -2751,53 +2748,53 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
 static void csio_cim_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info cim_intr_info[] = {
-                { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+                { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
                { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
                { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
-                { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
-                { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
-                { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
-                { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+                { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+                { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+                { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+                { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info cim_upintr_info[] = {
-                { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
-                { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
-                { ILLWRINT, "CIM illegal write", -1, 1 },
-                { ILLRDINT, "CIM illegal read", -1, 1 },
-                { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
-                { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
-                { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
-                { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
-                { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
-                { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
-                { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
-                { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
-                { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
-                { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
-                { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
-                { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
-                { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
-                { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
-                { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
-                { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
-                { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
-                { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
-                { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
-                { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
-                { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
-                { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
-                { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
-                { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+                { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+                { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+                { ILLWRINT_F, "CIM illegal write", -1, 1 },
+                { ILLRDINT_F, "CIM illegal read", -1, 1 },
+                { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+                { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+                { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+                { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+                { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+                { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+                { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+                { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+                { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+                { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+                { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+                { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+                { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+                { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+                { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+                { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+                { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+                { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+                { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+                { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+                { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+                { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+                { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+                { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
                { 0, NULL, 0, 0 }
        };

        int fat;

-        fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
-                                      cim_intr_info) +
-              csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
-                                      cim_upintr_info);
+        fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
                                      cim_intr_info) +
+              csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
                                      cim_upintr_info);
        if (fat)
                csio_hw_fatal_err(hw);
 }
@@ -2813,7 +2810,7 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+        if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -2823,19 +2820,19 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
 static void csio_ulptx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info ulptx_intr_info[] = {
-                { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+                { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
                  0 },
-                { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+                { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
                  0 },
-                { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+                { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
                  0 },
-                { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+                { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
                  0 },
                { 0xfffffff, "ULPTX parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+        if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -2845,20 +2842,20 @@ static void csio_ulptx_intr_handler(struct csio_hw *hw)
 static void csio_pmtx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pmtx_intr_info[] = {
-                { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
-                { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
-                { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
-                { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+                { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+                { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+                { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+                { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
                { 0xffffff0, "PMTX framing error", -1, 1 },
-                { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
-                { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+                { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+                { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
                  1 },
-                { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
-                { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+                { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+                { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+        if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -2868,17 +2865,17 @@ static void csio_pmtx_intr_handler(struct csio_hw *hw)
 static void csio_pmrx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pmrx_intr_info[] = {
-                { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+                { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
                { 0x3ffff0, "PMRX framing error", -1, 1 },
-                { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
-                { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+                { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+                { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
                  1 },
-                { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
-                { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+                { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+                { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+        if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -2888,16 +2885,16 @@ static void csio_pmrx_intr_handler(struct csio_hw *hw)
 static void csio_cplsw_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info cplsw_intr_info[] = {
-                { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
-                { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
-                { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
-                { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
-                { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
-                { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+                { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+                { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+                { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+                { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+                { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+                { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+        if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -2907,15 +2904,15 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
 static void csio_le_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info le_intr_info[] = {
-                { LIPMISS, "LE LIP miss", -1, 0 },
-                { LIP0, "LE 0 LIP error", -1, 0 },
-                { PARITYERR, "LE parity error", -1, 1 },
-                { UNKNOWNCMD, "LE unknown command", -1, 1 },
-                { REQQPARERR, "LE request queue parity error", -1, 1 },
+                { LIPMISS_F, "LE LIP miss", -1, 0 },
+                { LIP0_F, "LE 0 LIP error", -1, 0 },
+                { PARITYERR_F, "LE parity error", -1, 1 },
+                { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+                { REQQPARERR_F, "LE request queue parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+        if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -2929,19 +2926,22 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_tx_intr_info[] = {
-                { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
-                { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
-                { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
-                { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
-                { BUBBLE, "MPS Tx underflow", -1, 1 },
-                { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
-                { FRMERR, "MPS Tx framing error", -1, 1 },
+                { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+                { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+                { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+                  -1, 1 },
+                { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+                  -1, 1 },
+                { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+                { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+                { FRMERR_F, "MPS Tx framing error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_trc_intr_info[] = {
-                { FILTMEM, "MPS TRC filter parity error", -1, 1 },
-                { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
-                { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+                { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+                { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+                  -1, 1 },
+                { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_stat_sram_intr_info[] = {
@@ -2957,36 +2957,37 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_cls_intr_info[] = {
-                { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
-                { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
-                { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+                { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+                { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+                { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

        int fat;

-        fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
-                                      mps_rx_intr_info) +
-              csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
-                                      mps_tx_intr_info) +
-              csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
-                                      mps_trc_intr_info) +
-              csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
-                                      mps_stat_sram_intr_info) +
-              csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
-                                      mps_stat_tx_intr_info) +
-              csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
-                                      mps_stat_rx_intr_info) +
-              csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
-                                      mps_cls_intr_info);
+        fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
+                                      mps_rx_intr_info) +
+              csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
+                                      mps_tx_intr_info) +
+              csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
+                                      mps_trc_intr_info) +
+              csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
+                                      mps_stat_sram_intr_info) +
+              csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
+                                      mps_stat_tx_intr_info) +
+              csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
+                                      mps_stat_rx_intr_info) +
+              csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
+                                      mps_cls_intr_info);

-        csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
-        csio_rd_reg32(hw, MPS_INT_CAUSE);       /* flush */
+        csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
+        csio_rd_reg32(hw, MPS_INT_CAUSE_A);     /* flush */
        if (fat)
                csio_hw_fatal_err(hw);
 }

-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+                      ECC_UE_INT_CAUSE_F)

 /*
 * EDC/MC interrupt handler.
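EDC_REG() in the handler below is a simple stride macro: instance idx selects the register copy of the matching EDC controller at a fixed offset. Roughly (the actual stride constant lives in the register headers):

        #define EDC_REG(reg, idx) ((reg) + EDC_STRIDE * (idx))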
@@ -2998,28 +2999,28 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
        unsigned int addr, cnt_addr, v;

        if (idx <= MEM_EDC1) {
-                addr = EDC_REG(EDC_INT_CAUSE, idx);
-                cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+                addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+                cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
        } else {
-                addr = MC_INT_CAUSE;
-                cnt_addr = MC_ECC_STATUS;
+                addr = MC_INT_CAUSE_A;
+                cnt_addr = MC_ECC_STATUS_A;
        }

        v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
-        if (v & PERR_INT_CAUSE)
+        if (v & PERR_INT_CAUSE_F)
                csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
-        if (v & ECC_CE_INT_CAUSE) {
-                uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+        if (v & ECC_CE_INT_CAUSE_F) {
+                uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

-                csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+                csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
                csio_warn(hw, "%u %s correctable ECC data error%s\n",
                          cnt, name[idx], cnt > 1 ? "s" : "");
        }
-        if (v & ECC_UE_INT_CAUSE)
+        if (v & ECC_UE_INT_CAUSE_F)
                csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

        csio_wr_reg32(hw, v, addr);
-        if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+        if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
                csio_hw_fatal_err(hw);
 }

@@ -3028,18 +3029,18 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
 */
 static void csio_ma_intr_handler(struct csio_hw *hw)
 {
-        uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+        uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

-        if (status & MEM_PERR_INT_CAUSE)
+        if (status & MEM_PERR_INT_CAUSE_F)
                csio_fatal(hw, "MA parity error, parity status %#x\n",
-                           csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
-        if (status & MEM_WRAP_INT_CAUSE) {
-                v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+                           csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
+        if (status & MEM_WRAP_INT_CAUSE_F) {
+                v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
                csio_fatal(hw,
                           "MA address wrap-around error by client %u to address %#x\n",
-                           MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+                           MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
        }
-        csio_wr_reg32(hw, status, MA_INT_CAUSE);
+        csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
        csio_hw_fatal_err(hw);
 }

@@ -3049,13 +3050,13 @@ static void csio_ma_intr_handler(struct csio_hw *hw)
 static void csio_smb_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info smb_intr_info[] = {
-                { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
-                { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
-                { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+                { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+                { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+                { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+        if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -3065,14 +3066,14 @@ static void csio_smb_intr_handler(struct csio_hw *hw)
 static void csio_ncsi_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info ncsi_intr_info[] = {
-                { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
-                { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
-                { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
-                { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+                { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+                { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+                { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+                { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+        if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -3083,13 +3084,13 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
 {
        uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));

-        v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+        v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
        if (!v)
                return;

-        if (v & TXFIFO_PRTY_ERR)
+        if (v & TXFIFO_PRTY_ERR_F)
                csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
-        if (v & RXFIFO_PRTY_ERR)
+        if (v & RXFIFO_PRTY_ERR_F)
                csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
        csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
        csio_hw_fatal_err(hw);
@@ -3101,12 +3102,12 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
 static void csio_pl_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pl_intr_info[] = {
-                { FATALPERR, "T4 fatal parity error", -1, 1 },
-                { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+                { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+                { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

-        if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+        if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
                csio_hw_fatal_err(hw);
 }

@@ -3121,7 +3122,7 @@ static void csio_pl_intr_handler(struct csio_hw *hw)
 int
 csio_hw_slow_intr_handler(struct csio_hw *hw)
 {
-        uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+        uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);

        if (!(cause & CSIO_GLBL_INTR_MASK)) {
                CSIO_INC_STATS(hw, n_plint_unexp);
@@ -3132,75 +3133,75 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)

        CSIO_INC_STATS(hw, n_plint_cnt);

-        if (cause & CIM)
+        if (cause & CIM_F)
                csio_cim_intr_handler(hw);

-        if (cause & MPS)
+        if (cause & MPS_F)
                csio_mps_intr_handler(hw);

-        if (cause & NCSI)
+        if (cause & NCSI_F)
                csio_ncsi_intr_handler(hw);

-        if (cause & PL)
+        if (cause & PL_F)
                csio_pl_intr_handler(hw);

-        if (cause & SMB)
+        if (cause & SMB_F)
                csio_smb_intr_handler(hw);

-        if (cause & XGMAC0)
+        if (cause & XGMAC0_F)
                csio_xgmac_intr_handler(hw, 0);

-        if (cause & XGMAC1)
+        if (cause & XGMAC1_F)
                csio_xgmac_intr_handler(hw, 1);

-        if (cause & XGMAC_KR0)
+        if (cause & XGMAC_KR0_F)
                csio_xgmac_intr_handler(hw, 2);

-        if (cause & XGMAC_KR1)
+        if (cause & XGMAC_KR1_F)
                csio_xgmac_intr_handler(hw, 3);

-        if (cause & PCIE)
+        if (cause & PCIE_F)
                hw->chip_ops->chip_pcie_intr_handler(hw);

-        if (cause & MC)
+        if (cause & MC_F)
                csio_mem_intr_handler(hw, MEM_MC);

-        if (cause & EDC0)
+        if (cause & EDC0_F)
                csio_mem_intr_handler(hw, MEM_EDC0);

-        if (cause & EDC1)
+        if (cause & EDC1_F)
                csio_mem_intr_handler(hw, MEM_EDC1);

-        if (cause & LE)
+        if (cause & LE_F)
                csio_le_intr_handler(hw);

-        if (cause & TP)
+        if (cause & TP_F)
                csio_tp_intr_handler(hw);

-        if (cause & MA)
+        if (cause & MA_F)
                csio_ma_intr_handler(hw);

-        if (cause & PM_TX)
+        if (cause & PM_TX_F)
                csio_pmtx_intr_handler(hw);

-        if (cause & PM_RX)
+        if (cause & PM_RX_F)
                csio_pmrx_intr_handler(hw);

-        if (cause & ULP_RX)
+        if (cause & ULP_RX_F)
                csio_ulprx_intr_handler(hw);

-        if (cause & CPL_SWITCH)
+        if (cause & CPL_SWITCH_F)
                csio_cplsw_intr_handler(hw);

-        if (cause & SGE)
+        if (cause & SGE_F)
                csio_sge_intr_handler(hw);

-        if (cause & ULP_TX)
+        if (cause & ULP_TX_F)
                csio_ulptx_intr_handler(hw);

        /* Clear the interrupts just processed for which we are the master. */
-        csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
-        csio_rd_reg32(hw, PL_INT_CAUSE);        /* flush */
+        csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
+        csio_rd_reg32(hw, PL_INT_CAUSE_A);      /* flush */

        return 1;
 }
@@ -117,10 +117,10 @@ extern int csio_msi;
 #define CSIO_ASIC_DEVID_PROTO_MASK              0xFF00
 #define CSIO_ASIC_DEVID_TYPE_MASK               0x00FF

-#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
-                             EDC1 | LE | TP | MA | PM_TX | PM_RX | \
-                             ULP_RX | CPL_SWITCH | SGE | \
-                             ULP_TX | SF)
+#define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \
+                             EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \
+                             PM_TX_F | PM_RX_F | ULP_RX_F | \
+                             CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)

 /*
 * Hard parameters used to initialize the card in the absence of a
@@ -66,19 +66,19 @@ static inline int csio_is_t5(uint16_t chip)
        { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }

 #define CSIO_HW_PIDX(hw, index)                                         \
-        (csio_is_t4(hw->chip_id) ? (PIDX(index)) :                      \
-                                   (PIDX_T5(index) | DBTYPE(1U)))
+        (csio_is_t4(hw->chip_id) ? (PIDX_V(index)) :                    \
+                                   (PIDX_T5_G(index) | DBTYPE_F))

 #define CSIO_HW_LP_INT_THRESH(hw, val)                                  \
-        (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) :               \
-                                   (V_LP_INT_THRESH_T5(val)))
+        (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_V(val)) :             \
+                                   (LP_INT_THRESH_T5_V(val)))

 #define CSIO_HW_M_LP_INT_THRESH(hw)                                     \
-        (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
+        (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_M) : (LP_INT_THRESH_T5_M))

 #define CSIO_MAC_INT_CAUSE_REG(hw, port)                                \
-        (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
-                                   (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
+        (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE_A)) : \
+                                   (T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)))

 #define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
 #define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
@@ -96,11 +96,11 @@ csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
         * back MA register to ensure that changes propagate before we attempt
         * to use the new values.)
         */
-        csio_wr_reg32(hw, mem_win_base | BIR(0) |
-                      WINDOW(ilog2(MEMWIN_APERTURE) - 10),
-                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+        csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+                      WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
        csio_rd_reg32(hw,
-                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
        return 0;
 }

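The write-then-read-back pair above is the usual posted-write flush, and the same idiom appears throughout this series: reading the register back forces the preceding write all the way to the device before the window is used.

        csio_wr_reg32(hw, val, reg);
        (void)csio_rd_reg32(hw, reg);   /* flush the posted write */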
@@ -111,69 +111,69 @@ static void
 csio_t4_pcie_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info sysbus_intr_info[] = {
-                { RNPP, "RXNP array parity error", -1, 1 },
-                { RPCP, "RXPC array parity error", -1, 1 },
-                { RCIP, "RXCIF array parity error", -1, 1 },
-                { RCCP, "Rx completions control array parity error", -1, 1 },
-                { RFTP, "RXFT array parity error", -1, 1 },
+                { RNPP_F, "RXNP array parity error", -1, 1 },
+                { RPCP_F, "RXPC array parity error", -1, 1 },
+                { RCIP_F, "RXCIF array parity error", -1, 1 },
+                { RCCP_F, "Rx completions control array parity error", -1, 1 },
+                { RFTP_F, "RXFT array parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info pcie_port_intr_info[] = {
-                { TPCP, "TXPC array parity error", -1, 1 },
-                { TNPP, "TXNP array parity error", -1, 1 },
-                { TFTP, "TXFT array parity error", -1, 1 },
-                { TCAP, "TXCA array parity error", -1, 1 },
-                { TCIP, "TXCIF array parity error", -1, 1 },
-                { RCAP, "RXCA array parity error", -1, 1 },
-                { OTDD, "outbound request TLP discarded", -1, 1 },
-                { RDPE, "Rx data parity error", -1, 1 },
-                { TDUE, "Tx uncorrectable data error", -1, 1 },
+                { TPCP_F, "TXPC array parity error", -1, 1 },
+                { TNPP_F, "TXNP array parity error", -1, 1 },
+                { TFTP_F, "TXFT array parity error", -1, 1 },
+                { TCAP_F, "TXCA array parity error", -1, 1 },
+                { TCIP_F, "TXCIF array parity error", -1, 1 },
+                { RCAP_F, "RXCA array parity error", -1, 1 },
+                { OTDD_F, "outbound request TLP discarded", -1, 1 },
+                { RDPE_F, "Rx data parity error", -1, 1 },
+                { TDUE_F, "Tx uncorrectable data error", -1, 1 },
                { 0, NULL, 0, 0 }
        };

        static struct intr_info pcie_intr_info[] = {
-                { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
-                { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
-                { MSIDATAPERR, "MSI data parity error", -1, 1 },
-                { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-                { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-                { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-                { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-                { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
-                { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
-                { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-                { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
-                { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-                { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-                { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
-                { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-                { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-                { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
-                { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-                { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-                { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-                { FIDPERR, "PCI FID parity error", -1, 1 },
-                { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
-                { MATAGPERR, "PCI MA tag parity error", -1, 1 },
-                { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-                { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
-                { RXWRPERR, "PCI Rx write parity error", -1, 1 },
-                { RPLPERR, "PCI replay buffer parity error", -1, 1 },
-                { PCIESINT, "PCI core secondary fault", -1, 1 },
-                { PCIEPINT, "PCI core primary fault", -1, 1 },
-                { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+                { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+                { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+                { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+                { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+                { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+                { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+                { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+                { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+                { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+                { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+                { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+                { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+                { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+                { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+                { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+                { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+                { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+                { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+                { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+                { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+                { FIDPERR_F, "PCI FID parity error", -1, 1 },
+                { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+                { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+                { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+                { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+                { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+                { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+                { PCIESINT_F, "PCI core secondary fault", -1, 1 },
+                { PCIEPINT_F, "PCI core primary fault", -1, 1 },
+                { UNXSPLCPLERR_F, "PCI unexpected split completion error", -1,
                  0 },
                { 0, NULL, 0, 0 }
        };

        int fat;
        fat = csio_handle_intr_status(hw,
-                                      PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+                                      PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
                                      sysbus_intr_info) +
              csio_handle_intr_status(hw,
-                                      PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+                                      PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
                                      pcie_port_intr_info) +
-              csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+              csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
        if (fat)
                csio_hw_fatal_err(hw);
 }
@@ -209,19 +209,19 @@ csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
{
int i;

if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
if (csio_rd_reg32(hw, MC_BIST_CMD_A) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
MC_BIST_CMD);
i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR_A);
csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN_A);
csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN_A);
csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
MC_BIST_CMD_A);
i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD_A, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)

for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -250,19 +250,19 @@ csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
int i;

idx *= EDC_STRIDE;
if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
if (csio_rd_reg32(hw, EDC_BIST_CMD_A + idx) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
EDC_BIST_CMD + idx);
i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR_A + idx);
csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN_A + idx);
csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN_A + idx);
csio_wr_reg32(hw, BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F,
EDC_BIST_CMD_A + idx);
i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD_A + idx, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)

for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
@@ -329,9 +329,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* the address is relative to BAR0.
*/
mem_reg = csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
mem_aperture = 1 << (WINDOW(mem_reg) + 10);
mem_base = GET_PCIEOFST(mem_reg) << 10;
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
mem_base = PCIEOFST_G(mem_reg) << 10;

bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
@@ -356,9 +356,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* before we attempt to use the new value.
*/
csio_wr_reg32(hw, pos,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

while (offset < mem_aperture && len > 0) {
if (dir)
@@ -56,11 +56,11 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
* back MA register to ensure that changes propagate before we attempt
* to use the new values.)
*/
csio_wr_reg32(hw, mem_win_base | BIR(0) |
WINDOW(ilog2(MEMWIN_APERTURE) - 10),
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));

return 0;
}
@@ -72,74 +72,74 @@ static void
csio_t5_pcie_intr_handler(struct csio_hw *hw)
{
static struct intr_info sysbus_intr_info[] = {
{ RNPP, "RXNP array parity error", -1, 1 },
{ RPCP, "RXPC array parity error", -1, 1 },
{ RCIP, "RXCIF array parity error", -1, 1 },
{ RCCP, "Rx completions control array parity error", -1, 1 },
{ RFTP, "RXFT array parity error", -1, 1 },
{ RNPP_F, "RXNP array parity error", -1, 1 },
{ RPCP_F, "RXPC array parity error", -1, 1 },
{ RCIP_F, "RXCIF array parity error", -1, 1 },
{ RCCP_F, "Rx completions control array parity error", -1, 1 },
{ RFTP_F, "RXFT array parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_port_intr_info[] = {
{ TPCP, "TXPC array parity error", -1, 1 },
{ TNPP, "TXNP array parity error", -1, 1 },
{ TFTP, "TXFT array parity error", -1, 1 },
{ TCAP, "TXCA array parity error", -1, 1 },
{ TCIP, "TXCIF array parity error", -1, 1 },
{ RCAP, "RXCA array parity error", -1, 1 },
{ OTDD, "outbound request TLP discarded", -1, 1 },
{ RDPE, "Rx data parity error", -1, 1 },
{ TDUE, "Tx uncorrectable data error", -1, 1 },
{ TPCP_F, "TXPC array parity error", -1, 1 },
{ TNPP_F, "TXNP array parity error", -1, 1 },
{ TFTP_F, "TXFT array parity error", -1, 1 },
{ TCAP_F, "TXCA array parity error", -1, 1 },
{ TCIP_F, "TXCIF array parity error", -1, 1 },
{ RCAP_F, "RXCA array parity error", -1, 1 },
{ OTDD_F, "outbound request TLP discarded", -1, 1 },
{ RDPE_F, "Rx data parity error", -1, 1 },
{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0, NULL, 0, 0 }
};

static struct intr_info pcie_intr_info[] = {
{ MSTGRPPERR, "Master Response Read Queue parity error",
{ MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
-1, 1 },
{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
-1, 1 },
{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
{ DREQWRPERR, "PCI DMA channel write request parity error",
{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
{ DREQWRPERR_F, "PCI DMA channel write request parity error",
-1, 1 },
{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
{ HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
{ FIDPERR, "PCI FID parity error", -1, 1 },
{ VFIDPERR, "PCI INTx clear parity error", -1, 1 },
{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
{ FIDPERR_F, "PCI FID parity error", -1, 1 },
{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
-1, 1 },
{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
-1, 1 },
{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
{ READRSPERR, "Outbound read error", -1, 0 },
{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
{ READRSPERR_F, "Outbound read error", -1, 0 },
{ 0, NULL, 0, 0 }
};

int fat;
fat = csio_handle_intr_status(hw,
PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
sysbus_intr_info) +
csio_handle_intr_status(hw,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
pcie_port_intr_info) +
csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat)
csio_hw_fatal_err(hw);
}
@@ -177,25 +177,25 @@ csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx);
mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);

if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
mc_bist_cmd_reg);
i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)

for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -231,27 +231,27 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)

edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5

if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
edc_bist_cmd_reg);
i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)

for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
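All four BIST readers touched by this series (T4/T5, MC and EDC) run the same sequence: fail with -EBUSY if the engine is already running, program a 64-byte-aligned address plus a length and data pattern, kick the command with START_BIST_F, then poll until the hardware clears that flag before draining the 16 data words. A condensed sketch under those assumptions; the wrapper function below is hypothetical, while the csio_* helpers and the *_F/*_V macros are the driver's own (csiostor headers assumed):

        /* Hypothetical condensation of the BIST read sequence shown above. */
        static int csio_bist_read_sketch(struct csio_hw *hw, u32 cmd_reg,
                                         u32 addr_reg, u32 len_reg,
                                         u32 pat_reg, u32 addr)
        {
                if (csio_rd_reg32(hw, cmd_reg) & START_BIST_F)
                        return -EBUSY;                  /* engine still busy */
                csio_wr_reg32(hw, addr & ~0x3fU, addr_reg); /* 64B aligned  */
                csio_wr_reg32(hw, 64, len_reg);
                csio_wr_reg32(hw, 0xc, pat_reg);
                csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F |
                              BIST_CMD_GAP_V(1), cmd_reg);
                /* Poll for START_BIST_F to self-clear (attempts=10, delay=1),
                 * exactly as the hunks above do. */
                return csio_hw_wait_op_done_val(hw, cmd_reg, START_BIST_F,
                                                0, 10, 1, NULL);
        }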
@@ -320,13 +320,13 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* the address is relative to BAR0.
*/
mem_reg = csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
mem_aperture = 1 << (WINDOW(mem_reg) + 10);
mem_base = GET_PCIEOFST(mem_reg) << 10;
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
mem_base = PCIEOFST_G(mem_reg) << 10;

start = addr & ~(mem_aperture-1);
offset = addr - start;
win_pf = V_PFNUM(hw->pfn);
win_pf = PFNUM_V(hw->pfn);

csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
mem_reg, mem_aperture);
@@ -344,9 +344,9 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* before we attempt to use the new value.
*/
csio_wr_reg32(hw, pos | win_pf,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

while (offset < mem_aperture && len > 0) {
if (dir)
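Both memory_rw implementations decode the window geometry the same way: judging by the csio_t5_set_mem_win() hunk above, the WINDOW field carries ilog2(aperture) - 10 and PCIEOFST gives the BAR0-relative base in 1KB units, and every window move is chased with a read-back so the posted write lands before the window is used. A sketch isolating just that arithmetic (the helper name is hypothetical; the macros are the driver's):

        /* Hypothetical helper isolating the memory-window maths used above;
         * pos is the new window offset, aligned by the caller. */
        static void csio_memwin_sketch(struct csio_hw *hw, u32 win, u32 pos)
        {
                u32 mem_reg, mem_aperture, mem_base;

                mem_reg = csio_rd_reg32(hw,
                        PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
                mem_aperture = 1 << (WINDOW_V(mem_reg) + 10); /* bytes */
                mem_base = PCIEOFST_G(mem_reg) << 10;  /* BAR0-relative */
                (void)mem_aperture;
                (void)mem_base;

                /* Move the window, then read back to flush the posted write. */
                csio_wr_reg32(hw, pos,
                        PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
                csio_rd_reg32(hw,
                        PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
        }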
@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id)

/* Disable the interrupt for this PCI function. */
if (hw->intr_mode == CSIO_IM_INTX)
csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

/*
* The read in the following function will flush the
@@ -1104,8 +1104,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
void
csio_mb_intr_enable(struct csio_hw *hw)
{
csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}

/*
@@ -1117,8 +1117,9 @@ csio_mb_intr_enable(struct csio_hw *hw)
void
csio_mb_intr_disable(struct csio_hw *hw)
{
csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}

static void
@@ -1153,8 +1154,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
{
int i;
__be64 cmd[CSIO_MB_MAX_REGS];
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = sizeof(struct fw_debug_cmd);

/* Copy mailbox data */
@@ -1164,8 +1165,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
csio_mb_dump_fw_dbg(hw, cmd);

/* Notify FW of mailbox by setting owner as UP */
csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
ctl_reg);
csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);

csio_rd_reg32(hw, ctl_reg);
wmb();
@@ -1187,8 +1188,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
__be64 *cmd = mbp->mb;
__be64 hdr;
struct csio_mbm *mbm = &hw->mbm;
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = mbp->mb_size;
int rv = -EINVAL;
struct fw_cmd_hdr *fw_hdr;
@@ -1224,12 +1225,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
}

/* Now get ownership of mailbox */
owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));

if (!csio_mb_is_host_owner(owner)) {

for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
/*
* Mailbox unavailable. In immediate mode, fail the command.
* In other modes, enqueue the request.
@@ -1271,10 +1272,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
if (mbp->mb_cbfn != NULL) {
mbm->mcurrent = mbp;
mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
} else
csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
ctl_reg);

/* Flush posted writes */
@@ -1294,9 +1295,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)

/* Check for response */
ctl = csio_rd_reg32(hw, ctl_reg);
if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {

if (!(ctl & MBMSGVALID)) {
if (!(ctl & MBMSGVALID_F)) {
csio_wr_reg32(hw, 0, ctl_reg);
continue;
}
@@ -1457,16 +1458,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
__be64 *cmd;
uint32_t ctl, cim_cause, pl_cause;
int i;
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size;
__be64 hdr;
struct fw_cmd_hdr *fw_hdr;

pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));

if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
CSIO_INC_STATS(hw, n_mbint_unexp);
return -EINVAL;
}
@@ -1477,16 +1478,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
* the upper level cause register. In other words, CIM-cause
* first followed by PL-Cause next.
*/
csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));

ctl = csio_rd_reg32(hw, ctl_reg);

if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {

CSIO_DUMP_MB(hw, hw->pfn, data_reg);

if (!(ctl & MBMSGVALID)) {
if (!(ctl & MBMSGVALID_F)) {
csio_warn(hw,
"Stray mailbox interrupt recvd,"
" mailbox data not valid\n");
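The csio_mb.c hunks above all manipulate one per-PF mailbox control register: MBMSGVALID_F and MBINTREQ_F are single-bit flags, while the owner is a small field written with MBOWNER_V() and read back with MBOWNER_G(). A sketch of the handoff-and-poll pattern under those assumptions (the wrapper function below is hypothetical; the helpers are the driver's own):

        /* Hypothetical sketch of the mailbox handoff/poll pattern above. */
        static void csio_mb_handoff_sketch(struct csio_hw *hw, u32 ctl_reg)
        {
                u32 ctl;

                /* Hand the mailbox to firmware: data valid + intr request. */
                csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
                              MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
                csio_rd_reg32(hw, ctl_reg);     /* flush the posted write */

                /* On completion: owner back with the host, data valid. */
                ctl = csio_rd_reg32(hw, ctl_reg);
                if (csio_mb_is_host_owner(MBOWNER_G(ctl)) &&
                    (ctl & MBMSGVALID_F)) {
                        /* Response can be copied out of the data registers. */
                }
        }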
@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;

#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)

static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
reg * sizeof(uint32_t));
}
@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
}

/* Ring freelist doorbell */
@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
* 8 freelist buffer pointers (since each pointer is 8 bytes).
*/
if (flq->inc_idx >= 8) {
csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
CSIO_HW_PIDX(hw, flq->inc_idx / 8),
MYPF_REG(SGE_PF_KDOORBELL));
MYPF_REG(SGE_PF_KDOORBELL_A));
flq->inc_idx &= 7;
}
}
@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
csio_wr_reg32(hw, CIDXINC(0) |
INGRESSQID(iqid) |
TIMERREG(X_TIMERREG_RESTART_COUNTER),
MYPF_REG(SGE_PF_GTS));
csio_wr_reg32(hw, CIDXINC_V(0) |
INGRESSQID_V(iqid) |
TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
MYPF_REG(SGE_PF_GTS_A));
}

/*
@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)

wmb();
/* Ring SGE Doorbell writing q->pidx into it */
csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
CSIO_HW_PIDX(hw, q->inc_idx),
MYPF_REG(SGE_PF_KDOORBELL));
MYPF_REG(SGE_PF_KDOORBELL_A));
q->inc_idx = 0;

return 0;
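The two doorbell sites above also show how the series resolves the old value macros: a constant single-bit use becomes the _F flag (DBPRIO(1) to DBPRIO_F in the freelist path), while a genuinely variable use keeps the _V form (DBPRIO(prio) to DBPRIO_V(prio) here). A sketch composing both doorbell words; the function and its arguments are illustrative, and CSIO_HW_PIDX() is the driver's chip-aware PIDX helper as used in the hunks above:

        /* Illustrative doorbell compositions; flid/physeqid/prio/inc are
         * stand-ins for the queue state used by the driver. */
        static void csio_db_sketch(struct csio_hw *hw, u32 flid, u32 physeqid,
                                   bool prio, u32 inc)
        {
                /* Freelist: priority bit always set, credits in units of 8. */
                csio_wr_reg32(hw, DBPRIO_F | QID_V(flid) |
                              CSIO_HW_PIDX(hw, inc / 8),
                              MYPF_REG(SGE_PF_KDOORBELL_A));

                /* Egress queue: priority comes from the caller. */
                csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(physeqid) |
                              CSIO_HW_PIDX(hw, inc),
                              MYPF_REG(SGE_PF_KDOORBELL_A));
        }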
@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,

restart:
/* Now inform SGE about our incremental index value */
csio_wr_reg32(hw, CIDXINC(q->inc_idx) |
INGRESSQID(q->un.iq.physiqid) |
TIMERREG(csio_sge_timer_reg),
MYPF_REG(SGE_PF_GTS));
csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) |
INGRESSQID_V(q->un.iq.physiqid) |
TIMERREG_V(csio_sge_timer_reg),
MYPF_REG(SGE_PF_GTS_A));
q->stats.n_tot_rsps += q->inc_idx;

q->inc_idx = 0;
@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64;

csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
SGE_HOST_PAGE_SIZE);
csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
SGE_HOST_PAGE_SIZE_A);

sge->csio_fl_align = clsz < 32 ? 32 : clsz;
ingpad = ilog2(sge->csio_fl_align) - 5;

csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
EGRSTATUSPAGESIZE(1),
INGPADBOUNDARY(ingpad) |
EGRSTATUSPAGESIZE(stat_len != 64));
csio_set_reg_field(hw, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY_V(ingpad) |
EGRSTATUSPAGESIZE_V(stat_len != 64));

/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);

/*
* If using hard params, the following will get set correctly
@@ -1333,23 +1334,24 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
*/
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
SGE_FL_BUFFER_SIZE2);
SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
SGE_FL_BUFFER_SIZE3);
SGE_FL_BUFFER_SIZE3_A);
}

csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);

/* default value of rx_dma_offset of the NIC driver */
csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
csio_set_reg_field(hw, SGE_CONTROL_A,
PKTSHIFT_V(PKTSHIFT_M),
PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));

csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
CSUM_HAS_PSEUDO_HDR, 0);
csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
CSUM_HAS_PSEUDO_HDR_F, 0);
}

static void
@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw)
u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
u32 ingress_rx_threshold;

sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);

ingpad = INGPADBOUNDARY_GET(sge->sge_control);
ingpad = INGPADBOUNDARY_G(sge->sge_control);

switch (ingpad) {
case X_INGPCIEBOUNDARY_32B:
@@ -1410,28 +1412,28 @@ csio_wr_get_sge(struct csio_hw *hw)
for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
csio_get_flbuf_size(hw, sge, i);

timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);

sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE0_GET(timer_value_0_and_1));
TIMERVALUE0_G(timer_value_0_and_1));
sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE1_GET(timer_value_0_and_1));
TIMERVALUE1_G(timer_value_0_and_1));
sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE2_GET(timer_value_2_and_3));
TIMERVALUE2_G(timer_value_2_and_3));
sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE3_GET(timer_value_2_and_3));
TIMERVALUE3_G(timer_value_2_and_3));
sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE4_GET(timer_value_4_and_5));
TIMERVALUE4_G(timer_value_4_and_5));
sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE5_GET(timer_value_4_and_5));
TIMERVALUE5_G(timer_value_4_and_5));

ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

csio_init_intr_coalesce_parms(hw);
}
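csio_wr_get_sge() and csio_wr_set_sge() are mirror images over the same registers: _V packs two hold-off timer values, converted to core-clock ticks, into each 32-bit SGE_TIMER_VALUE pair register, and _G unpacks them again. A round-trip sketch under those assumptions (the function below is illustrative):

        /* Hypothetical round-trip over one SGE timer register pair. */
        static void csio_sge_timer_sketch(struct csio_hw *hw, u16 us0, u16 us1)
        {
                u32 v;

                /* Pack: microseconds -> core ticks -> two fields, one reg. */
                csio_wr_reg32(hw,
                        TIMERVALUE0_V(csio_us_to_core_ticks(hw, us0)) |
                        TIMERVALUE1_V(csio_us_to_core_ticks(hw, us1)),
                        SGE_TIMER_VALUE_0_AND_1_A);

                /* Unpack: read the register and reverse the conversion. */
                v = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
                us0 = (u16)csio_core_ticks_to_us(hw, TIMERVALUE0_G(v));
                us1 = (u16)csio_core_ticks_to_us(hw, TIMERVALUE1_G(v));
                (void)us0;
                (void)us1;
        }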
@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up our basic SGE mode to deliver CPL messages to our Ingress
* Queue and Packet Date to the Free List.
*/
csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);

sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);

/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
@@ -1464,22 +1466,24 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
HP_INT_THRESH(HP_INT_THRESH_MASK) |
CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
HP_INT_THRESH_V(HP_INT_THRESH_M) |
CSIO_HW_LP_INT_THRESH(hw,
CSIO_HW_M_LP_INT_THRESH(hw)),
HP_INT_THRESH_V(CSIO_SGE_DBFIFO_INT_THRESH) |
CSIO_HW_LP_INT_THRESH(hw,
CSIO_SGE_DBFIFO_INT_THRESH));

csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
ENABLE_DROP);
csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
ENABLE_DROP_F);

/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */

CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
& ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
& ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
& ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
& ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1502,26 +1506,26 @@ csio_wr_set_sge(struct csio_hw *hw)
sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;

csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
THRESHOLD_1(sge->counter_val[1]) |
THRESHOLD_2(sge->counter_val[2]) |
THRESHOLD_3(sge->counter_val[3]),
SGE_INGRESS_RX_THRESHOLD);
csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
THRESHOLD_1_V(sge->counter_val[1]) |
THRESHOLD_2_V(sge->counter_val[2]) |
THRESHOLD_3_V(sge->counter_val[3]),
SGE_INGRESS_RX_THRESHOLD_A);

csio_wr_reg32(hw,
TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
SGE_TIMER_VALUE_0_AND_1);
TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
SGE_TIMER_VALUE_0_AND_1_A);

csio_wr_reg32(hw,
TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
SGE_TIMER_VALUE_2_AND_3);
TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
SGE_TIMER_VALUE_2_AND_3_A);

csio_wr_reg32(hw,
TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
SGE_TIMER_VALUE_4_AND_5);
TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
SGE_TIMER_VALUE_4_AND_5_A);

csio_init_intr_coalesce_parms(hw);
}