Revert "Merge branch 'octeontx2-af-CPT'"

This reverts commit b4fbf0b27f, reversing
changes made to 6c977c5c2e.

This seems like net-next material.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2023-01-20 08:38:16 -08:00
parent 300b655db1
commit 45a919bbb2
5 changed files with 88 additions and 278 deletions


@@ -195,9 +195,6 @@ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
cpt_flt_eng_info_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
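
For readers of this hunk: the M() entries form an X-macro table that mbox.h expands more than once, typically into the message-ID enum and into per-message handler prototypes, so a single table line wires up both the ID and its mailbox handler. Below is a minimal, self-contained C sketch of that general pattern; PING, RESET and mbox_handler_* are made-up names, not the driver's actual expansion sites.

#include <stdio.h>

struct msg_req { int dummy; };
struct msg_rsp { int rc; };

/* Hypothetical two-entry message table in the style of the M() list above. */
#define MBOX_MESSAGES \
	M(PING,  0x001, ping,  msg_req, msg_rsp) \
	M(RESET, 0x002, reset, msg_req, msg_rsp)

/* Expansion 1: build the message-ID enum. */
#define M(_name, _id, _fn, _req, _rsp) MBOX_MSG_ ## _name = _id,
enum mbox_msg_id { MBOX_MESSAGES };
#undef M

/* Expansion 2: declare one handler prototype per table entry. */
#define M(_name, _id, _fn, _req, _rsp) \
	int mbox_handler_ ## _fn(struct _req *req, struct _rsp *rsp);
MBOX_MESSAGES
#undef M

int main(void)
{
	printf("PING id = 0x%x\n", MBOX_MSG_PING); /* prints 0x1 */
	return 0;
}
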
@@ -1612,8 +1609,6 @@ struct cpt_lf_alloc_req_msg {
u16 sso_pf_func;
u16 eng_grpmsk;
int blkaddr;
u8 ctx_ilen_valid : 1;
u8 ctx_ilen : 7;
};
#define CPT_INLINE_INBOUND 0
@@ -1697,28 +1692,6 @@ struct cpt_inst_lmtst_req {
u64 rsvd;
};
/* Mailbox message format to request for CPT LF reset */
struct cpt_lf_rst_req {
struct mbox_msghdr hdr;
u32 slot;
u32 rsvd;
};
/* Mailbox message format to request for CPT faulted engines */
struct cpt_flt_eng_info_req {
struct mbox_msghdr hdr;
int blkaddr;
bool reset;
u32 rsvd;
};
struct cpt_flt_eng_info_rsp {
struct mbox_msghdr hdr;
u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
u64 rsvd;
};
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;


@@ -1164,16 +1164,8 @@ cpt:
goto nix_err;
}
err = rvu_cpt_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
goto mcs_err;
}
return 0;
mcs_err:
rvu_mcs_exit(rvu);
nix_err:
rvu_nix_freemem(rvu);
npa_err:


@@ -108,8 +108,6 @@ struct rvu_block {
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
struct rvu *rvu;
u64 cpt_flt_eng_map[3];
u64 cpt_rcvrd_eng_map[3];
};
struct nix_mcast {
@@ -461,7 +459,6 @@ struct rvu {
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
int nix_blkaddr[MAX_NIX_BLKS];
@@ -513,7 +510,6 @@ struct rvu {
struct ptp *ptp;
int mcs_blk_cnt;
int cpt_pf_num;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
@@ -528,8 +524,6 @@ struct rvu {
struct list_head mcs_intrq_head;
/* mcs interrupt queue lock */
spinlock_t mcs_intrq_lock;
/* CPT interrupt lock */
spinlock_t cpt_intr_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -552,17 +546,6 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
/* HW requires read back of RVU_AF_BAR2_SEL register to make sure completion of
* write operation.
*/
rvu_write64(rvu, block, offset, val);
rvu_read64(rvu, block, offset);
/* Barrier to ensure read completes before accessing LF registers */
mb();
}
/* Silicon revisions */
static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
{
@@ -882,7 +865,6 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
int rvu_cpt_init(struct rvu *rvu);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);


@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN 1ULL
#define CPT_CTX_ILEN 2ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -37,68 +37,34 @@
(_rsp)->free_sts_##etype = free_sts; \
})
static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
u64 reg, val;
int i, eng;
u8 grp;
u64 reg0, reg1, reg2;
reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
i = -1;
while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
switch (vec) {
case 0:
eng = i;
break;
case 1:
eng = i + 64;
break;
case 2:
eng = i + 128;
break;
}
grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
/* Disable and enable the engine which triggers fault */
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
spin_lock(&rvu->cpt_intr_lock);
block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
val = val & 0x3;
if (val == 0x1 || val == 0x2)
block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
spin_unlock(&rvu->cpt_intr_lock);
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
if (!is_rvu_otx2(rvu)) {
reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
dev_err_ratelimited(rvu->dev,
"Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
reg0, reg1, reg2);
} else {
dev_err_ratelimited(rvu->dev,
"Received CPTAF FLT irq : 0x%llx, 0x%llx",
reg0, reg1);
}
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
if (!is_rvu_otx2(rvu))
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
return IRQ_HANDLED;
}
static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
}
static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
}
static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
}
static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
@@ -153,10 +119,8 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
int i;
/* Disable all CPT AF interrupts */
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
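
A note on the _ENA_W1S/_ENA_W1C offsets written above: these are the usual write-1-to-set / write-1-to-clear enable pairs, where 1-bits written to W1S enable the matching interrupts, 1-bits written to W1C disable them, and 0-bits leave the mask untouched, so ~0ULL clears every enable bit while 0x1 clears only bit 0. A toy model of that semantics follows; it models the convention, not the RVU register map.

#include <stdint.h>
#include <stdio.h>

/* Toy model of a W1S/W1C enable pair: one backing enable mask with two
 * write-only views.  Bits written as 0 are left untouched in both cases.
 */
struct irq_ena_reg {
	uint64_t ena;
};

static void ena_w1s_write(struct irq_ena_reg *r, uint64_t val)
{
	r->ena |= val;		/* set the bits written as 1 */
}

static void ena_w1c_write(struct irq_ena_reg *r, uint64_t val)
{
	r->ena &= ~val;		/* clear the bits written as 1 */
}

int main(void)
{
	struct irq_ena_reg flt = { 0 };

	ena_w1s_write(&flt, ~0ULL);	/* enable everything */
	ena_w1c_write(&flt, 0x1);	/* later, disable only bit 0 */
	printf("ena = 0x%llx\n", (unsigned long long)flt.ena);
	return 0;
}
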
@@ -187,7 +151,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
/* Disable all CPT AF interrupts */
for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@@ -208,31 +172,16 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
irq_handler_t flt_fn;
int i, ret;
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
case CPT_10K_AF_INT_VEC_FLT0:
flt_fn = rvu_cpt_af_flt0_intr_handler;
break;
case CPT_10K_AF_INT_VEC_FLT1:
flt_fn = rvu_cpt_af_flt1_intr_handler;
break;
case CPT_10K_AF_INT_VEC_FLT2:
flt_fn = rvu_cpt_af_flt2_intr_handler;
break;
}
ret = rvu_cpt_do_register_interrupt(block, off + i,
flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
rvu_cpt_af_flt_intr_handler,
&rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
if (i == CPT_10K_AF_INT_VEC_FLT2)
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
else
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
}
ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
@@ -259,8 +208,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
irq_handler_t flt_fn;
int i, offs, ret = 0;
char irq_name[16];
if (!is_block_implemented(rvu->hw, blkaddr))
return 0;
@@ -277,20 +226,13 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
return cpt_10k_register_interrupts(block, offs);
for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
case CPT_AF_INT_VEC_FLT0:
flt_fn = rvu_cpt_af_flt0_intr_handler;
break;
case CPT_AF_INT_VEC_FLT1:
flt_fn = rvu_cpt_af_flt1_intr_handler;
break;
}
snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
ret = rvu_cpt_do_register_interrupt(block, offs + i,
flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
rvu_cpt_af_flt_intr_handler,
irq_name);
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
}
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
@@ -348,7 +290,7 @@ static int get_cpt_pf_num(struct rvu *rvu)
static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
int cpt_pf_num = get_cpt_pf_num(rvu);
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -360,7 +302,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
int cpt_pf_num = get_cpt_pf_num(rvu);
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -429,12 +371,8 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1;
if (!is_rvu_otx2(rvu)) {
if (req->ctx_ilen_valid)
val |= (req->ctx_ilen << 17);
else
val |= (CPT_CTX_ILEN << 17);
}
if (!is_rvu_otx2(rvu))
val |= (CPT_CTX_ILEN << 17);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
@@ -824,21 +762,10 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
int blkaddr, struct cpt_rxc_time_cfg_req *save)
int blkaddr)
{
u64 dfrg_reg;
if (save) {
/* Save older config */
dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
}
dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
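
On the helpers used in cpt_rxc_time_cfg(): GENMASK_ULL(h, l) builds a contiguous mask covering bits l through h, FIELD_PREP() shifts a value into that field and FIELD_GET() extracts it again, which is how the zombie/active thresholds and limits get packed into (and, in the reverted code, saved back out of) the CPT_AF_RXC_DFRG register. Below is a self-contained userspace sketch with simplified stand-in macros; the RXC_* field positions in it are illustrative, only RXC_ZOMBIE_COUNT's real layout appears above.

#include <stdint.h>
#include <stdio.h>

/* Simplified, userspace stand-ins for the kernel's GENMASK_ULL/FIELD_PREP/
 * FIELD_GET (assumes GCC or Clang for __builtin_ctzll).
 */
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

/* Illustrative field positions only; not the real CPT_AF_RXC_DFRG layout. */
#define RXC_ZOMBIE_THRES	GENMASK_ULL(59, 48)
#define RXC_ZOMBIE_LIMIT	GENMASK_ULL(43, 32)

int main(void)
{
	uint64_t dfrg_reg = 0;

	dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_THRES, 1);	/* pack threshold */
	dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, 2);	/* pack limit */
	printf("dfrg = 0x%016llx, thres read back = %llu\n",
	       (unsigned long long)dfrg_reg,
	       (unsigned long long)FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg));
	return 0;
}
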
@@ -863,7 +790,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
cpt_rxc_time_cfg(rvu, req, blkaddr);
return 0;
}
@@ -874,67 +801,9 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}
int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
int cptlf, blkaddr, ret;
u16 actual_slot;
u64 ctl, ctl2;
blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
req->slot, &actual_slot);
if (blkaddr < 0)
return CPT_AF_ERR_LF_INVALID;
block = &rvu->hw->block[blkaddr];
cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
ret = rvu_lf_reset(rvu, block, cptlf);
if (ret)
dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
block->addr, cptlf);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
return 0;
}
int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
struct cpt_flt_eng_info_rsp *rsp)
{
struct rvu_block *block;
unsigned long flags;
int blkaddr, vec;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
block = &rvu->hw->block[blkaddr];
for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
if (req->reset) {
block->cpt_flt_eng_map[vec] = 0x0;
block->cpt_rcvrd_eng_map[vec] = 0x0;
}
spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
}
return 0;
}
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
struct cpt_rxc_time_cfg_req req, prev;
struct cpt_rxc_time_cfg_req req;
int timeout = 2000;
u64 reg;
@@ -950,7 +819,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
req.active_thres = 1;
req.active_limit = 1;
cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
cpt_rxc_time_cfg(rvu, &req, blkaddr);
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
@@ -976,68 +845,70 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
/* Restore config */
cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
}
#define INFLIGHT GENMASK_ULL(8, 0)
#define GRB_CNT GENMASK_ULL(39, 32)
#define GWB_CNT GENMASK_ULL(47, 40)
#define XQ_XOR GENMASK_ULL(63, 63)
#define DQPTR GENMASK_ULL(19, 0)
#define NQPTR GENMASK_ULL(51, 32)
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
int timeout = 1000000;
u64 inprog, inst_ptr;
u64 qsize, pending;
int i = 0;
int i = 0, hard_lp_ctr = 100000;
u64 inprog, grp_ptr;
u16 nq_ptr, dq_ptr;
/* Disable instructions enqueuing */
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
/* Disable executions in the LF's queue */
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
inprog |= BIT_ULL(16);
inprog &= ~BIT_ULL(16);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
qsize = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
do {
inst_ptr = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
FIELD_GET(NQPTR, inst_ptr) -
FIELD_GET(DQPTR, inst_ptr);
udelay(1);
timeout--;
} while ((pending != 0) && (timeout != 0));
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
timeout = 1000000;
/* Wait for CPT queue to become execution-quiescent */
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
if (INPROG_GRB_PARTIAL(inprog)) {
i = 0;
hard_lp_ctr--;
} else {
i++;
}
if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
(FIELD_GET(GRB_CNT, inprog) == 0)) {
grp_ptr = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot,
CPT_LF_Q_GRP_PTR));
nq_ptr = (grp_ptr >> 32) & 0x7FFF;
dq_ptr = grp_ptr & 0x7FFF;
} while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
if (hard_lp_ctr == 0)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
i = 0;
hard_lp_ctr = 100000;
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
if ((INPROG_INFLIGHT(inprog) == 0) &&
(INPROG_GWB(inprog) < 40) &&
((INPROG_GRB(inprog) == 0) ||
(INPROG_GRB((inprog)) == 40))) {
i++;
} else {
i = 0;
timeout--;
hard_lp_ctr--;
}
} while ((timeout != 0) && (i < 10));
} while (hard_lp_ctr && (i < 10));
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
/* Wait for 2 us to flush all queue writes to memory */
udelay(2);
if (hard_lp_ctr == 0)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
}
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
@@ -1047,15 +918,18 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
mutex_lock(&rvu->alias_lock);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
cpt_lf_disable_iqueue(rvu, blkaddr, slot);
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
mutex_unlock(&rvu->alias_lock);
/* Set group drop to help clear out hardware */
reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
reg |= BIT_ULL(17);
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
return 0;
}
@@ -1066,7 +940,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
int nix_blkaddr)
{
int cpt_pf_num = rvu->cpt_pf_num;
int cpt_pf_num = get_cpt_pf_num(rvu);
struct cpt_inst_lmtst_req *req;
dma_addr_t res_daddr;
int timeout = 3000;
@@ -1190,7 +1064,7 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
for (i = 0; i < max_ctx_entries; i++) {
cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
@@ -1203,19 +1077,10 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
reg);
}
}
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
unlock:
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
int rvu_cpt_init(struct rvu *rvu)
{
/* Retrieve CPT PF number */
rvu->cpt_pf_num = get_cpt_pf_num(rvu);
spin_lock_init(&rvu->cpt_intr_lock);
return 0;
}


@@ -545,8 +545,6 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
#define CPT_LF_Q_SIZE 0x100
#define CPT_LF_Q_INST_PTR 0x110
#define CPT_LF_Q_GRP_PTR 0x120
#define CPT_LF_CTX_FLUSH 0x510