Merge branch 'macsec-fixes-for-cn10kb'
Geetha sowjanya says:

====================
Macsec fixes for CN10KB

This patch set has fixes for the issues encountered while testing
macsec on CN10KB silicon. Below is the description of the patches:

Patch 1: For each LMAC, two MCSX_MCS_TOP_SLAVE_CHANNEL_CFG registers
exist in CN10KB. Bypass has to be disabled in both registers.

Patch 2: Adds a workaround for the errata in accessing the TCAM DATA
and MASK registers.

Patch 3: Fixes the parser configuration to allow PTP traffic.

Patch 4: Addresses the IP vector and block-level interrupt mask
changes.

Patch 5: Fixes NULL pointer crashes when rebooting.

Patch 6: Since the MCS is a global block shared by all LMACs, the
TCAM match must also include the macsec DMAC to distinguish each
macsec interface.

Patch 7: Clears the stats before freeing an MCS hardware resource
back to the AF.

Patch 8: Stats which share a single counter in hardware are tracked
in software. This tracking was based on the wrong secy mode params;
use the correct secy mode params.

Patch 9: When updating secy mode params, the PN number was also reset
to its initial value. Hence, do not write to the PN value register
when updating a secy.
====================

Link: https://lore.kernel.org/r/20230426062528.20575-1-gakula@marvell.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 075cafffce
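Note: the first two hunks below carry patch 2's errata workaround, which is purely a write-ordering change: all four TCAM_DATA registers of a flow entry must be written before its TCAM_MASK registers, rather than interleaving the two banks. A minimal sketch of the before/after pattern, reconstructed from the hunks' own context lines (the pre-fix "before" loop is an inference, not shown verbatim in the diff):

	/* Before: one loop interleaved DATA and MASK writes per reg_id. */
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, data[reg_id]);
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, mask[reg_id]);
	}

	/* After: write the whole DATA bank, then the whole MASK bank. */
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, data[reg_id]);
	}
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, mask[reg_id]);
	}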
@@ -473,6 +473,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
 		for (reg_id = 0; reg_id < 4; reg_id++) {
 			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
 			mcs_reg_write(mcs, reg, data[reg_id]);
+		}
+		for (reg_id = 0; reg_id < 4; reg_id++) {
 			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
 			mcs_reg_write(mcs, reg, mask[reg_id]);
 		}
@@ -480,6 +482,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
 		for (reg_id = 0; reg_id < 4; reg_id++) {
 			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
 			mcs_reg_write(mcs, reg, data[reg_id]);
+		}
+		for (reg_id = 0; reg_id < 4; reg_id++) {
 			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
 			mcs_reg_write(mcs, reg, mask[reg_id]);
 		}
@@ -494,6 +498,9 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
 
 	/* Flow entry */
 	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+	__set_bit(flow_id, mcs->rx.flow_ids.bmap);
+	__set_bit(flow_id, mcs->tx.flow_ids.bmap);
+
 	for (reg_id = 0; reg_id < 4; reg_id++) {
 		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
 		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
@@ -504,6 +511,8 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
 	}
 	/* secy */
 	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+	__set_bit(secy_id, mcs->rx.secy.bmap);
+	__set_bit(secy_id, mcs->tx.secy.bmap);
 
 	/* Set validate frames to NULL and enable control port */
 	plcy = 0x7ull;
@@ -528,6 +537,7 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
 	/* Enable Flowid entry */
 	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
 	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+
 	return 0;
 }
 
@@ -926,60 +936,42 @@ static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
 	mcs_add_intr_wq_entry(mcs, &event);
 }
 
-static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+				 enum mcs_direction dir)
 {
-	struct mcs_intr_event event = { 0 };
-	int i;
+	u64 val, reg;
+	int lmac;
 
-	if (!(intr & MCS_BBE_INT_MASK))
+	if (!(intr & 0x6ULL))
 		return;
 
-	event.mcs_id = mcs->mcs_id;
-	event.pcifunc = mcs->pf_map[0];
+	if (intr & BIT_ULL(1))
+		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
+					MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
+	else
+		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
+					MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
+	val = mcs_reg_read(mcs, reg);
 
-	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
-		if (!(intr & BIT_ULL(i)))
+	/* policy/data over flow occurred */
+	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+		if (!(val & BIT_ULL(lmac)))
 			continue;
 
-		/* Lower nibble denotes data fifo overflow interrupts and
-		 * upper nibble indicates policy fifo overflow interrupts.
-		 */
-		if (intr & 0xFULL)
-			event.intr_mask = (dir == MCS_RX) ?
-					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
-					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
-		else
-			event.intr_mask = (dir == MCS_RX) ?
-					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
-					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
-
-		/* Notify the lmac_id info which ran into BBE fatal error */
-		event.lmac_id = i & 0x3ULL;
-		mcs_add_intr_wq_entry(mcs, &event);
+		dev_warn(mcs->dev, "BEE:Policy or data overflow occurred on lmac:%d\n", lmac);
 	}
 }
 
-static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+				 enum mcs_direction dir)
 {
-	struct mcs_intr_event event = { 0 };
-	int i;
+	int lmac;
 
-	if (!(intr & MCS_PAB_INT_MASK))
+	if (!(intr & 0xFFFFFULL))
 		return;
 
-	event.mcs_id = mcs->mcs_id;
-	event.pcifunc = mcs->pf_map[0];
-
-	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
-		if (!(intr & BIT_ULL(i)))
-			continue;
-
-		event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
-				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
-
-		/* Notify the lmac_id info which ran into PAB fatal error */
-		event.lmac_id = i;
-		mcs_add_intr_wq_entry(mcs, &event);
+	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+		if (intr & BIT_ULL(lmac))
+			dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
 	}
 }
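Note on patch 4's shape, as seen in the hunk above: the BBE/PAB handlers stop being static helpers shared by both silicon variants and become per-variant callbacks (CN10KB only warns per LMAC, while CNF10K-B, in a later hunk, keeps raising events toward the PF). The shared IRQ path then dispatches through the ops table, e.g.:

	bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
	mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);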
@@ -988,9 +980,8 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	struct mcs *mcs = (struct mcs *)mcs_irq;
 	u64 intr, cpm_intr, bbe_intr, pab_intr;
 
-	/* Disable and clear the interrupt */
+	/* Disable the interrupt */
 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
-	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
 
 	/* Check which block has interrupt*/
 	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
@@ -1037,7 +1028,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* BBE RX */
 	if (intr & MCS_BBE_RX_INT_ENA) {
 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1047,7 +1038,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* BBE TX */
 	if (intr & MCS_BBE_TX_INT_ENA) {
 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1057,7 +1048,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* PAB RX */
 	if (intr & MCS_PAB_RX_INT_ENA) {
 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
-		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
@@ -1067,14 +1058,15 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
 	/* PAB TX */
 	if (intr & MCS_PAB_TX_INT_ENA) {
 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
-		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
 
 		/* Clear the interrupt */
 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
 	}
 
-	/* Enable the interrupt */
+	/* Clear and enable the interrupt */
+	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
 
 	return IRQ_HANDLED;
@@ -1156,7 +1148,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
 		return ret;
 	}
 
-	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+	ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
 			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
 	if (ret) {
 		dev_err(mcs->dev, "MCS IP irq registration failed\n");
@@ -1175,11 +1167,11 @@ static int mcs_register_interrupts(struct mcs *mcs)
 	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
 	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
 
-	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
-	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
+	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
 
-	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
-	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
 
 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
 	if (!mcs->tx_sa_active) {
@@ -1190,7 +1182,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
 	return ret;
 
 free_irq:
-	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
+	free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
 exit:
 	pci_free_irq_vectors(mcs->pdev);
 	mcs->num_vec = 0;
@@ -1325,8 +1317,11 @@ void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
 void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
 {
 	u64 reg;
+	int id = lmac_id * 2;
 
-	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
+	mcs_reg_write(mcs, reg, (u64)mode);
+	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
 	mcs_reg_write(mcs, reg, (u64)mode);
 }
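Note on the hunk above (patch 1): CN10KB exposes two MCSX_MCS_TOP_SLAVE_CHANNEL_CFG registers per LMAC, so the mode (e.g. bypass disable) must land in both the even and the odd entry; writing only index lmac_id * 2 left the second channel in bypass. The resulting access pattern, condensed from the hunk:

	int id = lmac_id * 2;	/* each LMAC owns two channel-config entries */

	mcs_reg_write(mcs, MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id), (u64)mode);
	mcs_reg_write(mcs, MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id + 1), (u64)mode);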
@@ -1484,6 +1479,7 @@ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
 	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
 	hw->mcs_x2p_intf = 5;		/* x2p clabration intf */
 	hw->mcs_blks = 1;		/* MCS blocks */
+	hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
 }
 
 static struct mcs_ops cn10kb_mcs_ops = {
@@ -1492,6 +1488,8 @@ static struct mcs_ops cn10kb_mcs_ops = {
 	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
 	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
 	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
+	.mcs_bbe_intr_handler		= cn10kb_mcs_bbe_intr_handler,
+	.mcs_pab_intr_handler		= cn10kb_mcs_pab_intr_handler,
 };
 
 static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1592,7 +1590,7 @@ static void mcs_remove(struct pci_dev *pdev)
 
 	/* Set MCS to external bypass */
 	mcs_set_external_bypass(mcs, true);
-	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
+	free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
 	pci_free_irq_vectors(pdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);

@@ -43,24 +43,15 @@
 /* Reserved resources for default bypass entry */
 #define MCS_RSRC_RSVD_CNT		1
 
-/* MCS Interrupt Vector Enumeration */
-enum mcs_int_vec_e {
-	MCS_INT_VEC_MIL_RX_GBL		= 0x0,
-	MCS_INT_VEC_MIL_RX_LMACX	= 0x1,
-	MCS_INT_VEC_MIL_TX_LMACX	= 0x5,
-	MCS_INT_VEC_HIL_RX_GBL		= 0x9,
-	MCS_INT_VEC_HIL_RX_LMACX	= 0xa,
-	MCS_INT_VEC_HIL_TX_GBL		= 0xe,
-	MCS_INT_VEC_HIL_TX_LMACX	= 0xf,
-	MCS_INT_VEC_IP			= 0x13,
-	MCS_INT_VEC_CNT			= 0x14,
-};
+/* MCS Interrupt Vector */
+#define MCS_CNF10KB_INT_VEC_IP	0x13
+#define MCS_CN10KB_INT_VEC_IP	0x53
 
 #define MCS_MAX_BBE_INT			8ULL
 #define MCS_BBE_INT_MASK		0xFFULL
 
-#define MCS_MAX_PAB_INT			4ULL
-#define MCS_PAB_INT_MASK		0xFULL
+#define MCS_MAX_PAB_INT			8ULL
+#define MCS_PAB_INT_MASK		0xFULL
 
 #define MCS_BBE_RX_INT_ENA		BIT_ULL(0)
 #define MCS_BBE_TX_INT_ENA		BIT_ULL(1)
@@ -137,6 +128,7 @@ struct hwinfo {
 	u8 lmac_cnt;
 	u8 mcs_blks;
 	unsigned long	lmac_bmap; /* bitmap of enabled mcs lmac */
+	u16 ip_vec;
 };
 
 struct mcs {
@@ -165,6 +157,8 @@ struct mcs_ops {
 	void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
 	void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
 	void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+	void (*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+	void (*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
 };
 
 extern struct pci_driver mcs_driver;
@@ -219,6 +213,8 @@ void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *ma
 void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
 void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
 void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
 
 /* CNF10K-B APIs */
 struct mcs_ops *cnf10kb_get_mac_ops(void);
@@ -229,6 +225,8 @@ void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *m
 void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
 void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
 void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
 
 /* Stats APIs */
 void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);

@@ -13,6 +13,8 @@ static struct mcs_ops cnf10kb_mcs_ops = {
 	.mcs_tx_sa_mem_map_write	= cnf10kb_mcs_tx_sa_mem_map_write,
 	.mcs_rx_sa_mem_map_write	= cnf10kb_mcs_rx_sa_mem_map_write,
 	.mcs_flowid_secy_map		= cnf10kb_mcs_flowid_secy_map,
+	.mcs_bbe_intr_handler		= cnf10kb_mcs_bbe_intr_handler,
+	.mcs_pab_intr_handler		= cnf10kb_mcs_pab_intr_handler,
 };
 
 struct mcs_ops *cnf10kb_get_mac_ops(void)
@@ -31,6 +33,7 @@ void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
 	hw->lmac_cnt = 4;		/* lmacs/ports per mcs block */
 	hw->mcs_x2p_intf = 1;		/* x2p clabration intf */
 	hw->mcs_blks = 7;		/* MCS blocks */
+	hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
 }
 
 void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
@@ -212,3 +215,63 @@ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
 		mcs_add_intr_wq_entry(mcs, &event);
 	}
 }
+
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+				  enum mcs_direction dir)
+{
+	struct mcs_intr_event event = { 0 };
+	int i;
+
+	if (!(intr & MCS_BBE_INT_MASK))
+		return;
+
+	event.mcs_id = mcs->mcs_id;
+	event.pcifunc = mcs->pf_map[0];
+
+	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+		if (!(intr & BIT_ULL(i)))
+			continue;
+
+		/* Lower nibble denotes data fifo overflow interrupts and
+		 * upper nibble indicates policy fifo overflow interrupts.
+		 */
+		if (intr & 0xFULL)
+			event.intr_mask = (dir == MCS_RX) ?
+					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+		else
+			event.intr_mask = (dir == MCS_RX) ?
+					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+		/* Notify the lmac_id info which ran into BBE fatal error */
+		event.lmac_id = i & 0x3ULL;
+		mcs_add_intr_wq_entry(mcs, &event);
+	}
+}
+
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+				  enum mcs_direction dir)
+{
+	struct mcs_intr_event event = { 0 };
+	int i;
+
+	if (!(intr & MCS_PAB_INT_MASK))
+		return;
+
+	event.mcs_id = mcs->mcs_id;
+	event.pcifunc = mcs->pf_map[0];
+
+	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+		if (!(intr & BIT_ULL(i)))
+			continue;
+
+		event.intr_mask = (dir == MCS_RX) ?
+				  MCS_PAB_RX_CHAN_OVERFLOW_INT :
+				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+		/* Notify the lmac_id info which ran into PAB fatal error */
+		event.lmac_id = i;
+		mcs_add_intr_wq_entry(mcs, &event);
+	}
+}

@@ -97,6 +97,7 @@
 #define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a)			(0x46f8ull + (a) * 0x8ull)
 #define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a)	(0x788ull + (a) * 0x8ull)
 #define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a)		(0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a)			(0x3b98ull + (a) * 0x8ull)
 #define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a)	({	\
 	u64 offset;					\
 							\
@@ -275,7 +276,10 @@
 #define MCSX_BBE_RX_SLAVE_CAL_ENTRY			0x180ull
 #define MCSX_BBE_RX_SLAVE_CAL_LEN			0x188ull
 #define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a)		(0x290ull + (a) * 0x40ull)
-
+#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0		0xe20
+#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0		0x1298
+#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0		0xe40
+#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0		0x12b8
 #define MCSX_BBE_RX_SLAVE_BBE_INT ({	\
 	u64 offset;			\
 					\

@@ -11,6 +11,7 @@
 
 #include "mcs.h"
 #include "rvu.h"
+#include "mcs_reg.h"
 #include "lmac_common.h"
 
 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
@@ -32,6 +33,42 @@ static struct _req_type __maybe_unused \
 MBOX_UP_MCS_MESSAGES
 #undef M
 
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
+{
+	struct mcs *mcs;
+	u64 cfg;
+	u8 port;
+
+	if (!rvu->mcs_blk_cnt)
+		return;
+
+	/* When ptp is enabled, RPM appends 8B header for all
+	 * RX packets. MCS PEX need to configure to skip 8B
+	 * during packet parsing.
+	 */
+
+	/* CNF10K-B */
+	if (rvu->mcs_blk_cnt > 1) {
+		mcs = mcs_get_pdata(rpm_id);
+		cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+		if (ena)
+			cfg |= BIT_ULL(lmac_id);
+		else
+			cfg &= ~BIT_ULL(lmac_id);
+		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
+		return;
+	}
+	/* CN10KB */
+	mcs = mcs_get_pdata(0);
+	port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
+	cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
+	if (ena)
+		cfg |= BIT_ULL(0);
+	else
+		cfg &= ~BIT_ULL(0);
+	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
+}
+
 int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
 				       struct mcs_set_lmac_mode *req,
 				       struct msg_rsp *rsp)

@@ -920,6 +920,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
 /* CN10K MCS */
 int rvu_mcs_init(struct rvu *rvu);
 int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
 void rvu_mcs_exit(struct rvu *rvu);
 
 #endif /* RVU_H */

@@ -773,6 +773,8 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
 	/* This flag is required to clean up CGX conf if app gets killed */
 	pfvf->hw_rx_tstamp_en = enable;
 
+	/* Inform MCS about 8B RX header */
+	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
 	return 0;
 }
 

@@ -497,8 +497,9 @@ static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused
 		   stats.octet_validated_cnt);
 	seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
 		   stats.pkt_port_disabled_cnt);
-	seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
-	seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
+	seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
+	seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
+		   stats.pkt_nosa_cnt);
 	seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
 		   stats.pkt_nosaerror_cnt);
 	seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,

@@ -9,6 +9,7 @@
 #include <net/macsec.h>
 #include "otx2_common.h"
 
+#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
 #define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
 #define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
 #define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)
@@ -149,11 +150,20 @@ static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
 			       enum mcs_rsrc_type type, u16 hw_rsrc_id,
 			       bool all)
 {
+	struct mcs_clear_stats *clear_req;
 	struct mbox *mbox = &pfvf->mbox;
 	struct mcs_free_rsrc_req *req;
 
 	mutex_lock(&mbox->lock);
 
+	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+	if (!clear_req)
+		goto fail;
+
+	clear_req->id = hw_rsrc_id;
+	clear_req->type = type;
+	clear_req->dir = dir;
+
 	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
 	if (!req)
 		goto fail;
@@ -237,8 +247,10 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
 				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
 {
 	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+	struct macsec_secy *secy = rxsc->sw_secy;
 	struct mcs_flowid_entry_write_req *req;
 	struct mbox *mbox = &pfvf->mbox;
+	u64 mac_da;
 	int ret;
 
 	mutex_lock(&mbox->lock);
@@ -249,11 +261,16 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
 		goto fail;
 	}
 
+	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);
+
+	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
+	req->mask[0] = ~0ULL;
+	req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;
+
 	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
 	req->mask[1] = ~0ULL;
 	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
 
-	req->mask[0] = ~0ULL;
 	req->mask[2] = ~0ULL;
 	req->mask[3] = ~0ULL;
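Note on the hunk above (patch 6): because one MCS block serves every LMAC, matching only the MACsec ethertype cannot tell two macsec interfaces apart, so the RX flow entry now also matches the interface's DMAC. The surrounding code suggests the TCAM mask convention is "1 = don't care, 0 = compare" (the bypass entry writes GENMASK_ULL(63, 0) to match everything), which gives this field layout, condensed from the hunk:

	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	/* Word 0, bits 47:0 carry the destination MAC. */
	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
	req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;	/* compare only the DA */

	/* Word 1, bits 47:32 carry the ethertype (ETH_P_MACSEC = 0x88e5). */
	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;	/* compare only the ethertype */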
@@ -997,7 +1014,7 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
 
 	/* Check if sync is really needed */
 	if (secy->validate_frames == txsc->last_validate_frames &&
-	    secy->protect_frames == txsc->last_protect_frames)
+	    secy->replay_protect == txsc->last_replay_protect)
 		return;
 
 	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
@@ -1019,19 +1036,19 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
 		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
 		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
 
-		if (txsc->last_protect_frames)
+		if (txsc->last_replay_protect)
 			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
 		else
 			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
 
-		if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
 			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
 		else
 			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
 	}
 
 	txsc->last_validate_frames = secy->validate_frames;
-	txsc->last_protect_frames = secy->protect_frames;
+	txsc->last_replay_protect = secy->replay_protect;
 }
 
 static int cn10k_mdo_open(struct macsec_context *ctx)
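Note on the stats hunks above (patch 8): the hardware exposes a single late/delayed counter and a single unchecked/OK counter; which software counter each feeds depends on the secy mode in force when the counts were gathered. The fix keys the split off replay_protect (previously, wrongly, protect_frames) and off MACSEC_VALIDATE_DISABLED (previously MACSEC_VALIDATE_CHECK). Condensed from the hunk:

	/* One hardware counter: "late" only if replay protection was on,
	 * otherwise the same count means "delayed".
	 */
	if (txsc->last_replay_protect)
		rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

	/* One hardware counter: "unchecked" only if validation was disabled,
	 * otherwise the same count means the packets were OK.
	 */
	if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;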
@@ -1100,7 +1117,7 @@ static int cn10k_mdo_add_secy(struct macsec_context *ctx)
 	txsc->sw_secy = secy;
 	txsc->encoding_sa = secy->tx_sc.encoding_sa;
 	txsc->last_validate_frames = secy->validate_frames;
-	txsc->last_protect_frames = secy->protect_frames;
+	txsc->last_replay_protect = secy->replay_protect;
 
 	list_add(&txsc->entry, &cfg->txsc_list);
 
@@ -1117,6 +1134,7 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
 	struct macsec_secy *secy = ctx->secy;
 	struct macsec_tx_sa *sw_tx_sa;
 	struct cn10k_mcs_txsc *txsc;
+	bool active;
 	u8 sa_num;
 	int err;
 
@@ -1124,15 +1142,19 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
 	if (!txsc)
 		return -ENOENT;
 
-	txsc->encoding_sa = secy->tx_sc.encoding_sa;
-
-	sa_num = txsc->encoding_sa;
-	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+	/* Encoding SA got changed */
+	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
+		txsc->encoding_sa = secy->tx_sc.encoding_sa;
+		sa_num = txsc->encoding_sa;
+		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+		active = sw_tx_sa ? sw_tx_sa->active : false;
+		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
+	}
 
 	if (netif_running(secy->netdev)) {
 		cn10k_mcs_sync_stats(pfvf, secy, txsc);
 
-		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
 		if (err)
 			return err;
 	}
@@ -1521,12 +1543,12 @@ static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
 	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
 	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
 
-	if (secy->protect_frames)
+	if (secy->replay_protect)
 		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
 	else
 		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
 
-	if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
 		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
 	else
 		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

@@ -389,7 +389,7 @@ struct cn10k_mcs_txsc {
 	struct cn10k_txsc_stats stats;
 	struct list_head entry;
 	enum macsec_validation_type last_validate_frames;
-	bool last_protect_frames;
+	bool last_replay_protect;
 	u16 hw_secy_id_tx;
 	u16 hw_secy_id_rx;
 	u16 hw_flow_id;

@@ -3073,8 +3073,6 @@ static void otx2_remove(struct pci_dev *pdev)
 		otx2_config_pause_frm(pf);
 	}
 
-	cn10k_mcs_free(pf);
-
 #ifdef CONFIG_DCB
 	/* Disable PFC config */
 	if (pf->pfc_en) {
@@ -3088,6 +3086,7 @@ static void otx2_remove(struct pci_dev *pdev)
 
 	otx2_unregister_dl(pf);
 	unregister_netdev(netdev);
+	cn10k_mcs_free(pf);
 	otx2_sriov_disable(pf->pdev);
 	otx2_sriov_vfcfg_cleanup(pf);
 	if (pf->otx2_wq)