octeontx2-af: Handle non-contiguous CGX LMAC interfaces
For this, the cgx_id (struct cgx) definition has been changed to reflect
the cgx port id instead of the device instance id. Now cgx_id can be used
directly as the channel offset for NPC configuration. The assumption of
contiguous cgx port ids has been removed from nix_calibrate_x2p as well.

As a side effect, the conversion tables that were sized by cgx count are
now sized by the maximum cgx port id value. Lookups into these tables
return NULL for invalid cgx ports.

Signed-off-by: Linu Cherian <lcherian@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 12e4c9ab2e (parent 44990aaa93)
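The pattern behind the change is worth spelling out before the diff: size id-indexed tables by the maximum possible port id rather than the device count, leave holes for absent ports, and make every lookup tolerate them. Below is a minimal standalone sketch of that pattern; the names (ID_MAX, port_idmap, port_pdata) and the example ids are invented for illustration and are not the driver code.

#include <stdio.h>

#define ID_MAX 8	/* analogous to CGX_ID_MASK + 1: one slot per possible id */

static void *port_idmap[ID_MAX];	/* holes stay NULL for absent ports */

/* Analogous to rvu_cgx_pdata(): NULL for out-of-range or absent ids. */
static void *port_pdata(unsigned int id)
{
	if (id >= ID_MAX)
		return NULL;
	return port_idmap[id];
}

int main(void)
{
	static int a, b;
	unsigned int id;

	/* Non-contiguous discovery: ports 0 and 2 exist, port 1 does not. */
	port_idmap[0] = &a;
	port_idmap[2] = &b;

	for (id = 0; id < ID_MAX; id++) {
		if (!port_pdata(id))
			continue;	/* skip holes, as the driver now does */
		printf("port %u present\n", id);
	}
	return 0;
}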
drivers/net/ethernet/marvell/octeontx2/af/cgx.c

@@ -92,17 +92,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 	return cgx->lmac_idmap[lmac_id];
 }
 
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
 {
 	struct cgx *cgx_dev;
-	int count = 0;
+	int idmax = -ENODEV;
 
 	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
-		count++;
+		if (cgx_dev->cgx_id > idmax)
+			idmax = cgx_dev->cgx_id;
 
-	return count;
+	if (idmax < 0)
+		return 0;
+
+	return idmax + 1;
 }
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
 
 int cgx_get_lmac_cnt(void *cgxd)
 {
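With non-contiguous ports, say probed ids {0, 2}, the rewritten function returns idmax + 1 = 3 rather than the device count 2, so any table indexed by port id has a slot for every possible id, including the hole at id 1; with no devices it still returns 0.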
@@ -679,8 +683,10 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_release_regions;
 	}
 
+	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+		& CGX_ID_MASK;
+
 	list_add(&cgx->cgx_list, &cgx_list);
-	cgx->cgx_id = cgx_get_cgx_cnt() - 1;
 
 	cgx_link_usertable_init();
 
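The probe hunk ties cgx_id to hardware topology instead of probe order: the CGX instance number is encoded in bits 24 and up of the device's BAR0 base address. A worked example with a hypothetical BAR0 start; the address is invented, only the shift-and-mask arithmetic comes from the diff.

#include <assert.h>
#include <stdint.h>

#define CGX_ID_MASK 0x7

int main(void)
{
	/* Hypothetical BAR0 start address, chosen only for illustration. */
	uint64_t bar_start = 0x87E0E2000000ULL;
	unsigned int cgx_id = (bar_start >> 24) & CGX_ID_MASK;

	/* 0x87E0E2000000 >> 24 == 0x87E0E2; 0x87E0E2 & 0x7 == 2 */
	assert(cgx_id == 2);	/* this device is CGX2, whatever the probe order */
	return 0;
}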
drivers/net/ethernet/marvell/octeontx2/af/cgx.h

@@ -20,7 +20,7 @@
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM		0
 
-#define MAX_CGX				3
+#define CGX_ID_MASK			0x7
 #define MAX_LMAC_PER_CGX		4
 #define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
 #define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
@@ -95,7 +95,7 @@ struct cgx_event_cb {
 
 extern struct pci_driver cgx_driver;
 
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
 int cgx_get_lmac_cnt(void *cgxd);
 void *cgx_get_pdata(int cgx_id);
 int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
drivers/net/ethernet/marvell/octeontx2/af/rvu.h

@@ -226,7 +226,7 @@ struct rvu {
 	/* CGX */
 #define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
 	u8			cgx_mapped_pfs;
-	u8			cgx_cnt; /* available cgx ports */
+	u8			cgx_cnt_max; /* CGX port count max */
 	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
 	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
 						  * every cgx lmac port
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c

@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 {
-	if (cgx_id >= rvu->cgx_cnt)
+	if (cgx_id >= rvu->cgx_cnt_max)
 		return NULL;
 
 	return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
-	int cgx_cnt = rvu->cgx_cnt;
+	int cgx_cnt_max = rvu->cgx_cnt_max;
 	int cgx, lmac_cnt, lmac;
 	int pf = PF_CGXMAP_BASE;
 	int size, free_pkind;
 
-	if (!cgx_cnt)
+	if (!cgx_cnt_max)
 		return 0;
 
-	if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
 		return -EINVAL;
 
 	/* Alloc map table
 	 * An additional entry is required since PF id starts from 1 and
 	 * hence entry at offset 0 is invalid.
 	 */
-	size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
-	rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
 	if (!rvu->pf2cgxlmac_map)
 		return -ENOMEM;
 
-	/* Initialize offset 0 with an invalid cgx and lmac id */
-	rvu->pf2cgxlmac_map[0] = 0xFF;
+	/* Initialize all entries with an invalid cgx and lmac id */
+	memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
 	/* Reverse map table */
 	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-				  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
 				  GFP_KERNEL);
 	if (!rvu->cgxlmac2pf_map)
 		return -ENOMEM;
 
 	rvu->cgx_mapped_pfs = 0;
-	for (cgx = 0; cgx < cgx_cnt; cgx++) {
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
 		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
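The memset to 0xFF works because each pf2cgxlmac_map entry packs both ids into one byte. Below is a plausible reconstruction of that packing, inferred from the 0xF bounds checks in this hunk; the real cgxlmac_id_to_bmap() body is not shown in the diff, so treat the helper as an assumption.

#include <assert.h>

typedef unsigned char u8;

/* Assumed entry layout: cgx id in the high nibble, lmac id in the low
 * nibble. Illustrative reconstruction, not copied from the driver.
 */
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

int main(void)
{
	/* A PF mapped to CGX2/LMAC3 would be stored as 0x23. */
	assert(cgxlmac_id_to_bmap(2, 3) == 0x23);

	/* cgx_cnt_max and MAX_LMAC_PER_CGX are both capped at 0xF, so valid
	 * ids never exceed 0xE per nibble; 0xFF therefore cannot collide
	 * with a real mapping and safely marks an unused slot.
	 */
	return 0;
}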
@@ -234,7 +236,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
 	cb.data = rvu;
 
-	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
 		cgxd = rvu_cgx_pdata(cgx, rvu);
 		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
@@ -261,20 +263,22 @@ int rvu_cgx_init(struct rvu *rvu)
 {
 	int cgx, err;
 
-	/* find available cgx ports */
-	rvu->cgx_cnt = cgx_get_cgx_cnt();
-	if (!rvu->cgx_cnt) {
+	/* CGX port id starts from 0 and are not necessarily contiguous
+	 * Hence we allocate resources based on the maximum port id value.
+	 */
+	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+	if (!rvu->cgx_cnt_max) {
 		dev_info(rvu->dev, "No CGX devices found!\n");
 		return -ENODEV;
 	}
 
-	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
-				      GFP_KERNEL);
+	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+				      sizeof(void *), GFP_KERNEL);
 	if (!rvu->cgx_idmap)
 		return -ENOMEM;
 
 	/* Initialize the cgxdata table */
-	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++)
+	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
 		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
 
 	/* Map CGX LMAC interfaces to RVU PFs */
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c

@@ -2107,8 +2107,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
 
 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
 	/* Check if CGX devices are ready */
-	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
-		if (status & (BIT_ULL(16 + idx)))
+	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+		/* Skip when cgx port is not available */
+		if (!rvu_cgx_pdata(idx, rvu) ||
+		    (status & (BIT_ULL(16 + idx))))
 			continue;
 		dev_err(rvu->dev,
 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
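NIX_AF_STATUS reports per-CGX calibration state in bits 16 and up, one bit per port id. Here is a small self-contained check of the new skip logic under a hypothetical readout (present[] stands in for rvu_cgx_pdata()): with ports {0, 2} present, the old loop over the device count would have flagged the absent CGX1 as a calibration failure and never examined CGX2's bit at all; the new loop over cgx_cnt_max skips the hole and checks both real ports.

#include <assert.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	/* Hypothetical readout: CGX0 and CGX2 calibrated, CGX1 absent. */
	uint64_t status = BIT_ULL(16 + 0) | BIT_ULL(16 + 2);
	int present[3] = { 1, 0, 1 };	/* stands in for rvu_cgx_pdata() */
	int idx;

	for (idx = 0; idx < 3; idx++) {
		if (!present[idx] || (status & BIT_ULL(16 + idx)))
			continue;
		assert(0);	/* never reached: no spurious X2P error */
	}
	return 0;
}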