/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
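/* The two helpers below mirror each write across the four STORM
 * processors (XSTORM, CSTORM, TSTORM and USTORM): each storm keeps its
 * own copy of the VF-to-PF mapping and of the function-enable flag in
 * its internal memory, so all four copies are updated together.
 */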
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}
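
/* If no VF matches, for_each_vf() runs to completion and the returned
 * index equals BNX2X_NR_VIRTFN(bp); callers treat that value as
 * "not found" (see bnx2x_vf_by_abs_fid() below).
 */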

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);

	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}
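
/* A sketch of the CAM entry decode done above, per the
 * IGU_REG_MAPPING_MEMORY_* field masks:
 *
 *	valid  = val & IGU_REG_MAPPING_MEMORY_VALID;
 *	fid    = GET_FIELD(val, IGU_REG_MAPPING_MEMORY_FID);
 *	is_pf  = fid & IGU_FID_ENCODE_IS_PF;
 *	owner  = is_pf ? (fid & IGU_FID_PF_NUM_MASK)
 *		       : (fid & IGU_FID_VF_NUM_MASK);
 *	vector = GET_FIELD(val, IGU_REG_MAPPING_MEMORY_VECTOR);
 *
 * The first valid entry seen for a VF fixes its igu_base_id; every
 * further entry just bumps vf_sb_count (see bnx2x_vf_set_igu_info()).
 */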

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
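
/* A worked example of the first_vf_in_pf arithmetic above: the GRC
 * register reports the PF's first VF in units of 8 VFs, as an absolute
 * number across both paths.  Assuming BNX2X_MAX_NUM_OF_VFS is 64, a
 * masked register value of 9 on path 1 gives 9 * 8 - 64 * 1 = 8, i.e.
 * this PF's VFs start at per-path VF id 8.
 */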

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
		return 0;
	}

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0) {
		err = 0;	/* not an error - just no SR-IOV to set up */
		goto failed;
	}

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
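
/* Note that bnx2x_iov_init_one() deliberately returns 0 for every failed
 * precondition (no SR-IOV capability, VF context, E1x chip, zero VFs
 * requested, forced INTx/MSI, no ARI, IGU not in normal mode): the PF
 * probe simply proceeds without SR-IOV.  Only allocation and capability
 * readback failures propagate a real error code.
 */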

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}
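
/* The read-back above flushes the posted write so the new pretend value
 * takes effect before the caller's next GRC access.  The typical sequence
 * (see bnx2x_vf_enable_access() below) is:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *	... GRC accesses now execute on behalf of the VF ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));	(un-pretend)
 */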

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. this routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
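
/* The PGLUE "was error" state is spread over four 32-bit clear registers,
 * each covering 32 VFs: was_err_group selects the register and the low
 * five bits of the vfid select the bit written within it.
 */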

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev) {
		u8 pending = bnx2x_is_pcie_pending(dev);

		/* drop the reference taken by pci_get_bus_and_slot() */
		pci_dev_put(dev);
		return pending;
	}

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}
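
/* A worked example of the vlan divvy above: with a pool of 256 credits
 * and 64 VFs, `1 << ilog2()` rounds the pool down to a power of two
 * (here already 256) and each VF ends up with 256 / 64 = 4 vlan filters.
 */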

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it is > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size (0 - 8B, 4 - 128B) is set here to match the
	 * PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
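
/* Sketch of the resulting CID decode, assuming BNX2X_VF_CID_WND is 4
 * (so BNX2X_CIDS_PER_VF == 16): a doorbell from VF n on queue q maps to
 * CID = BNX2X_FIRST_VF_CID + n * 16 + q, and because VF_NORM_VF_BASE is
 * 0 the DQ can recover the abs_vfid directly from the CID window index.
 */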

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
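
/* Per the PCIe SR-IOV spec, VF n's routing ID is the PF's routing ID plus
 * "First VF Offset" plus n * "VF Stride"; the two helpers above split
 * that sum into its bus (bits above 8) and devfn (low eight bits) parts.
 */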

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		/* do_div() expects a 64-bit dividend; size is a u32, so
		 * plain division is used instead.
		 */
		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
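
/* The IOV resource for BAR pair i covers all `total` VF instances
 * back-to-back, so VF v's slice starts at start + (len / total) * v;
 * here the absolute VF id is used as the index.
 */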

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
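
/* There is no explicit goto in bnx2x_iov_alloc_mem(): BNX2X_PCI_ALLOC()
 * itself jumps to the alloc_mem_err label when a DMA allocation fails,
 * which is how the -ENOMEM return is reached.
 */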

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
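
/* Each VF context page allocated in bnx2x_iov_alloc_mem() gets its own
 * ILT line here, so this consumes BNX2X_VF_CIDS / ILT_PAGE_CIDS lines
 * and hands the caller the next free line.
 */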

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}