Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-02-22

This series contains updates to the ice driver only.

Bruce adds the __always_unused attribute to a parameter to avoid
compiler warnings when using -Wunused-parameter.  Fixed unnecessary
type-casting and the use of sizeof().  Fix the allocation of structs
that have become memory hogs, so allocate them in heaps and fix all the
associated references.  Fixed the "possible" numeric overflow issues
that were caught with static analysis.

Maciej fixes the maximum MTU calculation by taking into account double
VLAN tagging and ensuring that the operations are done in the correct
order.

Victor fixes the supported node calculation, where we were not taking
into account that if there is space to add the new VSI or intermediate
node above that layer, then it is not required to continue the calculation.
Added a check for a leaf node presence for a given VSI, which is needed
before removing a VSI.

Jake fixes an issue where the VSI list is shared, so simply removing a
VSI from the list will cause issues for the other users who reference
the list.  Since we also free the memory, this could lead to
segmentation faults.

Brett fixes an issue where driver unload could cause a system reboot
when intel_iommu=on parameter is set.  The issue is that we are not
clearing the CAUSE_ENA bit for the appropriate control queues register
when freeing the miscellaneous interrupt vector.

Mitch is so kind, he prevented spamming the VF with link messages when
the link status really has not changed.  Updates the driver to use the
absolute vector ID and not the per-PF vector ID for the VF MSIx vector
allocation.

Lukasz fixes the ethtool pause parameter for the ice driver, which was
originally based off the link status but is now based off the PHY
configuration.  This is to resolve an issue where pause parameters could
be set while link was down.

Jesse updates the string that reports statistics so the string does not
get modified at runtime and cause reports of string truncation.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2019-02-25 14:14:24 -08:00
commit a0392abe57
12 changed files with 364 additions and 199 deletions

View File

@ -83,7 +83,7 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \ #define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
#define ICE_UP_TABLE_TRANSLATE(val, i) \ #define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \

View File

@ -2450,6 +2450,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{ {
struct ice_aqc_dis_txqs *cmd; struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc; struct ice_aq_desc desc;
enum ice_status status;
u16 i, sz = 0; u16 i, sz = 0;
cmd = &desc.params.dis_txqs; cmd = &desc.params.dis_txqs;
@ -2485,6 +2486,8 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
break; break;
} }
/* flush pipe on time out */
cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
/* If no queue group info, we are in a reset flow. Issue the AQ */ /* If no queue group info, we are in a reset flow. Issue the AQ */
if (!qg_list) if (!qg_list)
goto do_aq; goto do_aq;
@ -2510,7 +2513,17 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
return ICE_ERR_PARAM; return ICE_ERR_PARAM;
do_aq: do_aq:
return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
if (status) {
if (!qg_list)
ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
vmvf_num, hw->adminq.sq_last_status);
else
ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n",
le16_to_cpu(qg_list[0].q_id[0]),
hw->adminq.sq_last_status);
}
return status;
} }
/* End of FW Admin Queue command wrappers */ /* End of FW Admin Queue command wrappers */
@ -2796,8 +2809,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
/* add the lan q */ /* add the lan q */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
if (status) if (status) {
ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
le16_to_cpu(buf->txqs[0].txq_id),
hw->adminq.sq_last_status);
goto ena_txq_exit; goto ena_txq_exit;
}
node.node_teid = buf->txqs[0].q_teid; node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

View File

@ -63,45 +63,45 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* is queried on the base PF netdev. * is queried on the base PF netdev.
*/ */
static const struct ice_stats ice_gstrings_pf_stats[] = { static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes), ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes), ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast), ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast), ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast), ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast), ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
ICE_PF_STAT("tx_errors", stats.eth.tx_errors), ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors),
ICE_PF_STAT("tx_size_64", stats.tx_size_64), ICE_PF_STAT("port.tx_size_64", stats.tx_size_64),
ICE_PF_STAT("rx_size_64", stats.rx_size_64), ICE_PF_STAT("port.rx_size_64", stats.rx_size_64),
ICE_PF_STAT("tx_size_127", stats.tx_size_127), ICE_PF_STAT("port.tx_size_127", stats.tx_size_127),
ICE_PF_STAT("rx_size_127", stats.rx_size_127), ICE_PF_STAT("port.rx_size_127", stats.rx_size_127),
ICE_PF_STAT("tx_size_255", stats.tx_size_255), ICE_PF_STAT("port.tx_size_255", stats.tx_size_255),
ICE_PF_STAT("rx_size_255", stats.rx_size_255), ICE_PF_STAT("port.rx_size_255", stats.rx_size_255),
ICE_PF_STAT("tx_size_511", stats.tx_size_511), ICE_PF_STAT("port.tx_size_511", stats.tx_size_511),
ICE_PF_STAT("rx_size_511", stats.rx_size_511), ICE_PF_STAT("port.rx_size_511", stats.rx_size_511),
ICE_PF_STAT("tx_size_1023", stats.tx_size_1023), ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
ICE_PF_STAT("rx_size_1023", stats.rx_size_1023), ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
ICE_PF_STAT("tx_size_1522", stats.tx_size_1522), ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
ICE_PF_STAT("rx_size_1522", stats.rx_size_1522), ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
ICE_PF_STAT("tx_size_big", stats.tx_size_big), ICE_PF_STAT("port.tx_size_big", stats.tx_size_big),
ICE_PF_STAT("rx_size_big", stats.rx_size_big), ICE_PF_STAT("port.rx_size_big", stats.rx_size_big),
ICE_PF_STAT("link_xon_tx", stats.link_xon_tx), ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
ICE_PF_STAT("link_xon_rx", stats.link_xon_rx), ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx), ICE_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx), ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
ICE_PF_STAT("rx_undersize", stats.rx_undersize), ICE_PF_STAT("port.rx_undersize", stats.rx_undersize),
ICE_PF_STAT("rx_fragments", stats.rx_fragments), ICE_PF_STAT("port.rx_fragments", stats.rx_fragments),
ICE_PF_STAT("rx_oversize", stats.rx_oversize), ICE_PF_STAT("port.rx_oversize", stats.rx_oversize),
ICE_PF_STAT("rx_jabber", stats.rx_jabber), ICE_PF_STAT("port.rx_jabber", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error), ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
ICE_PF_STAT("rx_length_errors", stats.rx_len_errors), ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors),
ICE_PF_STAT("rx_dropped", stats.eth.rx_discards), ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors", stats.crc_errors), ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors),
ICE_PF_STAT("illegal_bytes", stats.illegal_bytes), ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
ICE_PF_STAT("mac_local_faults", stats.mac_local_faults), ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults), ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
}; };
static const u32 ice_regs_dump_list[] = { static const u32 ice_regs_dump_list[] = {
@ -304,7 +304,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
return; return;
for (i = 0; i < ICE_PF_STATS_LEN; i++) { for (i = 0; i < ICE_PF_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "port.%s", snprintf(p, ETH_GSTRING_LEN, "%s",
ice_gstrings_pf_stats[i].stat_string); ice_gstrings_pf_stats[i].stat_string);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
@ -1084,7 +1084,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
* current PHY type, get what is supported by the NVM and intersect * current PHY type, get what is supported by the NVM and intersect
* them to get what is truly supported * them to get what is truly supported
*/ */
memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); memset(&cap_ksettings, 0, sizeof(cap_ksettings));
ice_phy_type_to_ethtool(netdev, &cap_ksettings); ice_phy_type_to_ethtool(netdev, &cap_ksettings);
ethtool_intersect_link_masks(ks, &cap_ksettings); ethtool_intersect_link_masks(ks, &cap_ksettings);
@ -1416,7 +1416,7 @@ ice_set_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* copy the ksettings to copy_ks to avoid modifying the original */ /* copy the ksettings to copy_ks to avoid modifying the original */
memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings)); memcpy(&copy_ks, ks, sizeof(copy_ks));
/* save autoneg out of ksettings */ /* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg; autoneg = copy_ks.base.autoneg;
@ -1435,7 +1435,7 @@ ice_set_link_ksettings(struct net_device *netdev,
return -EINVAL; return -EINVAL;
/* get our own copy of the bits to check against */ /* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); memset(&safe_ks, 0, sizeof(safe_ks));
safe_ks.base.cmd = copy_ks.base.cmd; safe_ks.base.cmd = copy_ks.base.cmd;
safe_ks.base.link_mode_masks_nwords = safe_ks.base.link_mode_masks_nwords =
copy_ks.base.link_mode_masks_nwords; copy_ks.base.link_mode_masks_nwords;
@ -1449,8 +1449,7 @@ ice_set_link_ksettings(struct net_device *netdev,
/* If copy_ks.base and safe_ks.base are not the same now, then they are /* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support. * trying to set something that we do not support.
*/ */
if (memcmp(&copy_ks.base, &safe_ks.base, if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base)))
sizeof(struct ethtool_link_settings)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
@ -1474,7 +1473,7 @@ ice_set_link_ksettings(struct net_device *netdev,
} }
/* Copy abilities to config in case autoneg is not set below */ /* Copy abilities to config in case autoneg is not set below */
memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data)); memset(&config, 0, sizeof(config));
config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
if (abilities->caps & ICE_AQC_PHY_AN_MODE) if (abilities->caps & ICE_AQC_PHY_AN_MODE)
config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
@ -1668,7 +1667,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
vsi->tx_rings[0]->count, new_tx_cnt); vsi->tx_rings[0]->count, new_tx_cnt);
tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
sizeof(struct ice_ring), GFP_KERNEL); sizeof(*tx_rings), GFP_KERNEL);
if (!tx_rings) { if (!tx_rings) {
err = -ENOMEM; err = -ENOMEM;
goto done; goto done;
@ -1700,7 +1699,7 @@ process_rx:
vsi->rx_rings[0]->count, new_rx_cnt); vsi->rx_rings[0]->count, new_rx_cnt);
rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
sizeof(struct ice_ring), GFP_KERNEL); sizeof(*rx_rings), GFP_KERNEL);
if (!rx_rings) { if (!rx_rings) {
err = -ENOMEM; err = -ENOMEM;
goto done; goto done;
@ -1819,21 +1818,36 @@ static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{ {
struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_info *pi; struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_vsi *vsi = np->vsi;
enum ice_status status;
pi = np->vsi->port_info; /* Initialize pause params */
pause->autoneg = pause->rx_pause = 0;
((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ? pause->tx_pause = 0;
pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
return;
/* Get current phy config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status)
goto out;
pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
AUTONEG_ENABLE : AUTONEG_DISABLE); AUTONEG_ENABLE : AUTONEG_DISABLE);
if (pi->fc.current_mode == ICE_FC_RX_PAUSE) { if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
pause->rx_pause = 1;
} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
pause->tx_pause = 1; pause->tx_pause = 1;
} else if (pi->fc.current_mode == ICE_FC_FULL) { if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
pause->rx_pause = 1; pause->rx_pause = 1;
pause->tx_pause = 1;
} out:
devm_kfree(&vsi->back->pdev->dev, pcaps);
} }
/** /**

View File

@ -30,6 +30,7 @@
#define PF_FW_ATQLEN_ATQVFE_M BIT(28) #define PF_FW_ATQLEN_ATQVFE_M BIT(28)
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29) #define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30) #define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31) #define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400 #define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400 #define PF_MBX_ARQBAH 0x0022E400

View File

@ -249,12 +249,12 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
/* allocate memory for both Tx and Rx ring pointers */ /* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
sizeof(struct ice_ring *), GFP_KERNEL); sizeof(*vsi->tx_rings), GFP_KERNEL);
if (!vsi->tx_rings) if (!vsi->tx_rings)
goto err_txrings; goto err_txrings;
vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
sizeof(struct ice_ring *), GFP_KERNEL); sizeof(*vsi->rx_rings), GFP_KERNEL);
if (!vsi->rx_rings) if (!vsi->rx_rings)
goto err_rxrings; goto err_rxrings;
@ -262,7 +262,7 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
/* allocate memory for q_vector pointers */ /* allocate memory for q_vector pointers */
vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
vsi->num_q_vectors, vsi->num_q_vectors,
sizeof(struct ice_q_vector *), sizeof(*vsi->q_vectors),
GFP_KERNEL); GFP_KERNEL);
if (!vsi->q_vectors) if (!vsi->q_vectors)
goto err_vectors; goto err_vectors;
@ -348,19 +348,25 @@ static int ice_get_free_slot(void *array, int size, int curr)
void ice_vsi_delete(struct ice_vsi *vsi) void ice_vsi_delete(struct ice_vsi *vsi)
{ {
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx ctxt; struct ice_vsi_ctx *ctxt;
enum ice_status status; enum ice_status status;
ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
if (vsi->type == ICE_VSI_VF) if (vsi->type == ICE_VSI_VF)
ctxt.vf_num = vsi->vf_id; ctxt->vf_num = vsi->vf_id;
ctxt.vsi_num = vsi->vsi_num; ctxt->vsi_num = vsi->vsi_num;
memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status) if (status)
dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
vsi->vsi_num); vsi->vsi_num);
devm_kfree(&pf->pdev->dev, ctxt);
} }
/** /**
@ -908,37 +914,41 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
*/ */
static int ice_vsi_init(struct ice_vsi *vsi) static int ice_vsi_init(struct ice_vsi *vsi)
{ {
struct ice_vsi_ctx ctxt = { 0 };
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
struct ice_vsi_ctx *ctxt;
int ret = 0; int ret = 0;
ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
switch (vsi->type) { switch (vsi->type) {
case ICE_VSI_PF: case ICE_VSI_PF:
ctxt.flags = ICE_AQ_VSI_TYPE_PF; ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break; break;
case ICE_VSI_VF: case ICE_VSI_VF:
ctxt.flags = ICE_AQ_VSI_TYPE_VF; ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */ /* VF number here is the absolute VF number (0-255) */
ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
break; break;
default: default:
return -ENODEV; return -ENODEV;
} }
ice_set_dflt_vsi_ctx(&ctxt); ice_set_dflt_vsi_ctx(ctxt);
/* if the switch is in VEB mode, allow VSI loopback */ /* if the switch is in VEB mode, allow VSI loopback */
if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */ /* Set LUT type and HASH type if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_set_rss_vsi_ctx(&ctxt, vsi); ice_set_rss_vsi_ctx(ctxt, vsi);
ctxt.info.sw_id = vsi->port_info->sw_id; ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, &ctxt); ice_vsi_setup_q_map(vsi, ctxt);
ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) { if (ret) {
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"Add VSI failed, err %d\n", ret); "Add VSI failed, err %d\n", ret);
@ -946,11 +956,12 @@ static int ice_vsi_init(struct ice_vsi *vsi)
} }
/* keep context for update VSI operations */ /* keep context for update VSI operations */
vsi->info = ctxt.info; vsi->info = ctxt->info;
/* record VSI number returned */ /* record VSI number returned */
vsi->vsi_num = ctxt.vsi_num; vsi->vsi_num = ctxt->vsi_num;
devm_kfree(&pf->pdev->dev, ctxt);
return ret; return ret;
} }
@ -1620,7 +1631,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
u16 buf_len, i, pf_q; u16 buf_len, i, pf_q;
int err = 0, tc; int err = 0, tc;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp); buf_len = sizeof(*qg_buf);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf) if (!qg_buf)
return -ENOMEM; return -ENOMEM;
@ -1823,26 +1834,34 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{ {
struct device *dev = &vsi->back->pdev->dev; struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw; struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx ctxt = { 0 }; struct ice_vsi_ctx *ctxt;
enum ice_status status; enum ice_status status;
int ret = 0;
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
/* Here we are configuring the VSI to let the driver add VLAN tags by /* Here we are configuring the VSI to let the driver add VLAN tags by
* setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
* insertion happens in the Tx hot path, in ice_tx_map. * insertion happens in the Tx hot path, in ice_tx_map.
*/ */
ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) { if (status) {
dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status); status, hw->adminq.sq_last_status);
return -EIO; ret = -EIO;
goto out;
} }
vsi->info.vlan_flags = ctxt.info.vlan_flags; vsi->info.vlan_flags = ctxt->info.vlan_flags;
return 0; out:
devm_kfree(dev, ctxt);
return ret;
} }
/** /**
@ -1854,35 +1873,42 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{ {
struct device *dev = &vsi->back->pdev->dev; struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw; struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx ctxt = { 0 }; struct ice_vsi_ctx *ctxt;
enum ice_status status; enum ice_status status;
int ret = 0;
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
/* Here we are configuring what the VSI should do with the VLAN tag in /* Here we are configuring what the VSI should do with the VLAN tag in
* the Rx packet. We can either leave the tag in the packet or put it in * the Rx packet. We can either leave the tag in the packet or put it in
* the Rx descriptor. * the Rx descriptor.
*/ */
if (ena) { if (ena)
/* Strip VLAN tag from Rx packet and put it in the desc */ /* Strip VLAN tag from Rx packet and put it in the desc */
ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
} else { else
/* Disable stripping. Leave tag in packet */ /* Disable stripping. Leave tag in packet */
ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
}
/* Allow all packets untagged/tagged */ /* Allow all packets untagged/tagged */
ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) { if (status) {
dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status); ena, status, hw->adminq.sq_last_status);
return -EIO; ret = -EIO;
goto out;
} }
vsi->info.vlan_flags = ctxt.info.vlan_flags; vsi->info.vlan_flags = ctxt->info.vlan_flags;
return 0; out:
devm_kfree(dev, ctxt);
return ret;
} }
/** /**
@ -2492,12 +2518,14 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
*/ */
int ice_vsi_release(struct ice_vsi *vsi) int ice_vsi_release(struct ice_vsi *vsi)
{ {
struct ice_vf *vf = NULL;
struct ice_pf *pf; struct ice_pf *pf;
struct ice_vf *vf;
if (!vsi->back) if (!vsi->back)
return -ENODEV; return -ENODEV;
pf = vsi->back; pf = vsi->back;
if (vsi->type == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id]; vf = &pf->vf[vsi->vf_id];
/* do not unregister and free netdevs while driver is in the reset /* do not unregister and free netdevs while driver is in the reset
* recovery pending state. Since reset/rebuild happens through PF * recovery pending state. Since reset/rebuild happens through PF

View File

@ -609,6 +609,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
} }
} }
if (!new_link_same_as_old && pf->num_alloc_vfs)
ice_vc_notify_link_state(pf); ice_vc_notify_link_state(pf);
return 0; return 0;
@ -1355,15 +1356,40 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
return ret; return ret;
} }
/**
* ice_dis_ctrlq_interrupts - disable control queue interrupts
* @hw: pointer to HW structure
*/
static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
{
/* disable Admin queue Interrupt causes */
wr32(hw, PFINT_FW_CTL,
rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
/* disable Mailbox queue Interrupt causes */
wr32(hw, PFINT_MBX_CTL,
rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
/* disable Control queue Interrupt causes */
wr32(hw, PFINT_OICR_CTL,
rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
ice_flush(hw);
}
/** /**
* ice_free_irq_msix_misc - Unroll misc vector setup * ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure * @pf: board private structure
*/ */
static void ice_free_irq_msix_misc(struct ice_pf *pf) static void ice_free_irq_msix_misc(struct ice_pf *pf)
{ {
struct ice_hw *hw = &pf->hw;
ice_dis_ctrlq_interrupts(hw);
/* disable OICR interrupt */ /* disable OICR interrupt */
wr32(&pf->hw, PFINT_OICR_ENA, 0); wr32(hw, PFINT_OICR_ENA, 0);
ice_flush(&pf->hw); ice_flush(hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
@ -1377,6 +1403,32 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID); ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
} }
/**
* ice_ena_ctrlq_interrupts - enable control queue interrupts
* @hw: pointer to HW structure
* @v_idx: HW vector index to associate the control queue interrupts with
*/
static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx)
{
u32 val;
val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* enable Admin queue Interrupt causes */
val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
/* enable Mailbox queue Interrupt causes */
val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
ice_flush(hw);
}
/** /**
* ice_req_irq_msix_misc - Setup the misc vector to handle non queue events * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
* @pf: board private structure * @pf: board private structure
@ -1389,7 +1441,6 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{ {
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0; int oicr_idx, err = 0;
u32 val;
if (!pf->int_name[0]) if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
@ -1438,20 +1489,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
skip_req_irq: skip_req_irq:
ice_ena_misc_vector(pf); ice_ena_misc_vector(pf);
val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx);
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
/* This enables Mailbox queue Interrupt causes */
val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
@ -1513,8 +1551,8 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
int err; int err;
netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_txq, vsi->alloc_rxq); vsi->alloc_rxq);
if (!netdev) if (!netdev)
return -ENOMEM; return -ENOMEM;
@ -1867,7 +1905,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= pf->num_lan_msix; v_left -= pf->num_lan_msix;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(struct msix_entry), GFP_KERNEL); sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) { if (!pf->msix_entries) {
err = -ENOMEM; err = -ENOMEM;
@ -1955,7 +1993,6 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
static int ice_init_interrupt_scheme(struct ice_pf *pf) static int ice_init_interrupt_scheme(struct ice_pf *pf)
{ {
int vectors = 0, hw_vectors = 0; int vectors = 0, hw_vectors = 0;
ssize_t size;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
vectors = ice_ena_msix_range(pf); vectors = ice_ena_msix_range(pf);
@ -1966,9 +2003,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return vectors; return vectors;
/* set up vector assignment tracking */ /* set up vector assignment tracking */
size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); pf->sw_irq_tracker =
devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) +
pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); (sizeof(u16) * vectors), GFP_KERNEL);
if (!pf->sw_irq_tracker) { if (!pf->sw_irq_tracker) {
ice_dis_msix(pf); ice_dis_msix(pf);
return -ENOMEM; return -ENOMEM;
@ -1980,9 +2017,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
/* set up HW vector assignment tracking */ /* set up HW vector assignment tracking */
hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); pf->hw_irq_tracker =
devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) +
pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); (sizeof(u16) * hw_vectors), GFP_KERNEL);
if (!pf->hw_irq_tracker) { if (!pf->hw_irq_tracker) {
ice_clear_interrupt_scheme(pf); ice_clear_interrupt_scheme(pf);
return -ENOMEM; return -ENOMEM;
@ -2116,7 +2153,7 @@ static int ice_probe(struct pci_dev *pdev,
} }
pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
sizeof(struct ice_vsi *), GFP_KERNEL); sizeof(*pf->vsi), GFP_KERNEL);
if (!pf->vsi) { if (!pf->vsi) {
err = -ENOMEM; err = -ENOMEM;
goto err_init_pf_unroll; goto err_init_pf_unroll;
@ -2148,7 +2185,7 @@ static int ice_probe(struct pci_dev *pdev,
} }
/* create switch struct for the switch element created by FW on boot */ /* create switch struct for the switch element created by FW on boot */
pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw),
GFP_KERNEL); GFP_KERNEL);
if (!pf->first_sw) { if (!pf->first_sw) {
err = -ENOMEM; err = -ENOMEM;
@ -2435,11 +2472,12 @@ static void ice_set_rx_mode(struct net_device *netdev)
* @addr: the MAC address entry being added * @addr: the MAC address entry being added
* @vid: VLAN id * @vid: VLAN id
* @flags: instructions from stack about fdb operation * @flags: instructions from stack about fdb operation
* @extack: netlink extended ack
*/ */
static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], static int
struct net_device *dev, const unsigned char *addr, ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
u16 vid, u16 flags, struct net_device *dev, const unsigned char *addr, u16 vid,
struct netlink_ext_ack *extack) u16 flags, struct netlink_ext_ack __always_unused *extack)
{ {
int err; int err;
@ -3707,30 +3745,39 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
struct device *dev = &vsi->back->pdev->dev; struct device *dev = &vsi->back->pdev->dev;
struct ice_aqc_vsi_props *vsi_props; struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw; struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx ctxt = { 0 }; struct ice_vsi_ctx *ctxt;
enum ice_status status; enum ice_status status;
int ret = 0;
vsi_props = &vsi->info; vsi_props = &vsi->info;
ctxt.info = vsi->info;
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
ctxt->info = vsi->info;
if (bmode == BRIDGE_MODE_VEB) if (bmode == BRIDGE_MODE_VEB)
/* change from VEPA to VEB mode */ /* change from VEPA to VEB mode */
ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
else else
/* change from VEB to VEPA mode */ /* change from VEB to VEPA mode */
ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) { if (status) {
dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status); bmode, status, hw->adminq.sq_last_status);
return -EIO; ret = -EIO;
goto out;
} }
/* Update sw flags for book keeping */ /* Update sw flags for book keeping */
vsi_props->sw_flags = ctxt.info.sw_flags; vsi_props->sw_flags = ctxt->info.sw_flags;
return 0; out:
devm_kfree(dev, ctxt);
return ret;
} }
/** /**

View File

@ -152,9 +152,10 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
*/ */
off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS; off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;
read_size = off_w ? read_size = off_w ?
min(*words, min_t(u16, *words,
(u16)(ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) : (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :
min((*words - words_read), ICE_SR_SECTOR_SIZE_IN_WORDS); min_t(u16, (*words - words_read),
ICE_SR_SECTOR_SIZE_IN_WORDS);
/* Check if this is last command, if so set proper flag */ /* Check if this is last command, if so set proper flag */
if ((words_read + read_size) >= *words) if ((words_read + read_size) >= *words)

View File

@ -1066,8 +1066,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
hw->max_children[i] = le16_to_cpu(max_sibl); hw->max_children[i] = le16_to_cpu(max_sibl);
} }
hw->layer_info = (struct ice_aqc_layer_props *) hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
(hw->num_tx_sched_layers * (hw->num_tx_sched_layers *
sizeof(*hw->layer_info)), sizeof(*hw->layer_info)),
GFP_KERNEL); GFP_KERNEL);
@ -1344,8 +1343,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
node = node->sibling; node = node->sibling;
} }
/* tree has one intermediate node to add this new VSI.
* So no need to calculate supported nodes for below
* layers.
*/
if (node)
break;
/* all the nodes are full, allocate a new one */ /* all the nodes are full, allocate a new one */
if (!node)
num_nodes[i]++; num_nodes[i]++;
} }
} }
@ -1611,6 +1615,23 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
} }
} }
/**
* ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
* @node: pointer to the sub-tree node
*
* This function checks for a leaf node presence in a given sub-tree node.
*/
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
u8 i;
for (i = 0; i < node->num_children; i++)
if (ice_sched_is_leaf_node_present(node->children[i]))
return true;
/* check for a leaf node */
return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}
/** /**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure * @pi: port information structure
@ -1645,6 +1666,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node) if (!vsi_node)
continue; continue;
if (ice_sched_is_leaf_node_present(vsi_node)) {
ice_debug(pi->hw, ICE_DBG_SCHED,
"VSI has leaf nodes in TC %d\n", i);
status = ICE_ERR_IN_USE;
goto exit_sched_rm_vsi_cfg;
}
while (j < vsi_node->num_children) { while (j < vsi_node->num_children) {
if (vsi_node->children[j]->owner == owner) { if (vsi_node->children[j]->owner == owner) {
ice_free_sched_node(pi, vsi_node->children[j]); ice_free_sched_node(pi, vsi_node->children[j]);

View File

@ -22,6 +22,7 @@ enum ice_status {
ICE_ERR_OUT_OF_RANGE = -13, ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_ALREADY_EXISTS = -14, ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15, ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17, ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18, ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_BUF_TOO_SHORT = -52, ICE_ERR_BUF_TOO_SHORT = -52,

View File

@ -98,7 +98,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
u8 i; u8 i;
recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
sizeof(struct ice_sw_recipe), GFP_KERNEL); sizeof(*recps), GFP_KERNEL);
if (!recps) if (!recps)
return ICE_ERR_NO_MEMORY; return ICE_ERR_NO_MEMORY;
@ -1538,9 +1538,20 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
} else if (!list_elem->vsi_list_info) { } else if (!list_elem->vsi_list_info) {
status = ICE_ERR_DOES_NOT_EXIST; status = ICE_ERR_DOES_NOT_EXIST;
goto exit; goto exit;
} else { } else if (list_elem->vsi_list_info->ref_cnt > 1) {
if (list_elem->vsi_list_info->ref_cnt > 1) /* a ref_cnt > 1 indicates that the vsi_list is being
* shared by multiple rules. Decrement the ref_cnt and
* remove this rule, but do not modify the list, as it
* is in-use by other rules.
*/
list_elem->vsi_list_info->ref_cnt--; list_elem->vsi_list_info->ref_cnt--;
remove_rule = true;
} else {
/* a ref_cnt of 1 indicates the vsi_list is only used
* by one rule. However, the original removal request is only
* for a single VSI. Update the vsi_list first, and only
* remove the rule if there are no further VSIs in this list.
*/
vsi_handle = f_entry->fltr_info.vsi_handle; vsi_handle = f_entry->fltr_info.vsi_handle;
status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status) if (status)

View File

@ -48,7 +48,6 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring)
*/ */
void ice_clean_tx_ring(struct ice_ring *tx_ring) void ice_clean_tx_ring(struct ice_ring *tx_ring)
{ {
unsigned long size;
u16 i; u16 i;
/* ring already cleared, nothing to do */ /* ring already cleared, nothing to do */
@ -59,8 +58,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++) for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
size = sizeof(struct ice_tx_buf) * tx_ring->count; memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
memset(tx_ring->tx_buf, 0, size);
/* Zero out the descriptor ring */ /* Zero out the descriptor ring */
memset(tx_ring->desc, 0, tx_ring->size); memset(tx_ring->desc, 0, tx_ring->size);
@ -226,21 +224,21 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
int ice_setup_tx_ring(struct ice_ring *tx_ring) int ice_setup_tx_ring(struct ice_ring *tx_ring)
{ {
struct device *dev = tx_ring->dev; struct device *dev = tx_ring->dev;
int bi_size;
if (!dev) if (!dev)
return -ENOMEM; return -ENOMEM;
/* warn if we are about to overwrite the pointer */ /* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_buf); WARN_ON(tx_ring->tx_buf);
bi_size = sizeof(struct ice_tx_buf) * tx_ring->count; tx_ring->tx_buf =
tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
GFP_KERNEL);
if (!tx_ring->tx_buf) if (!tx_ring->tx_buf)
return -ENOMEM; return -ENOMEM;
/* round up to nearest 4K */ /* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc); tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
tx_ring->size = ALIGN(tx_ring->size, 4096); 4096);
tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
GFP_KERNEL); GFP_KERNEL);
if (!tx_ring->desc) { if (!tx_ring->desc) {
@ -267,7 +265,6 @@ err:
void ice_clean_rx_ring(struct ice_ring *rx_ring) void ice_clean_rx_ring(struct ice_ring *rx_ring)
{ {
struct device *dev = rx_ring->dev; struct device *dev = rx_ring->dev;
unsigned long size;
u16 i; u16 i;
/* ring already cleared, nothing to do */ /* ring already cleared, nothing to do */
@ -292,8 +289,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
rx_buf->page_offset = 0; rx_buf->page_offset = 0;
} }
size = sizeof(struct ice_rx_buf) * rx_ring->count; memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
memset(rx_ring->rx_buf, 0, size);
/* Zero out the descriptor ring */ /* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size); memset(rx_ring->desc, 0, rx_ring->size);
@ -331,15 +327,15 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
int ice_setup_rx_ring(struct ice_ring *rx_ring) int ice_setup_rx_ring(struct ice_ring *rx_ring)
{ {
struct device *dev = rx_ring->dev; struct device *dev = rx_ring->dev;
int bi_size;
if (!dev) if (!dev)
return -ENOMEM; return -ENOMEM;
/* warn if we are about to overwrite the pointer */ /* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf); WARN_ON(rx_ring->rx_buf);
bi_size = sizeof(struct ice_rx_buf) * rx_ring->count; rx_ring->rx_buf =
rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
GFP_KERNEL);
if (!rx_ring->rx_buf) if (!rx_ring->rx_buf)
return -ENOMEM; return -ENOMEM;
@ -1173,7 +1169,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_update_ena_itr(vsi, q_vector); ice_update_ena_itr(vsi, q_vector);
return min(work_done, budget - 1); return min_t(int, work_done, budget - 1);
} }
/* helper function for building cmd/type/offset */ /* helper function for building cmd/type/offset */

View File

@ -173,7 +173,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC(vf->vf_id), 0); wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx; first = vf->first_vector_idx +
hw->func_caps.common_cap.msix_vector_first_id;
last = first + pf->num_vf_msix - 1; last = first + pf->num_vf_msix - 1;
for (v = first; v <= last; v++) { for (v = first; v <= last; v++) {
u32 reg; u32 reg;
@ -310,6 +311,11 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
*/ */
clear_bit(ICE_VF_STATE_INIT, vf->vf_states); clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
/* Clear the VF's ARQLEN register. This is how the VF detects reset,
* since the VFGEN_RSTAT register doesn't stick at 0 after reset.
*/
wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
/* In the case of a VFLR, the HW has already reset the VF and we /* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register. * just need to clean up, so don't hit the VFRTRIG register.
*/ */
@ -345,25 +351,33 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
{ {
struct device *dev = &vsi->back->pdev->dev; struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw; struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx ctxt = { 0 }; struct ice_vsi_ctx *ctxt;
enum ice_status status; enum ice_status status;
int ret = 0;
ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED | ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ICE_AQ_VSI_PVLAN_INSERT_PVID | ICE_AQ_VSI_PVLAN_INSERT_PVID |
ICE_AQ_VSI_VLAN_EMOD_STR; ICE_AQ_VSI_VLAN_EMOD_STR);
ctxt.info.pvid = cpu_to_le16(vid); ctxt->info.pvid = cpu_to_le16(vid);
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) { if (status) {
dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status); status, hw->adminq.sq_last_status);
return -EIO; ret = -EIO;
goto out;
} }
vsi->info.pvid = ctxt.info.pvid; vsi->info.pvid = ctxt->info.pvid;
vsi->info.vlan_flags = ctxt.info.vlan_flags; vsi->info.vlan_flags = ctxt->info.vlan_flags;
return 0; out:
devm_kfree(dev, ctxt);
return ret;
} }
/** /**
@ -510,7 +524,8 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
hw = &pf->hw; hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx; first = vf->first_vector_idx +
hw->func_caps.common_cap.msix_vector_first_id;
last = (first + pf->num_vf_msix) - 1; last = (first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
@ -2479,11 +2494,12 @@ int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{ {
struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_ctx ctx = { 0 };
struct ice_vsi *vsi = np->vsi; struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctx;
enum ice_status status;
struct ice_vf *vf; struct ice_vf *vf;
int status; int ret = 0;
/* validate the request */ /* validate the request */
if (vf_id >= pf->num_alloc_vfs) { if (vf_id >= pf->num_alloc_vfs) {
@ -2503,25 +2519,31 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
return 0; return 0;
} }
ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
if (ena) { if (ena) {
ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
} }
status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL); status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) { if (status) {
dev_dbg(&pf->pdev->dev, dev_dbg(&pf->pdev->dev,
"Error %d, failed to update VSI* parameters\n", status); "Error %d, failed to update VSI* parameters\n", status);
return -EIO; ret = -EIO;
goto out;
} }
vf->spoofchk = ena; vf->spoofchk = ena;
vsi->info.sec_flags = ctx.info.sec_flags; vsi->info.sec_flags = ctx->info.sec_flags;
vsi->info.sw_flags2 = ctx.info.sw_flags2; vsi->info.sw_flags2 = ctx->info.sw_flags2;
out:
return status; devm_kfree(&pf->pdev->dev, ctx);
return ret;
} }
/** /**