Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-05-31

This series contains updates to the ice driver only.

Brett modifies the driver to allow users to clear a VF's
administratively set MAC address on the PF.  Fixes the driver to
recognize an existing VLAN tag when DMAC/SMAC is enabled in a packet.
Fixes an issue so that VFs are reset after any VF port VLAN
modifications are made on the PF.  Makes sure the register QRXFLXP_CNTXT
is cleared before writing a new value to ensure the previous value is
not carried forward.  Updates the PF to allow the VF to request a reset
as soon as it has been initialized.  Fixes an issue to ensure that when
a VSI is created, it uses the current coalesce value, not the default
value.

Paul allows untrusted VFs to add 16 filters.

Dan increases the timeout needed after a PFR to allow ample time for
package download.

Chinh adjusts the define value for the number of PHY speeds we currently
support.  Changes the driver to ignore the EMODE error when configuring
the PHY.

Jesse fixes an issue which was preventing a user from configuring the
interface before bringing it up.

Henry fixes the logic for adding back perfect flows after a flow
director filter deletion.

Bruce fixes line wrapping to make it more consistent.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2a2e01e7b1
Committed by David S. Miller, 2020-06-01 12:09:08 -07:00

16 changed files with 141 additions and 155 deletions


@@ -974,7 +974,7 @@ struct ice_aqc_get_phy_caps {
 #define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
 #define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
 #define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
 
 struct ice_aqc_get_phy_caps_data {
 	__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1826,6 +1826,7 @@ enum ice_aq_err {
 	ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
 	ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
 	ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+	ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
 	ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
 	ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
 	ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */


@@ -3,6 +3,7 @@
 #include <net/xdp_sock_drv.h>
 #include "ice_base.h"
+#include "ice_lib.h"
 #include "ice_dcb_lib.h"
 
 /**
@@ -288,7 +289,6 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	u32 rxdid = ICE_RXDID_FLEX_NIC;
 	struct ice_rlan_ctx rlan_ctx;
 	struct ice_hw *hw;
-	u32 regval;
 	u16 pf_q;
 	int err;
 
@@ -385,27 +385,16 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Rx queue threshold in units of 64 */
 	rlan_ctx.lrxqthresh = 1;
 
 	/* Enable Flexible Descriptors in the queue context which
 	 * allows this driver to select a specific receive descriptor format
+	 * increasing context priority to pick up profile ID; default is 0x01;
+	 * setting to 0x03 to ensure profile is programming if prev context is
+	 * of same priority
 	 */
-	regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
-	if (vsi->type != ICE_VSI_VF) {
-		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
-			QRXFLXP_CNTXT_RXDID_IDX_M;
-
-		/* increasing context priority to pick up profile ID;
-		 * default is 0x01; setting to 0x03 to ensure profile
-		 * is programming if prev context is of same priority
-		 */
-		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
-			QRXFLXP_CNTXT_RXDID_PRIO_M;
-	} else {
-		regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
-			    QRXFLXP_CNTXT_RXDID_PRIO_M |
-			    QRXFLXP_CNTXT_TS_M);
-	}
-	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+	if (vsi->type != ICE_VSI_VF)
+		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
+	else
+		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);
 
 	/* Absolute queue number out of 2K needs to be passed */
 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);


@@ -964,7 +964,12 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
 
-	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+	/* Wait for the PFR to complete. The wait time is the global config lock
+	 * timeout plus the PFR timeout which will account for a possible reset
+	 * that is occurring during a download package operation.
+	 */
+	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
+	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
 		reg = rd32(hw, PFGEN_CTRL);
 		if (!(reg & PFGEN_CTRL_PFSWR_M))
 			break;
@@ -2227,6 +2232,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
 {
 	struct ice_aq_desc desc;
+	enum ice_status status;
 
 	if (!cfg)
 		return ICE_ERR_PARAM;
@@ -2255,7 +2261,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
 	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
 	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
 
-	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+		status = 0;
+
+	return status;
 }
 
 /**


@@ -769,8 +769,7 @@ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
  *
  * Destroys the send and receive queue locks for a given control queue.
  */
-static void
-ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
 {
 	mutex_destroy(&cq->sq_lock);
 	mutex_destroy(&cq->rq_lock);


@@ -791,39 +791,31 @@ void ice_update_dcb_stats(struct ice_pf *pf)
  * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
  * @tx_ring: ring to send buffer on
  * @first: pointer to struct ice_tx_buf
+ *
+ * This should not be called if the outer VLAN is software offloaded as the VLAN
+ * tag will already be configured with the correct ID and priority bits
  */
-int
+void
 ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
 			      struct ice_tx_buf *first)
 {
 	struct sk_buff *skb = first->skb;
 
 	if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
-		return 0;
+		return;
 
 	/* Insert 802.1p priority into VLAN header */
-	if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
+	if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
 	    skb->priority != TC_PRIO_CONTROL) {
 		first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
 		/* Mask the lower 3 bits to set the 802.1p priority */
 		first->tx_flags |= (skb->priority & 0x7) <<
 				   ICE_TX_FLAGS_VLAN_PR_S;
-		if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
-			struct vlan_ethhdr *vhdr;
-			int rc;
-
-			rc = skb_cow_head(skb, 0);
-			if (rc < 0)
-				return rc;
-			vhdr = (struct vlan_ethhdr *)skb->data;
-			vhdr->h_vlan_TCI = htons(first->tx_flags >>
-						 ICE_TX_FLAGS_VLAN_S);
-		} else {
-			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
-		}
+		/* if this is not already set it means a VLAN 0 + priority needs
+		 * to be offloaded
+		 */
+		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
 	}
-
-	return 0;
 }
 
 /**


@@ -27,7 +27,7 @@ void ice_pf_dcb_recfg(struct ice_pf *pf);
 void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
 int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
 void ice_update_dcb_stats(struct ice_pf *pf);
-int
+void
 ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
 			      struct ice_tx_buf *first);
 void


@@ -3189,10 +3189,6 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
 
-	/* check to see if VSI is active */
-	if (test_bit(__ICE_DOWN, vsi->state))
-		return;
-
 	/* report maximum channels */
 	ch->max_rx = ice_get_max_rxq(pf);
 	ch->max_tx = ice_get_max_txq(pf);


@@ -1363,6 +1363,31 @@ release_lock:
 	mutex_unlock(&hw->fdir_fltr_lock);
 }
 
+/**
+ * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
+ * @pf: PF structure
+ * @flow_type: FDir flow type to release
+ */
+static void
+ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
+{
+	struct ice_hw *hw = &pf->hw;
+	bool need_perfect = false;
+
+	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+		need_perfect = true;
+
+	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
+		return;
+
+	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
+	if (need_perfect)
+		ice_create_init_fdir_rule(pf, flow_type);
+}
+
 /**
  * ice_fdir_update_list_entry - add or delete a filter from the filter list
  * @pf: PF structure
@@ -1393,7 +1418,7 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
 		/* we just deleted the last filter of flow_type so we
 		 * should also delete the HW filter info.
 		 */
-		ice_fdir_rem_flow(hw, ICE_BLK_FD, old_fltr->flow_type);
+		ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
 		list_del(&old_fltr->fltr_node);
 		devm_kfree(ice_hw_to_dev(hw), old_fltr);
 	}


@@ -1595,6 +1595,32 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
 	}
 }
 
+/**
+ * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
+ * @hw: HW pointer
+ * @pf_q: index of the Rx queue in the PF's queue space
+ * @rxdid: flexible descriptor RXDID
+ * @prio: priority for the RXDID for this queue
+ */
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
+{
+	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+
+	/* clear any previous values */
+	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+		    QRXFLXP_CNTXT_RXDID_PRIO_M |
+		    QRXFLXP_CNTXT_TS_M);
+
+	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+		QRXFLXP_CNTXT_RXDID_IDX_M;
+	regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+		QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+}
+
 /**
  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
  * @vsi: the VSI being configured
@@ -2681,15 +2707,13 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
 						&coalesce[i]);
 
-	for (; i < vsi->num_q_vectors; i++) {
-		struct ice_coalesce_stored coalesce_dflt = {
-			.itr_tx = ICE_DFLT_TX_ITR,
-			.itr_rx = ICE_DFLT_RX_ITR,
-			.intrl = 0
-		};
-
+	/* number of q_vectors increased, so assume coalesce settings were
+	 * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
+	 * the previous settings from q_vector 0 for all of the new q_vectors
+	 */
+	for (; i < vsi->num_q_vectors; i++)
 		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
-						&coalesce_dflt);
-	}
+						&coalesce[0]);
 }
 
 /**


@@ -74,6 +74,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
 
 bool ice_is_reset_in_progress(unsigned long *state);
 
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);
+
 void ice_vsi_put_qs(struct ice_vsi *vsi);
 
 void ice_vsi_dis_irq(struct ice_vsi *vsi);
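For readers skimming the diff, the new ice_write_qrxflxp_cntxt() helper declared above is a read-modify-write that clears the RXDID/priority fields before setting them, so a value left behind by a previous owner of the queue can never be carried forward. Below is a minimal standalone sketch of that clear-before-set pattern; the field layout and names are hypothetical, not the driver's QRXFLXP_CNTXT definitions.

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: bits 0-5 hold the descriptor ID, bits 8-10 the priority */
#define FLD_RXDID_S	0
#define FLD_RXDID_M	(0x3Fu << FLD_RXDID_S)
#define FLD_PRIO_S	8
#define FLD_PRIO_M	(0x7u << FLD_PRIO_S)

static uint32_t write_flex_ctxt(uint32_t regval, uint32_t rxdid, uint32_t prio)
{
	/* clear any previous field values so nothing is carried forward */
	regval &= ~(FLD_RXDID_M | FLD_PRIO_M);

	regval |= (rxdid << FLD_RXDID_S) & FLD_RXDID_M;
	regval |= (prio << FLD_PRIO_S) & FLD_PRIO_M;

	return regval;
}

int main(void)
{
	/* pretend a previous owner of the register left stale bits set */
	uint32_t stale = 0x0000072a;

	printf("0x%08x\n", (unsigned int)write_flex_ctxt(stale, 0x16, 0x3));
	return 0;
}

Without the clearing step, the OR operations alone could leave stale bits set alongside the new RXDID, which is exactly the situation the series description calls out.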


@@ -3248,7 +3248,7 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
 	if (!opt_fw_filename)
 		return NULL;
 
-	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
 		 ICE_DDP_PKG_PATH, dsn);
 
 	return opt_fw_filename;
@@ -5159,6 +5159,8 @@ const char *ice_aq_str(enum ice_aq_err aq_err)
 		return "ICE_AQ_RC_ENOSPC";
 	case ICE_AQ_RC_ENOSYS:
 		return "ICE_AQ_RC_ENOSYS";
+	case ICE_AQ_RC_EMODE:
+		return "ICE_AQ_RC_EMODE";
 	case ICE_AQ_RC_ENOSEC:
 		return "ICE_AQ_RC_ENOSEC";
 	case ICE_AQ_RC_EBADSIG:


@@ -1714,8 +1714,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
  * This function removes single aggregator VSI info entry from
  * aggregator list.
  */
-static void
-ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
 {
 	struct ice_sched_agg_info *agg_info;
 	struct ice_sched_agg_info *atmp;
@@ -1947,8 +1946,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
  *
  * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
  */
-static void
-ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 {
 	if (bw == ICE_SCHED_DFLT_BW) {
 		clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
@@ -1967,8 +1965,7 @@ ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
  *
  * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
  */
-static void
-ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 {
 	if (bw == ICE_SCHED_DFLT_BW) {
 		clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
@@ -1993,8 +1990,7 @@ ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
  *
  * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
  */
-static void
-ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 {
 	if (bw == ICE_SCHED_DFLT_BW) {
 		clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);


@@ -1612,8 +1612,7 @@ exit:
  * check for duplicates in this case, removing duplicates from a given
  * list should be taken care of in the caller of this function.
  */
-enum ice_status
-ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
 {
 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
 	struct ice_fltr_list_entry *m_list_itr;
@@ -1914,8 +1913,7 @@ exit:
  * @hw: pointer to the hardware structure
  * @v_list: list of VLAN entries and forwarding information
  */
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
 {
 	struct ice_fltr_list_entry *v_list_itr;
 
@@ -2145,8 +2143,7 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
  * the entries passed into m_list were added previously. It will not attempt to
  * do a partial remove of entries that were found.
  */
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
 {
 	struct ice_fltr_list_entry *list_itr, *tmp;
 	struct mutex *rule_lock; /* Lock to protect filter rule list */


@@ -2053,49 +2053,25 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
  *
  * Checks the skb and set up correspondingly several generic transmit flags
  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
- *
- * Returns error code indicate the frame should be dropped upon error and the
- * otherwise returns 0 to indicate the flags has been set properly.
  */
-static int
+static void
 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
 {
 	struct sk_buff *skb = first->skb;
-	__be16 protocol = skb->protocol;
 
-	if (protocol == htons(ETH_P_8021Q) &&
-	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
-		/* when HW VLAN acceleration is turned off by the user the
-		 * stack sets the protocol to 8021q so that the driver
-		 * can take any steps required to support the SW only
-		 * VLAN handling. In our case the driver doesn't need
-		 * to take any further steps so just set the protocol
-		 * to the encapsulated ethertype.
-		 */
-		skb->protocol = vlan_get_protocol(skb);
-		return 0;
-	}
+	/* nothing left to do, software offloaded VLAN */
+	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
+		return;
 
-	/* if we have a HW VLAN tag being added, default to the HW one */
+	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
+	 * insertion for 802.1AD is not supported
+	 */
 	if (skb_vlan_tag_present(skb)) {
 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_hdr *vhdr, _vhdr;
-
-		/* for SW VLAN, check the next protocol and store the tag */
-		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
-							     sizeof(_vhdr),
-							     &_vhdr);
-		if (!vhdr)
-			return -EINVAL;
-
-		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
-				   ICE_TX_FLAGS_VLAN_S;
-		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
 	}
 
-	return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
+	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
 }
 
 /**
@@ -2403,8 +2379,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 	first->tx_flags = 0;
 
 	/* prepare the VLAN tagging flags for Tx */
-	if (ice_tx_prepare_vlan_flags(tx_ring, first))
-		goto out_drop;
+	ice_tx_prepare_vlan_flags(tx_ring, first);
 
 	/* set up TSO offload */
 	tso = ice_tso(first, &offload);
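The rewritten ice_tx_prepare_vlan_flags() above hinges on one observation: if no acceleration tag is present but the frame's ethertype is already a VLAN ethertype, the VLAN header was inserted in software and the hardware must not add another one. A small standalone model of that decision follows; the types and constants are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_ETH_P_8021Q		0x8100
#define TOY_ETH_P_8021AD	0x88A8

struct toy_skb {
	uint16_t protocol;	/* ethertype of the outgoing frame */
	bool has_accel_tag;	/* VLAN tag carried out-of-band for HW insertion */
};

/* true when the VLAN header is already part of the packet bytes */
static bool vlan_offloaded_in_software(const struct toy_skb *skb)
{
	return !skb->has_accel_tag &&
	       (skb->protocol == TOY_ETH_P_8021Q ||
		skb->protocol == TOY_ETH_P_8021AD);
}

int main(void)
{
	struct toy_skb sw_tagged = { .protocol = TOY_ETH_P_8021Q, .has_accel_tag = false };
	struct toy_skb hw_tagged = { .protocol = 0x0800, .has_accel_tag = true };

	printf("sw-tagged frame: skip HW insertion = %d\n",
	       vlan_offloaded_in_software(&sw_tagged));
	printf("hw-offload frame: skip HW insertion = %d\n",
	       vlan_offloaded_in_software(&hw_tagged));
	return 0;
}

In the driver itself the equivalent test is the skb_vlan_tag_present()/eth_type_vlan() pair shown in the hunk; the DCB helper is then only reached when the VLAN is not software offloaded, matching the comment added to ice_tx_prepare_vlan_flags_dcb().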


@@ -2014,7 +2014,7 @@ err:
  */
 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
 {
-	if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
 		ice_reset_vf(vf, false);
 }
 
@@ -3295,7 +3295,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
 		     __be16 vlan_proto)
 {
 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
-	struct ice_vsi *vsi;
 	struct device *dev;
 	struct ice_vf *vf;
 	u16 vlanprio;
@@ -3317,8 +3316,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
 	}
 
 	vf = &pf->vf[vf_id];
-	vsi = pf->vsi[vf->lan_vsi_idx];
-
 	ret = ice_check_vf_ready_for_cfg(vf);
 	if (ret)
 		return ret;
@@ -3331,44 +3328,15 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
 		return 0;
 	}
 
-	if (vlan_id || qos) {
-		/* remove VLAN 0 filter set by default when transitioning from
-		 * no port VLAN to a port VLAN. No change to old port VLAN on
-		 * failure.
-		 */
-		ret = ice_vsi_kill_vlan(vsi, 0);
-		if (ret)
-			return ret;
-		ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
-		if (ret)
-			return ret;
-	} else {
-		/* add VLAN 0 filter back when transitioning from port VLAN to
-		 * no port VLAN. No change to old port VLAN on failure.
-		 */
-		ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
-		if (ret)
-			return ret;
-		ret = ice_vsi_manage_pvid(vsi, 0, false);
-		if (ret)
-			return ret;
-	}
+	vf->port_vlan_info = vlanprio;
 
-	if (vlan_id) {
+	if (vf->port_vlan_info)
 		dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
 			 vlan_id, qos, vf_id);
+	else
+		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
 
-		/* add VLAN filter for the port VLAN */
-		ret = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
-		if (ret)
-			return ret;
-	}
-
-	/* remove old port VLAN filter with valid VLAN ID or QoS fields */
-	if (vf->port_vlan_info)
-		ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
-
-	/* keep port VLAN information persistent on resets */
-	vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
+	ice_vc_reset_vf(vf);
 
 	return 0;
 }
@@ -3904,7 +3872,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	if (ice_validate_vf_id(pf, vf_id))
 		return -EINVAL;
 
-	if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+	if (is_multicast_ether_addr(mac)) {
 		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
 		return -EINVAL;
 	}
@@ -3924,15 +3892,21 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 		return -EINVAL;
 	}
 
-	/* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
-	 * flow will use the updated dflt_lan_addr and add a MAC filter
-	 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
-	 * set the MAC address for this VF.
+	/* VF is notified of its new MAC via the PF's response to the
+	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
 	 */
 	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
-	vf->pf_set_mac = true;
-	netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
-		    vf_id, mac);
+	if (is_zero_ether_addr(mac)) {
+		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+		vf->pf_set_mac = false;
+		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+			    vf->vf_id);
+	} else {
+		/* PF will add MAC rule for the VF */
+		vf->pf_set_mac = true;
+		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+			    mac, vf_id);
+	}
 
 	ice_vc_reset_vf(vf);
 	return 0;


@@ -7,7 +7,10 @@
 /* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */
 #define ICE_MAX_VLAN_PER_VF 8
-#define ICE_MAX_MACADDR_PER_VF 12
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
 
 /* Malicious Driver Detection */
 #define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
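The new comment spells out where the limit of 18 comes from: one filter for the VF's default/perm_addr/LAA MAC, one for broadcast, and the 16 additional unicast/multicast filters that untrusted VFs may now add. As a quick sanity check, the arithmetic can be written out with illustrative names (these are not driver defines):

#include <assert.h>

#define VF_DFLT_MAC_FILTERS	1	/* default/perm_addr/LAA MAC */
#define VF_BCAST_MAC_FILTERS	1	/* broadcast */
#define VF_EXTRA_MAC_FILTERS	16	/* additional unicast/multicast */

int main(void)
{
	/* should equal ICE_MAX_MACADDR_PER_VF (18) from the hunk above */
	assert(VF_DFLT_MAC_FILTERS + VF_BCAST_MAC_FILTERS +
	       VF_EXTRA_MAC_FILTERS == 18);
	return 0;
}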