Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says: ==================== This series contains updates to the ice driver only. Bruce updates the driver to store the number of functions the device has so that it won't have to compute it when setting safe mode capabilities. Adds a check to adjust the reporting of capabilities for devices with more than 4 ports, which differ from devices with fewer than 4 ports. Brett adds a helper function to determine if the VF is allowed to do VLAN operations based on the host's VF configuration. Also adds a new function that initializes VLAN stripping (enabled/disabled) for the VF based on the device supported capabilities. Adds a check that the vector index is valid with respect to the number of transmit and receive queues configured when we set coalesce settings for DCB. Adds a check that the promisc_mask contains ICE_PROMISC_VLAN_RX or ICE_PROMISC_VLAN_TX so that VLAN 0 promiscuous rules can be removed. Adds a helper macro for a commonly used de-reference of a pointer to &pf->pdev->dev. Jesse fixes an issue where, if the VF sent an invalid virtchnl request, the driver would return uninitialized data to the VF from the PF stack, so ensure the stack variable is initialized earlier. Adds helpers to the virtchnl interface to make the reporting of strings consistent and help reduce stack space. Implements VF statistics gathering via the kernel ndo_get_vf_stats(). Akeem ensures we disable the state flag for each VF when its resources are returned to the device. Tony does additional cleanup in the driver to ensure that when we allocate and free memory within the same function, we do not use the devm_* variants; use regular alloc and free functions instead. Henry implements code to query and set the number of channels on the primary VSI for a PF via ethtool. Jake cleans up needless NULL checks in ice_sched_cleanup_all(). Kevin updates the firmware API version to align with current NVM images. 
v2: Added "Fixes:" tag to patch 5 commit description and added the use of netif_is_rxfh_configured() in patch 13 to see if RSS has been configured by the user, if so do not overwrite that configuration. ==================== Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
This commit is contained in:
commit
c4f2cbd376
|
@ -130,6 +130,8 @@ extern const char ice_drv_ver[];
|
|||
ICE_PROMISC_VLAN_TX | \
|
||||
ICE_PROMISC_VLAN_RX)
|
||||
|
||||
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
|
||||
|
||||
struct ice_txq_meta {
|
||||
u32 q_teid; /* Tx-scheduler element identifier */
|
||||
u16 q_id; /* Entry in VSI's txq_map bitmap */
|
||||
|
@ -283,6 +285,8 @@ struct ice_vsi {
|
|||
u16 num_txq; /* Used Tx queues */
|
||||
u16 alloc_rxq; /* Allocated Rx queues */
|
||||
u16 num_rxq; /* Used Rx queues */
|
||||
u16 req_txq; /* User requested Tx queues */
|
||||
u16 req_rxq; /* User requested Rx queues */
|
||||
u16 num_rx_desc;
|
||||
u16 num_tx_desc;
|
||||
struct ice_tc_cfg tc_cfg;
|
||||
|
@ -489,6 +493,7 @@ void ice_set_ethtool_ops(struct net_device *netdev);
|
|||
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
|
||||
u16 ice_get_avail_txq_count(struct ice_pf *pf);
|
||||
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
|
||||
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
|
||||
void ice_update_vsi_stats(struct ice_vsi *vsi);
|
||||
void ice_update_pf_stats(struct ice_pf *pf);
|
||||
int ice_up(struct ice_vsi *vsi);
|
||||
|
@ -503,6 +508,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
|||
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
|
||||
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
|
||||
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
|
||||
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
|
||||
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
|
||||
int ice_open(struct net_device *netdev);
|
||||
int ice_stop(struct net_device *netdev);
|
||||
|
|
|
@ -101,7 +101,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
|
|||
struct ice_q_vector *q_vector;
|
||||
|
||||
/* allocate q_vector */
|
||||
q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
|
||||
q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
|
||||
GFP_KERNEL);
|
||||
if (!q_vector)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -138,10 +139,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
|
|||
struct ice_q_vector *q_vector;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct ice_ring *ring;
|
||||
struct device *dev;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (!vsi->q_vectors[v_idx]) {
|
||||
dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
|
||||
v_idx);
|
||||
dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
|
||||
return;
|
||||
}
|
||||
q_vector = vsi->q_vectors[v_idx];
|
||||
|
@ -155,7 +157,7 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
|
|||
if (vsi->netdev)
|
||||
netif_napi_del(&q_vector->napi);
|
||||
|
||||
devm_kfree(&pf->pdev->dev, q_vector);
|
||||
devm_kfree(dev, q_vector);
|
||||
vsi->q_vectors[v_idx] = NULL;
|
||||
}
|
||||
|
||||
|
@ -482,7 +484,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
|
|||
/* wait for the change to finish */
|
||||
ret = ice_pf_rxq_wait(pf, pf_q, ena);
|
||||
if (ret)
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(ice_pf_to_dev(pf),
|
||||
"VSI idx %d Rx ring %d %sable timeout\n",
|
||||
vsi->idx, pf_q, (ena ? "en" : "dis"));
|
||||
|
||||
|
@ -500,11 +502,12 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
|
|||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
int v_idx = 0, num_q_vectors;
|
||||
struct device *dev;
|
||||
int err;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (vsi->q_vectors[0]) {
|
||||
dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
|
||||
vsi->vsi_num);
|
||||
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
|
@ -522,8 +525,7 @@ err_out:
|
|||
while (v_idx--)
|
||||
ice_free_q_vector(vsi, v_idx);
|
||||
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
|
||||
dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
|
||||
vsi->num_q_vectors, vsi->vsi_num, err);
|
||||
vsi->num_q_vectors = 0;
|
||||
return err;
|
||||
|
@ -640,7 +642,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
|
|||
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
|
||||
1, qg_buf, buf_len, NULL);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(ice_pf_to_dev(pf),
|
||||
"Failed to set LAN Tx queue context, error: %d\n",
|
||||
status);
|
||||
return -ENODEV;
|
||||
|
|
|
@ -1673,6 +1673,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
|
|||
ice_debug(hw, ICE_DBG_INIT,
|
||||
"%s: valid_functions (bitmap) = %d\n", prefix,
|
||||
caps->valid_functions);
|
||||
|
||||
/* store func count for resource management purposes */
|
||||
if (dev_p)
|
||||
dev_p->num_funcs = hweight32(number);
|
||||
break;
|
||||
case ICE_AQC_CAPS_SRIOV:
|
||||
caps->sr_iov_1_1 = (number == 1);
|
||||
|
@ -1779,6 +1783,18 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Re-calculate capabilities that are dependent on the number of
|
||||
* physical ports; i.e. some features are not supported or function
|
||||
* differently on devices with more than 4 ports.
|
||||
*/
|
||||
if (hw->dev_caps.num_funcs > 4) {
|
||||
/* Max 4 TCs per port */
|
||||
caps->maxtc = 4;
|
||||
ice_debug(hw, ICE_DBG_INIT,
|
||||
"%s: maxtc = %d (based on #ports)\n", prefix,
|
||||
caps->maxtc);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1875,8 +1891,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
|
|||
struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
|
||||
u32 valid_func, rxq_first_id, txq_first_id;
|
||||
u32 msix_vector_first_id, max_mtu;
|
||||
u32 num_func = 0;
|
||||
u8 i;
|
||||
u32 num_funcs;
|
||||
|
||||
/* cache some func_caps values that should be restored after memset */
|
||||
valid_func = func_caps->common_cap.valid_functions;
|
||||
|
@ -1909,6 +1924,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
|
|||
rxq_first_id = dev_caps->common_cap.rxq_first_id;
|
||||
msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
|
||||
max_mtu = dev_caps->common_cap.max_mtu;
|
||||
num_funcs = dev_caps->num_funcs;
|
||||
|
||||
/* unset dev capabilities */
|
||||
memset(dev_caps, 0, sizeof(*dev_caps));
|
||||
|
@ -1919,19 +1935,14 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
|
|||
dev_caps->common_cap.rxq_first_id = rxq_first_id;
|
||||
dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
|
||||
dev_caps->common_cap.max_mtu = max_mtu;
|
||||
|
||||
/* valid_func is a bitmap. get number of functions */
|
||||
#define ICE_MAX_FUNCS 8
|
||||
for (i = 0; i < ICE_MAX_FUNCS; i++)
|
||||
if (valid_func & BIT(i))
|
||||
num_func++;
|
||||
dev_caps->num_funcs = num_funcs;
|
||||
|
||||
/* one Tx and one Rx queue per function in safe mode */
|
||||
dev_caps->common_cap.num_rxq = num_func;
|
||||
dev_caps->common_cap.num_txq = num_func;
|
||||
dev_caps->common_cap.num_rxq = num_funcs;
|
||||
dev_caps->common_cap.num_txq = num_funcs;
|
||||
|
||||
/* two MSIX vectors per function */
|
||||
dev_caps->common_cap.num_msix_vectors = 2 * num_func;
|
||||
dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
*/
|
||||
#define EXP_FW_API_VER_BRANCH 0x00
|
||||
#define EXP_FW_API_VER_MAJOR 0x01
|
||||
#define EXP_FW_API_VER_MINOR 0x03
|
||||
#define EXP_FW_API_VER_MINOR 0x05
|
||||
|
||||
/* Different control queue types: These are mainly for SW consumption. */
|
||||
enum ice_ctl_q {
|
||||
|
|
|
@ -4,8 +4,6 @@
|
|||
#include "ice_dcb_lib.h"
|
||||
#include "ice_dcb_nl.h"
|
||||
|
||||
static void ice_pf_dcb_recfg(struct ice_pf *pf);
|
||||
|
||||
/**
|
||||
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
|
||||
* @vsi: the VSI being configured
|
||||
|
@ -160,6 +158,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
|
|||
{
|
||||
struct ice_aqc_port_ets_elem buf = { 0 };
|
||||
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
int ret = ICE_DCB_NO_HW_CHG;
|
||||
struct ice_vsi *pf_vsi;
|
||||
|
||||
|
@ -171,15 +170,15 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
|
|||
|
||||
/* Enable DCB tagging only when more than one TC */
|
||||
if (ice_dcb_get_num_tc(new_cfg) > 1) {
|
||||
dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
|
||||
dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
|
||||
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
|
||||
} else {
|
||||
dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
|
||||
dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
|
||||
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
|
||||
}
|
||||
|
||||
if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
|
||||
dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
|
||||
dev_dbg(dev, "No change in DCB config required\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -188,10 +187,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
|
|||
if (!old_cfg)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_info(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n");
|
||||
dev_info(dev, "Commit DCB Configuration to the hardware\n");
|
||||
pf_vsi = ice_get_main_vsi(pf);
|
||||
if (!pf_vsi) {
|
||||
dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
|
||||
dev_dbg(dev, "PF VSI doesn't exist\n");
|
||||
ret = -EINVAL;
|
||||
goto free_cfg;
|
||||
}
|
||||
|
@ -213,7 +212,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
|
|||
if (pf->hw.port_info->is_sw_lldp) {
|
||||
ret = ice_set_dcb_cfg(pf->hw.port_info);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
|
||||
dev_err(dev, "Set DCB Config failed\n");
|
||||
/* Restore previous settings to local config */
|
||||
memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
|
||||
goto out;
|
||||
|
@ -222,7 +221,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
|
|||
|
||||
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
|
||||
dev_err(dev, "Query Port ETS failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -269,6 +268,7 @@ static bool
|
|||
ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
|
||||
struct ice_dcbx_cfg *new_cfg)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
bool need_reconfig = false;
|
||||
|
||||
/* Check if ETS configuration has changed */
|
||||
|
@ -279,33 +279,33 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
|
|||
&old_cfg->etscfg.prio_table,
|
||||
sizeof(new_cfg->etscfg.prio_table))) {
|
||||
need_reconfig = true;
|
||||
dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
|
||||
dev_dbg(dev, "ETS UP2TC changed.\n");
|
||||
}
|
||||
|
||||
if (memcmp(&new_cfg->etscfg.tcbwtable,
|
||||
&old_cfg->etscfg.tcbwtable,
|
||||
sizeof(new_cfg->etscfg.tcbwtable)))
|
||||
dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
|
||||
dev_dbg(dev, "ETS TC BW Table changed.\n");
|
||||
|
||||
if (memcmp(&new_cfg->etscfg.tsatable,
|
||||
&old_cfg->etscfg.tsatable,
|
||||
sizeof(new_cfg->etscfg.tsatable)))
|
||||
dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
|
||||
dev_dbg(dev, "ETS TSA Table changed.\n");
|
||||
}
|
||||
|
||||
/* Check if PFC configuration has changed */
|
||||
if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
|
||||
need_reconfig = true;
|
||||
dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
|
||||
dev_dbg(dev, "PFC config change detected.\n");
|
||||
}
|
||||
|
||||
/* Check if APP Table has changed */
|
||||
if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
|
||||
need_reconfig = true;
|
||||
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
|
||||
dev_dbg(dev, "APP Table change detected.\n");
|
||||
}
|
||||
|
||||
dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
|
||||
dev_dbg(dev, "dcb need_reconfig=%d\n", need_reconfig);
|
||||
return need_reconfig;
|
||||
}
|
||||
|
||||
|
@ -317,11 +317,12 @@ void ice_dcb_rebuild(struct ice_pf *pf)
|
|||
{
|
||||
struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
|
||||
struct ice_aqc_port_ets_elem buf = { 0 };
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
enum ice_status ret;
|
||||
|
||||
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
|
||||
dev_err(dev, "Query Port ETS failed\n");
|
||||
goto dcb_error;
|
||||
}
|
||||
|
||||
|
@ -340,17 +341,14 @@ void ice_dcb_rebuild(struct ice_pf *pf)
|
|||
ice_cfg_etsrec_defaults(pf->hw.port_info);
|
||||
ret = ice_set_dcb_cfg(pf->hw.port_info);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
|
||||
dev_err(dev, "Failed to set DCB to unwilling\n");
|
||||
goto dcb_error;
|
||||
}
|
||||
|
||||
/* Retrieve DCB config and ensure same as current in SW */
|
||||
prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg,
|
||||
sizeof(*prev_cfg), GFP_KERNEL);
|
||||
if (!prev_cfg) {
|
||||
dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
|
||||
prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
|
||||
if (!prev_cfg)
|
||||
goto dcb_error;
|
||||
}
|
||||
|
||||
ice_init_dcb(&pf->hw, true);
|
||||
if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
|
||||
|
@ -360,12 +358,13 @@ void ice_dcb_rebuild(struct ice_pf *pf)
|
|||
|
||||
if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
|
||||
/* difference in cfg detected - disable DCB till next MIB */
|
||||
dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
|
||||
dev_err(dev, "Set local MIB not accurate\n");
|
||||
kfree(prev_cfg);
|
||||
goto dcb_error;
|
||||
}
|
||||
|
||||
/* fetched config congruent to previous configuration */
|
||||
devm_kfree(&pf->pdev->dev, prev_cfg);
|
||||
kfree(prev_cfg);
|
||||
|
||||
/* Set the local desired config */
|
||||
if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
|
||||
|
@ -375,27 +374,30 @@ void ice_dcb_rebuild(struct ice_pf *pf)
|
|||
ice_cfg_etsrec_defaults(pf->hw.port_info);
|
||||
ret = ice_set_dcb_cfg(pf->hw.port_info);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Failed to set desired config\n");
|
||||
dev_err(dev, "Failed to set desired config\n");
|
||||
goto dcb_error;
|
||||
}
|
||||
dev_info(&pf->pdev->dev, "DCB restored after reset\n");
|
||||
dev_info(dev, "DCB restored after reset\n");
|
||||
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
|
||||
dev_err(dev, "Query Port ETS failed\n");
|
||||
goto dcb_error;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
dcb_error:
|
||||
dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
|
||||
prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
|
||||
dev_err(dev, "Disabling DCB until new settings occur\n");
|
||||
prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
|
||||
if (!prev_cfg)
|
||||
return;
|
||||
|
||||
prev_cfg->etscfg.willing = true;
|
||||
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
|
||||
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
|
||||
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
|
||||
ice_pf_dcb_cfg(pf, prev_cfg, false);
|
||||
devm_kfree(&pf->pdev->dev, prev_cfg);
|
||||
kfree(prev_cfg);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -410,18 +412,17 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
|
|||
int ret = 0;
|
||||
|
||||
pi = pf->hw.port_info;
|
||||
newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
|
||||
newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL);
|
||||
if (!newcfg)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
|
||||
memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
|
||||
|
||||
dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
|
||||
dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
|
||||
if (ice_pf_dcb_cfg(pf, newcfg, locked))
|
||||
ret = -EINVAL;
|
||||
|
||||
devm_kfree(&pf->pdev->dev, newcfg);
|
||||
kfree(newcfg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -442,9 +443,10 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
|
|||
|
||||
hw = &pf->hw;
|
||||
pi = hw->port_info;
|
||||
dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
|
||||
dcbcfg = kzalloc(sizeof(*dcbcfg), GFP_KERNEL);
|
||||
if (!dcbcfg)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(dcbcfg, 0, sizeof(*dcbcfg));
|
||||
memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
|
||||
|
||||
dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
|
||||
|
@ -465,7 +467,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
|
|||
dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
|
||||
|
||||
ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
|
||||
devm_kfree(&pf->pdev->dev, dcbcfg);
|
||||
kfree(dcbcfg);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -504,13 +506,13 @@ static bool ice_dcb_tc_contig(u8 *prio_table)
|
|||
static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
|
||||
{
|
||||
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
int ret;
|
||||
|
||||
/* Configure SW DCB default with ETS non-willing */
|
||||
ret = ice_dcb_sw_dflt_cfg(pf, false, true);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Failed to set local DCB config %d\n", ret);
|
||||
dev_err(dev, "Failed to set local DCB config %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -518,7 +520,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
|
|||
dcbcfg->etscfg.willing = 1;
|
||||
ret = ice_set_dcb_cfg(pf->hw.port_info);
|
||||
if (ret)
|
||||
dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
|
||||
dev_err(dev, "Failed to set DCB to unwilling\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -531,7 +533,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
|
|||
* calling this function. Reconfiguring DCB based on
|
||||
* local_dcbx_cfg.
|
||||
*/
|
||||
static void ice_pf_dcb_recfg(struct ice_pf *pf)
|
||||
void ice_pf_dcb_recfg(struct ice_pf *pf)
|
||||
{
|
||||
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
|
||||
u8 tc_map = 0;
|
||||
|
@ -539,10 +541,12 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
|
|||
|
||||
/* Update each VSI */
|
||||
ice_for_each_vsi(pf, v) {
|
||||
if (!pf->vsi[v])
|
||||
struct ice_vsi *vsi = pf->vsi[v];
|
||||
|
||||
if (!vsi)
|
||||
continue;
|
||||
|
||||
if (pf->vsi[v]->type == ICE_VSI_PF) {
|
||||
if (vsi->type == ICE_VSI_PF) {
|
||||
tc_map = ice_dcb_get_ena_tc(dcbcfg);
|
||||
|
||||
/* If DCBX request non-contiguous TC, then configure
|
||||
|
@ -556,17 +560,16 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
|
|||
tc_map = ICE_DFLT_TRAFFIC_CLASS;
|
||||
}
|
||||
|
||||
ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
|
||||
ret = ice_vsi_cfg_tc(vsi, tc_map);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Failed to config TC for VSI index: %d\n",
|
||||
pf->vsi[v]->idx);
|
||||
dev_err(ice_pf_to_dev(pf), "Failed to config TC for VSI index: %d\n",
|
||||
vsi->idx);
|
||||
continue;
|
||||
}
|
||||
|
||||
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
|
||||
if (pf->vsi[v]->type == ICE_VSI_PF)
|
||||
ice_dcbnl_set_all(pf->vsi[v]);
|
||||
ice_vsi_map_rings_to_vectors(vsi);
|
||||
if (vsi->type == ICE_VSI_PF)
|
||||
ice_dcbnl_set_all(vsi);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -577,7 +580,7 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
|
|||
*/
|
||||
int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
|
||||
{
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_port_info *port_info;
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
int err;
|
||||
|
@ -586,23 +589,22 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
|
|||
|
||||
err = ice_init_dcb(hw, false);
|
||||
if (err && !port_info->is_sw_lldp) {
|
||||
dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err);
|
||||
dev_err(dev, "Error initializing DCB %d\n", err);
|
||||
goto dcb_init_err;
|
||||
}
|
||||
|
||||
dev_info(&pf->pdev->dev,
|
||||
dev_info(dev,
|
||||
"DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
|
||||
pf->hw.func_caps.common_cap.maxtc);
|
||||
if (err) {
|
||||
struct ice_vsi *pf_vsi;
|
||||
|
||||
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
|
||||
dev_info(&pf->pdev->dev,
|
||||
"FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
|
||||
dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
|
||||
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
|
||||
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
|
||||
if (err) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"Failed to set local DCB config %d\n", err);
|
||||
err = -EIO;
|
||||
goto dcb_init_err;
|
||||
|
@ -613,8 +615,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
|
|||
*/
|
||||
pf_vsi = ice_get_main_vsi(pf);
|
||||
if (!pf_vsi) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Failed to set local DCB config\n");
|
||||
dev_err(dev, "Failed to set local DCB config\n");
|
||||
err = -EIO;
|
||||
goto dcb_init_err;
|
||||
}
|
||||
|
@ -729,6 +730,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
|
|||
struct ice_rq_event_info *event)
|
||||
{
|
||||
struct ice_aqc_port_ets_elem buf = { 0 };
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_aqc_lldp_get_mib *mib;
|
||||
struct ice_dcbx_cfg tmp_dcbx_cfg;
|
||||
bool need_reconfig = false;
|
||||
|
@ -742,8 +744,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
|
|||
return;
|
||||
|
||||
if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"MIB Change Event in HOST mode\n");
|
||||
dev_dbg(dev, "MIB Change Event in HOST mode\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -752,21 +753,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
|
|||
/* Ignore if event is not for Nearest Bridge */
|
||||
type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
|
||||
ICE_AQ_LLDP_BRID_TYPE_M);
|
||||
dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type);
|
||||
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
|
||||
if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
|
||||
return;
|
||||
|
||||
/* Check MIB Type and return if event for Remote MIB update */
|
||||
type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"LLDP event mib type %s\n", type ? "remote" : "local");
|
||||
dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
|
||||
if (type == ICE_AQ_LLDP_MIB_REMOTE) {
|
||||
/* Update the remote cached instance and return */
|
||||
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
|
||||
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
|
||||
&pi->remote_dcbx_cfg);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n");
|
||||
dev_err(dev, "Failed to get remote DCB config\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -780,14 +780,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
|
|||
/* Get updated DCBX data from firmware */
|
||||
ret = ice_get_dcb_cfg(pf->hw.port_info);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
|
||||
dev_err(dev, "Failed to get DCB config\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* No change detected in DCBX configs */
|
||||
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"No change detected in DCBX configuration.\n");
|
||||
dev_dbg(dev, "No change detected in DCBX configuration.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -799,16 +798,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
|
|||
|
||||
/* Enable DCB tagging only when more than one TC */
|
||||
if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
|
||||
dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
|
||||
dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
|
||||
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
|
||||
} else {
|
||||
dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
|
||||
dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
|
||||
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
|
||||
}
|
||||
|
||||
pf_vsi = ice_get_main_vsi(pf);
|
||||
if (!pf_vsi) {
|
||||
dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
|
||||
dev_dbg(dev, "PF VSI doesn't exist\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -817,7 +816,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
|
|||
|
||||
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
|
||||
dev_err(dev, "Query Port ETS failed\n");
|
||||
rtnl_unlock();
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
|
|||
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
|
||||
int
|
||||
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
|
||||
void ice_pf_dcb_recfg(struct ice_pf *pf);
|
||||
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
|
||||
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
|
||||
void ice_update_dcb_stats(struct ice_pf *pf);
|
||||
|
@ -58,7 +59,7 @@ ice_dcb_get_tc(struct ice_vsi __always_unused *vsi,
|
|||
static inline int
|
||||
ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
|
||||
{
|
||||
dev_dbg(&pf->pdev->dev, "DCB not supported\n");
|
||||
dev_dbg(ice_pf_to_dev(pf), "DCB not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
|
@ -78,6 +79,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
|
|||
}
|
||||
|
||||
#define ice_update_dcb_stats(pf) do {} while (0)
|
||||
#define ice_pf_dcb_recfg(pf) do {} while (0)
|
||||
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
|
||||
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
|
||||
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
|
||||
|
|
|
@ -179,7 +179,7 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
|
|||
else
|
||||
pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
|
||||
|
||||
dev_info(&pf->pdev->dev, "DCBx mode = 0x%x\n", mode);
|
||||
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
|
||||
return ICE_DCB_HW_CHG_RST;
|
||||
}
|
||||
|
||||
|
@ -297,7 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
|
|||
return;
|
||||
|
||||
*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(ice_pf_to_dev(pf),
|
||||
"Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
|
||||
prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
|
||||
}
|
||||
|
@ -328,7 +328,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
|
|||
else
|
||||
new_cfg->pfc.pfcena &= ~BIT(prio);
|
||||
|
||||
dev_dbg(&pf->pdev->dev, "Set PFC config UP:%d set:%d pfcena:0x%x\n",
|
||||
dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
|
||||
prio, set, new_cfg->pfc.pfcena);
|
||||
}
|
||||
|
||||
|
@ -359,7 +359,7 @@ static u8 ice_dcbnl_getstate(struct net_device *netdev)
|
|||
|
||||
state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
|
||||
|
||||
dev_dbg(&pf->pdev->dev, "DCB enabled state = %d\n", state);
|
||||
dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
|
||||
return state;
|
||||
}
|
||||
|
||||
|
@ -418,7 +418,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
|
|||
return;
|
||||
|
||||
*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(ice_pf_to_dev(pf),
|
||||
"Get PG config prio=%d tc=%d\n", prio, *pgid);
|
||||
}
|
||||
|
||||
|
@ -479,7 +479,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
|
|||
return;
|
||||
|
||||
*bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
|
||||
dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n",
|
||||
dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
|
||||
pgid, *bw_pct);
|
||||
}
|
||||
|
||||
|
@ -597,7 +597,7 @@ static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
|
|||
break;
|
||||
}
|
||||
|
||||
dev_dbg(&pf->pdev->dev, "DCBX Get Capability cap=%d capval=0x%x\n",
|
||||
dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
|
||||
capid, *cap);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -248,7 +248,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
|
|||
int ret = 0;
|
||||
u16 *buf;
|
||||
|
||||
dev = &pf->pdev->dev;
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
|
||||
|
||||
|
@ -343,6 +343,7 @@ static u64 ice_eeprom_test(struct net_device *netdev)
|
|||
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
|
||||
{
|
||||
struct ice_pf *pf = (struct ice_pf *)hw->back;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
static const u32 patterns[] = {
|
||||
0x5A5A5A5A, 0xA5A5A5A5,
|
||||
0x00000000, 0xFFFFFFFF
|
||||
|
@ -358,7 +359,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
|
|||
val = rd32(hw, reg);
|
||||
if (val == pattern)
|
||||
continue;
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
|
||||
, __func__, reg, pattern, val);
|
||||
return 1;
|
||||
|
@ -367,7 +368,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
|
|||
wr32(hw, reg, orig_val);
|
||||
val = rd32(hw, reg);
|
||||
if (val != orig_val) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
|
||||
, __func__, reg, orig_val, val);
|
||||
return 1;
|
||||
|
@ -507,7 +508,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
|
|||
if (!pf)
|
||||
return -EINVAL;
|
||||
|
||||
data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
|
||||
data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -649,9 +650,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
|
|||
u8 broadcast[ETH_ALEN], ret = 0;
|
||||
int num_frames, valid_frames;
|
||||
LIST_HEAD(tmp_list);
|
||||
struct device *dev;
|
||||
u8 *tx_frame;
|
||||
int i;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
netdev_info(netdev, "loopback test\n");
|
||||
|
||||
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
|
||||
|
@ -712,12 +715,12 @@ static u64 ice_loopback_test(struct net_device *netdev)
|
|||
ret = 10;
|
||||
|
||||
lbtest_free_frame:
|
||||
devm_kfree(&pf->pdev->dev, tx_frame);
|
||||
devm_kfree(dev, tx_frame);
|
||||
remove_mac_filters:
|
||||
if (ice_remove_mac(&pf->hw, &tmp_list))
|
||||
netdev_err(netdev, "Could not remove MAC filter for the test VSI");
|
||||
free_mac_list:
|
||||
ice_free_fltr_list(&pf->pdev->dev, &tmp_list);
|
||||
ice_free_fltr_list(dev, &tmp_list);
|
||||
lbtest_mac_dis:
|
||||
/* Disable MAC loopback after the test is completed. */
|
||||
if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
|
||||
|
@ -774,6 +777,9 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
|
|||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
bool if_running = netif_running(netdev);
|
||||
struct ice_pf *pf = np->vsi->back;
|
||||
struct device *dev;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
|
||||
netdev_info(netdev, "offline testing starting\n");
|
||||
|
@ -781,7 +787,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
|
|||
set_bit(__ICE_TESTING, pf->state);
|
||||
|
||||
if (ice_active_vfs(pf)) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
dev_warn(dev,
|
||||
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
|
||||
data[ICE_ETH_TEST_REG] = 1;
|
||||
data[ICE_ETH_TEST_EEPROM] = 1;
|
||||
|
@ -816,8 +822,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
|
|||
int status = ice_open(netdev);
|
||||
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Could not open device %s, err %d",
|
||||
dev_err(dev, "Could not open device %s, err %d",
|
||||
pf->int_name, status);
|
||||
}
|
||||
}
|
||||
|
@ -962,7 +967,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
|
|||
}
|
||||
|
||||
/* Get last SW configuration */
|
||||
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
|
||||
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
|
||||
if (!caps)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1007,7 +1012,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
|
|||
}
|
||||
|
||||
done:
|
||||
devm_kfree(&vsi->back->pdev->dev, caps);
|
||||
kfree(caps);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1083,7 +1088,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
|
|||
break;
|
||||
}
|
||||
|
||||
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
|
||||
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
|
||||
if (!caps)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1110,7 +1115,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
|
|||
fecparam->fec |= ETHTOOL_FEC_OFF;
|
||||
|
||||
done:
|
||||
devm_kfree(&vsi->back->pdev->dev, caps);
|
||||
kfree(caps);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1155,12 +1160,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
|
|||
DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
int ret = 0;
|
||||
u32 i;
|
||||
|
||||
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
|
||||
return -EINVAL;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
|
||||
|
||||
bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
|
||||
|
@ -1189,7 +1196,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
|
|||
* events to respond to.
|
||||
*/
|
||||
if (status)
|
||||
dev_info(&pf->pdev->dev,
|
||||
dev_info(dev,
|
||||
"Failed to unreg for LLDP events\n");
|
||||
|
||||
/* The AQ call to stop the FW LLDP agent will generate
|
||||
|
@ -1197,15 +1204,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
|
|||
*/
|
||||
status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
|
||||
if (status)
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Fail to stop LLDP agent\n");
|
||||
dev_warn(dev, "Fail to stop LLDP agent\n");
|
||||
/* Use case for having the FW LLDP agent stopped
|
||||
* will likely not need DCB, so failure to init is
|
||||
* not a concern of ethtool
|
||||
*/
|
||||
status = ice_init_pf_dcb(pf, true);
|
||||
if (status)
|
||||
dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
|
||||
dev_warn(dev, "Fail to init DCB\n");
|
||||
} else {
|
||||
enum ice_status status;
|
||||
bool dcbx_agent_status;
|
||||
|
@ -1215,8 +1221,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
|
|||
*/
|
||||
status = ice_aq_start_lldp(&pf->hw, true, NULL);
|
||||
if (status)
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Fail to start LLDP Agent\n");
|
||||
dev_warn(dev, "Fail to start LLDP Agent\n");
|
||||
|
||||
/* AQ command to start FW DCBX agent will fail if
|
||||
* the agent is already started
|
||||
|
@ -1225,10 +1230,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
|
|||
&dcbx_agent_status,
|
||||
NULL);
|
||||
if (status)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"Failed to start FW DCBX\n");
|
||||
dev_dbg(dev, "Failed to start FW DCBX\n");
|
||||
|
||||
dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
|
||||
dev_info(dev, "FW DCBX agent is %s\n",
|
||||
dcbx_agent_status ? "ACTIVE" : "DISABLED");
|
||||
|
||||
/* Failure to configure MIB change or init DCB is not
|
||||
|
@ -1238,7 +1242,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
|
|||
*/
|
||||
status = ice_init_pf_dcb(pf, true);
|
||||
if (status)
|
||||
dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
|
||||
dev_dbg(dev, "Fail to init DCB\n");
|
||||
|
||||
/* Remove rule to direct LLDP packets to default VSI.
|
||||
* The FW LLDP engine will now be consuming them.
|
||||
|
@ -1248,7 +1252,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
|
|||
/* Register for MIB change events */
|
||||
status = ice_cfg_lldp_mib_change(&pf->hw, true);
|
||||
if (status)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"Fail to enable MIB change events\n");
|
||||
}
|
||||
}
|
||||
|
@ -2141,7 +2145,7 @@ ice_get_link_ksettings(struct net_device *netdev,
|
|||
/* flow control is symmetric and always supported */
|
||||
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
|
||||
|
||||
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
|
||||
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
|
||||
if (!caps)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -2199,7 +2203,7 @@ ice_get_link_ksettings(struct net_device *netdev,
|
|||
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
|
||||
|
||||
done:
|
||||
devm_kfree(&vsi->back->pdev->dev, caps);
|
||||
kfree(caps);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -2428,8 +2432,7 @@ ice_set_link_ksettings(struct net_device *netdev,
|
|||
usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
|
||||
}
|
||||
|
||||
abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities),
|
||||
GFP_KERNEL);
|
||||
abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
|
||||
if (!abilities)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -2521,7 +2524,7 @@ ice_set_link_ksettings(struct net_device *netdev,
|
|||
}
|
||||
|
||||
done:
|
||||
devm_kfree(&pf->pdev->dev, abilities);
|
||||
kfree(abilities);
|
||||
clear_bit(__ICE_CFG_BUSY, pf->state);
|
||||
|
||||
return err;
|
||||
|
@ -2649,8 +2652,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
|
|||
netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
|
||||
vsi->tx_rings[0]->count, new_tx_cnt);
|
||||
|
||||
tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
|
||||
sizeof(*tx_rings), GFP_KERNEL);
|
||||
tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL);
|
||||
if (!tx_rings) {
|
||||
err = -ENOMEM;
|
||||
goto done;
|
||||
|
@ -2666,7 +2668,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
|
|||
if (err) {
|
||||
while (i--)
|
||||
ice_clean_tx_ring(&tx_rings[i]);
|
||||
devm_kfree(&pf->pdev->dev, tx_rings);
|
||||
kfree(tx_rings);
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
@ -2678,8 +2680,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
|
|||
netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
|
||||
vsi->xdp_rings[0]->count, new_tx_cnt);
|
||||
|
||||
xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq,
|
||||
sizeof(*xdp_rings), GFP_KERNEL);
|
||||
xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL);
|
||||
if (!xdp_rings) {
|
||||
err = -ENOMEM;
|
||||
goto free_tx;
|
||||
|
@ -2695,7 +2696,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
|
|||
if (err) {
|
||||
while (i--)
|
||||
ice_clean_tx_ring(&xdp_rings[i]);
|
||||
devm_kfree(&pf->pdev->dev, xdp_rings);
|
||||
kfree(xdp_rings);
|
||||
goto free_tx;
|
||||
}
|
||||
ice_set_ring_xdp(&xdp_rings[i]);
|
||||
|
@ -2709,8 +2710,7 @@ process_rx:
|
|||
netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
|
||||
vsi->rx_rings[0]->count, new_rx_cnt);
|
||||
|
||||
rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq,
|
||||
sizeof(*rx_rings), GFP_KERNEL);
|
||||
rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL);
|
||||
if (!rx_rings) {
|
||||
err = -ENOMEM;
|
||||
goto done;
|
||||
|
@ -2740,7 +2740,7 @@ rx_unwind:
|
|||
i--;
|
||||
ice_free_rx_ring(&rx_rings[i]);
|
||||
}
|
||||
devm_kfree(&pf->pdev->dev, rx_rings);
|
||||
kfree(rx_rings);
|
||||
err = -ENOMEM;
|
||||
goto free_tx;
|
||||
}
|
||||
|
@ -2758,7 +2758,7 @@ process_link:
|
|||
ice_free_tx_ring(vsi->tx_rings[i]);
|
||||
*vsi->tx_rings[i] = tx_rings[i];
|
||||
}
|
||||
devm_kfree(&pf->pdev->dev, tx_rings);
|
||||
kfree(tx_rings);
|
||||
}
|
||||
|
||||
if (rx_rings) {
|
||||
|
@ -2776,7 +2776,7 @@ process_link:
|
|||
rx_rings[i].next_to_alloc = 0;
|
||||
*vsi->rx_rings[i] = rx_rings[i];
|
||||
}
|
||||
devm_kfree(&pf->pdev->dev, rx_rings);
|
||||
kfree(rx_rings);
|
||||
}
|
||||
|
||||
if (xdp_rings) {
|
||||
|
@ -2784,7 +2784,7 @@ process_link:
|
|||
ice_free_tx_ring(vsi->xdp_rings[i]);
|
||||
*vsi->xdp_rings[i] = xdp_rings[i];
|
||||
}
|
||||
devm_kfree(&pf->pdev->dev, xdp_rings);
|
||||
kfree(xdp_rings);
|
||||
}
|
||||
|
||||
vsi->num_tx_desc = new_tx_cnt;
|
||||
|
@ -2798,7 +2798,7 @@ free_tx:
|
|||
if (tx_rings) {
|
||||
ice_for_each_txq(vsi, i)
|
||||
ice_free_tx_ring(&tx_rings[i]);
|
||||
devm_kfree(&pf->pdev->dev, tx_rings);
|
||||
kfree(tx_rings);
|
||||
}
|
||||
|
||||
done:
|
||||
|
@ -2846,7 +2846,6 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
|
|||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
struct ice_port_info *pi = np->vsi->port_info;
|
||||
struct ice_aqc_get_phy_caps_data *pcaps;
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_dcbx_cfg *dcbx_cfg;
|
||||
enum ice_status status;
|
||||
|
||||
|
@ -2856,8 +2855,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
|
|||
|
||||
dcbx_cfg = &pi->local_dcbx_cfg;
|
||||
|
||||
pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
|
||||
GFP_KERNEL);
|
||||
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
|
||||
if (!pcaps)
|
||||
return;
|
||||
|
||||
|
@ -2880,7 +2878,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
|
|||
pause->rx_pause = 1;
|
||||
|
||||
out:
|
||||
devm_kfree(&vsi->back->pdev->dev, pcaps);
|
||||
kfree(pcaps);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3061,7 +3059,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
|
||||
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
|
||||
if (!lut)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -3074,7 +3072,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
|
|||
indir[i] = (u32)(lut[i]);
|
||||
|
||||
out:
|
||||
devm_kfree(&pf->pdev->dev, lut);
|
||||
kfree(lut);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -3095,8 +3093,10 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
|
|||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
u8 *seed = NULL;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
|
@ -3109,8 +3109,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
|
|||
if (key) {
|
||||
if (!vsi->rss_hkey_user) {
|
||||
vsi->rss_hkey_user =
|
||||
devm_kzalloc(&pf->pdev->dev,
|
||||
ICE_VSIQF_HKEY_ARRAY_SIZE,
|
||||
devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
|
||||
GFP_KERNEL);
|
||||
if (!vsi->rss_hkey_user)
|
||||
return -ENOMEM;
|
||||
|
@ -3120,8 +3119,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
|
|||
}
|
||||
|
||||
if (!vsi->rss_lut_user) {
|
||||
vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
|
||||
vsi->rss_table_size,
|
||||
vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
|
||||
GFP_KERNEL);
|
||||
if (!vsi->rss_lut_user)
|
||||
return -ENOMEM;
|
||||
|
@ -3144,6 +3142,188 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_max_txq - return the maximum number of Tx queues for in a PF
|
||||
* @pf: PF structure
|
||||
*/
|
||||
static int ice_get_max_txq(struct ice_pf *pf)
|
||||
{
|
||||
return min_t(int, num_online_cpus(),
|
||||
pf->hw.func_caps.common_cap.num_txq);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_max_rxq - return the maximum number of Rx queues for in a PF
|
||||
* @pf: PF structure
|
||||
*/
|
||||
static int ice_get_max_rxq(struct ice_pf *pf)
|
||||
{
|
||||
return min_t(int, num_online_cpus(),
|
||||
pf->hw.func_caps.common_cap.num_rxq);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_combined_cnt - return the current number of combined channels
|
||||
* @vsi: PF VSI pointer
|
||||
*
|
||||
* Go through all queue vectors and count ones that have both Rx and Tx ring
|
||||
* attached
|
||||
*/
|
||||
static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
|
||||
{
|
||||
u32 combined = 0;
|
||||
int q_idx;
|
||||
|
||||
ice_for_each_q_vector(vsi, q_idx) {
|
||||
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
|
||||
|
||||
if (q_vector->rx.ring && q_vector->tx.ring)
|
||||
combined++;
|
||||
}
|
||||
|
||||
return combined;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_channels - get the current and max supported channels
|
||||
* @dev: network interface device structure
|
||||
* @ch: ethtool channel data structure
|
||||
*/
|
||||
static void
|
||||
ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
|
||||
{
|
||||
struct ice_netdev_priv *np = netdev_priv(dev);
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
|
||||
/* check to see if VSI is active */
|
||||
if (test_bit(__ICE_DOWN, vsi->state))
|
||||
return;
|
||||
|
||||
/* report maximum channels */
|
||||
ch->max_rx = ice_get_max_rxq(pf);
|
||||
ch->max_tx = ice_get_max_txq(pf);
|
||||
ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
|
||||
|
||||
/* report current channels */
|
||||
ch->combined_count = ice_get_combined_cnt(vsi);
|
||||
ch->rx_count = vsi->num_rxq - ch->combined_count;
|
||||
ch->tx_count = vsi->num_txq - ch->combined_count;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
|
||||
* @vsi: VSI to reconfigure RSS LUT on
|
||||
* @req_rss_size: requested range of queue numbers for hashing
|
||||
*
|
||||
* Set the VSI's RSS parameters, configure the RSS LUT based on these.
|
||||
*/
|
||||
static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
struct ice_hw *hw;
|
||||
int err = 0;
|
||||
u8 *lut;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
hw = &pf->hw;
|
||||
|
||||
if (!req_rss_size)
|
||||
return -EINVAL;
|
||||
|
||||
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
|
||||
if (!lut)
|
||||
return -ENOMEM;
|
||||
|
||||
/* set RSS LUT parameters */
|
||||
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
|
||||
vsi->rss_size = 1;
|
||||
} else {
|
||||
struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
|
||||
|
||||
vsi->rss_size = min_t(int, req_rss_size,
|
||||
BIT(caps->rss_table_entry_width));
|
||||
}
|
||||
|
||||
/* create/set RSS LUT */
|
||||
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
|
||||
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
|
||||
vsi->rss_table_size);
|
||||
if (status) {
|
||||
dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
|
||||
status, hw->adminq.rq_last_status);
|
||||
err = -EIO;
|
||||
}
|
||||
|
||||
kfree(lut);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_set_channels - set the number channels
|
||||
* @dev: network interface device structure
|
||||
* @ch: ethtool channel data structure
|
||||
*/
|
||||
static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
|
||||
{
|
||||
struct ice_netdev_priv *np = netdev_priv(dev);
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
int new_rx = 0, new_tx = 0;
|
||||
u32 curr_combined;
|
||||
|
||||
/* do not support changing channels in Safe Mode */
|
||||
if (ice_is_safe_mode(pf)) {
|
||||
netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
/* do not support changing other_count */
|
||||
if (ch->other_count)
|
||||
return -EINVAL;
|
||||
|
||||
curr_combined = ice_get_combined_cnt(vsi);
|
||||
|
||||
/* these checks are for cases where user didn't specify a particular
|
||||
* value on cmd line but we get non-zero value anyway via
|
||||
* get_channels(); look at ethtool.c in ethtool repository (the user
|
||||
* space part), particularly, do_schannels() routine
|
||||
*/
|
||||
if (ch->rx_count == vsi->num_rxq - curr_combined)
|
||||
ch->rx_count = 0;
|
||||
if (ch->tx_count == vsi->num_txq - curr_combined)
|
||||
ch->tx_count = 0;
|
||||
if (ch->combined_count == curr_combined)
|
||||
ch->combined_count = 0;
|
||||
|
||||
if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
|
||||
netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
new_rx = ch->combined_count + ch->rx_count;
|
||||
new_tx = ch->combined_count + ch->tx_count;
|
||||
|
||||
if (new_rx > ice_get_max_rxq(pf)) {
|
||||
netdev_err(dev, "Maximum allowed Rx channels is %d\n",
|
||||
ice_get_max_rxq(pf));
|
||||
return -EINVAL;
|
||||
}
|
||||
if (new_tx > ice_get_max_txq(pf)) {
|
||||
netdev_err(dev, "Maximum allowed Tx channels is %d\n",
|
||||
ice_get_max_txq(pf));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ice_vsi_recfg_qs(vsi, new_rx, new_tx);
|
||||
|
||||
if (new_rx && !netif_is_rxfh_configured(dev))
|
||||
return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum ice_container_type {
|
||||
ICE_RX_CONTAINER,
|
||||
ICE_TX_CONTAINER,
|
||||
|
@ -3183,7 +3363,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
|
|||
ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
|
||||
break;
|
||||
default:
|
||||
dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
|
||||
dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -3323,7 +3503,8 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
|
|||
|
||||
break;
|
||||
default:
|
||||
dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
|
||||
dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
|
||||
c_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -3420,10 +3601,17 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
|
|||
struct ice_vsi *vsi = np->vsi;
|
||||
|
||||
if (q_num < 0) {
|
||||
int i;
|
||||
int v_idx;
|
||||
|
||||
ice_for_each_q_vector(vsi, i) {
|
||||
if (ice_set_q_coalesce(vsi, ec, i))
|
||||
ice_for_each_q_vector(vsi, v_idx) {
|
||||
/* In some cases if DCB is configured the num_[rx|tx]q
|
||||
* can be less than vsi->num_q_vectors. This check
|
||||
* accounts for that so we don't report a false failure
|
||||
*/
|
||||
if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
|
||||
goto set_complete;
|
||||
|
||||
if (ice_set_q_coalesce(vsi, ec, v_idx))
|
||||
return -EINVAL;
|
||||
}
|
||||
goto set_complete;
|
||||
|
@ -3625,6 +3813,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
|
|||
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
|
||||
.get_rxfh = ice_get_rxfh,
|
||||
.set_rxfh = ice_set_rxfh,
|
||||
.get_channels = ice_get_channels,
|
||||
.set_channels = ice_set_channels,
|
||||
.get_ts_info = ethtool_op_get_ts_info,
|
||||
.get_per_queue_coalesce = ice_get_per_q_coalesce,
|
||||
.set_per_queue_coalesce = ice_set_per_q_coalesce,
|
||||
|
@ -3650,6 +3840,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
|
|||
.get_ringparam = ice_get_ringparam,
|
||||
.set_ringparam = ice_set_ringparam,
|
||||
.nway_reset = ice_nway_reset,
|
||||
.get_channels = ice_get_channels,
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -52,26 +52,29 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
|
|||
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
/* allocate memory for both Tx and Rx ring pointers */
|
||||
vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
|
||||
vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
|
||||
sizeof(*vsi->tx_rings), GFP_KERNEL);
|
||||
if (!vsi->tx_rings)
|
||||
return -ENOMEM;
|
||||
|
||||
vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
|
||||
vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
|
||||
sizeof(*vsi->rx_rings), GFP_KERNEL);
|
||||
if (!vsi->rx_rings)
|
||||
goto err_rings;
|
||||
|
||||
/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
|
||||
vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq),
|
||||
vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
|
||||
sizeof(*vsi->txq_map), GFP_KERNEL);
|
||||
|
||||
if (!vsi->txq_map)
|
||||
goto err_txq_map;
|
||||
|
||||
vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
|
||||
vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
|
||||
sizeof(*vsi->rxq_map), GFP_KERNEL);
|
||||
if (!vsi->rxq_map)
|
||||
goto err_rxq_map;
|
||||
|
@ -81,7 +84,7 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
|
|||
return 0;
|
||||
|
||||
/* allocate memory for q_vector pointers */
|
||||
vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
|
||||
vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
|
||||
sizeof(*vsi->q_vectors), GFP_KERNEL);
|
||||
if (!vsi->q_vectors)
|
||||
goto err_vectors;
|
||||
|
@ -89,13 +92,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
|
|||
return 0;
|
||||
|
||||
err_vectors:
|
||||
devm_kfree(&pf->pdev->dev, vsi->rxq_map);
|
||||
devm_kfree(dev, vsi->rxq_map);
|
||||
err_rxq_map:
|
||||
devm_kfree(&pf->pdev->dev, vsi->txq_map);
|
||||
devm_kfree(dev, vsi->txq_map);
|
||||
err_txq_map:
|
||||
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
|
||||
devm_kfree(dev, vsi->rx_rings);
|
||||
err_rings:
|
||||
devm_kfree(&pf->pdev->dev, vsi->tx_rings);
|
||||
devm_kfree(dev, vsi->tx_rings);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -139,15 +142,24 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
|
|||
case ICE_VSI_PF:
|
||||
vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
|
||||
num_online_cpus());
|
||||
if (vsi->req_txq) {
|
||||
vsi->alloc_txq = vsi->req_txq;
|
||||
vsi->num_txq = vsi->req_txq;
|
||||
}
|
||||
|
||||
pf->num_lan_tx = vsi->alloc_txq;
|
||||
|
||||
/* only 1 Rx queue unless RSS is enabled */
|
||||
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
|
||||
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
|
||||
vsi->alloc_rxq = 1;
|
||||
else
|
||||
} else {
|
||||
vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
|
||||
num_online_cpus());
|
||||
if (vsi->req_rxq) {
|
||||
vsi->alloc_rxq = vsi->req_rxq;
|
||||
vsi->num_rxq = vsi->req_rxq;
|
||||
}
|
||||
}
|
||||
|
||||
pf->num_lan_rx = vsi->alloc_rxq;
|
||||
|
||||
|
@ -169,7 +181,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
|
|||
vsi->alloc_rxq = 1;
|
||||
break;
|
||||
default:
|
||||
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
|
||||
dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -215,7 +227,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
|
|||
struct ice_vsi_ctx *ctxt;
|
||||
enum ice_status status;
|
||||
|
||||
ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
|
||||
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
|
||||
if (!ctxt)
|
||||
return;
|
||||
|
||||
|
@ -227,10 +239,10 @@ void ice_vsi_delete(struct ice_vsi *vsi)
|
|||
|
||||
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
|
||||
if (status)
|
||||
dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
|
||||
vsi->vsi_num);
|
||||
dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
|
||||
vsi->vsi_num, status);
|
||||
|
||||
devm_kfree(&pf->pdev->dev, ctxt);
|
||||
kfree(ctxt);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -240,26 +252,29 @@ void ice_vsi_delete(struct ice_vsi *vsi)
|
|||
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
/* free the ring and vector containers */
|
||||
if (vsi->q_vectors) {
|
||||
devm_kfree(&pf->pdev->dev, vsi->q_vectors);
|
||||
devm_kfree(dev, vsi->q_vectors);
|
||||
vsi->q_vectors = NULL;
|
||||
}
|
||||
if (vsi->tx_rings) {
|
||||
devm_kfree(&pf->pdev->dev, vsi->tx_rings);
|
||||
devm_kfree(dev, vsi->tx_rings);
|
||||
vsi->tx_rings = NULL;
|
||||
}
|
||||
if (vsi->rx_rings) {
|
||||
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
|
||||
devm_kfree(dev, vsi->rx_rings);
|
||||
vsi->rx_rings = NULL;
|
||||
}
|
||||
if (vsi->txq_map) {
|
||||
devm_kfree(&pf->pdev->dev, vsi->txq_map);
|
||||
devm_kfree(dev, vsi->txq_map);
|
||||
vsi->txq_map = NULL;
|
||||
}
|
||||
if (vsi->rxq_map) {
|
||||
devm_kfree(&pf->pdev->dev, vsi->rxq_map);
|
||||
devm_kfree(dev, vsi->rxq_map);
|
||||
vsi->rxq_map = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -276,6 +291,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
|
|||
int ice_vsi_clear(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = NULL;
|
||||
struct device *dev;
|
||||
|
||||
if (!vsi)
|
||||
return 0;
|
||||
|
@ -284,10 +300,10 @@ int ice_vsi_clear(struct ice_vsi *vsi)
|
|||
return -EINVAL;
|
||||
|
||||
pf = vsi->back;
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
|
||||
dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
|
||||
vsi->idx);
|
||||
dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -300,7 +316,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
|
|||
|
||||
ice_vsi_free_arrays(vsi);
|
||||
mutex_unlock(&pf->sw_mutex);
|
||||
devm_kfree(&pf->pdev->dev, vsi);
|
||||
devm_kfree(dev, vsi);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -333,6 +349,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
|
|||
static struct ice_vsi *
|
||||
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_vsi *vsi = NULL;
|
||||
|
||||
/* Need to protect the allocation of the VSIs at the PF level */
|
||||
|
@ -343,11 +360,11 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
|
|||
* is available to be populated
|
||||
*/
|
||||
if (pf->next_vsi == ICE_NO_VSI) {
|
||||
dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
|
||||
dev_dbg(dev, "out of VSI slots!\n");
|
||||
goto unlock_pf;
|
||||
}
|
||||
|
||||
vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
|
||||
vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
|
||||
if (!vsi)
|
||||
goto unlock_pf;
|
||||
|
||||
|
@ -379,7 +396,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
|
|||
goto err_rings;
|
||||
break;
|
||||
default:
|
||||
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
|
||||
dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
|
||||
goto unlock_pf;
|
||||
}
|
||||
|
||||
|
@ -392,7 +409,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
|
|||
goto unlock_pf;
|
||||
|
||||
err_rings:
|
||||
devm_kfree(&pf->pdev->dev, vsi);
|
||||
devm_kfree(dev, vsi);
|
||||
vsi = NULL;
|
||||
unlock_pf:
|
||||
mutex_unlock(&pf->sw_mutex);
|
||||
|
@ -481,14 +498,15 @@ bool ice_is_safe_mode(struct ice_pf *pf)
|
|||
*/
|
||||
static void ice_rss_clean(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
|
||||
pf = vsi->back;
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
if (vsi->rss_hkey_user)
|
||||
devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
|
||||
devm_kfree(dev, vsi->rss_hkey_user);
|
||||
if (vsi->rss_lut_user)
|
||||
devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
|
||||
devm_kfree(dev, vsi->rss_lut_user);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -526,7 +544,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
|
|||
case ICE_VSI_LB:
|
||||
break;
|
||||
default:
|
||||
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
|
||||
dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
|
||||
vsi->type);
|
||||
break;
|
||||
}
|
||||
|
@ -630,7 +648,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
|
|||
else
|
||||
max_rss = ICE_MAX_SMALL_RSS_QS;
|
||||
qcount_rx = min_t(int, rx_numq_tc, max_rss);
|
||||
qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
|
||||
if (!vsi->req_rxq)
|
||||
qcount_rx = min_t(int, qcount_rx,
|
||||
vsi->rss_size);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -702,9 +722,11 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
|
|||
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
|
||||
{
|
||||
u8 lut_type, hash_type;
|
||||
struct device *dev;
|
||||
struct ice_pf *pf;
|
||||
|
||||
pf = vsi->back;
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
switch (vsi->type) {
|
||||
case ICE_VSI_PF:
|
||||
|
@ -718,11 +740,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
|
|||
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
|
||||
break;
|
||||
case ICE_VSI_LB:
|
||||
dev_dbg(&pf->pdev->dev, "Unsupported VSI type %s\n",
|
||||
dev_dbg(dev, "Unsupported VSI type %s\n",
|
||||
ice_vsi_type_str(vsi->type));
|
||||
return;
|
||||
default:
|
||||
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
|
||||
dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -735,18 +757,21 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
|
|||
/**
|
||||
* ice_vsi_init - Create and initialize a VSI
|
||||
* @vsi: the VSI being configured
|
||||
* @init_vsi: is this call creating a VSI
|
||||
*
|
||||
* This initializes a VSI context depending on the VSI type to be added and
|
||||
* passes it down to the add_vsi aq command to create a new VSI.
|
||||
*/
|
||||
static int ice_vsi_init(struct ice_vsi *vsi)
|
||||
static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
struct ice_vsi_ctx *ctxt;
|
||||
struct device *dev;
|
||||
int ret = 0;
|
||||
|
||||
ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
|
||||
dev = ice_pf_to_dev(pf);
|
||||
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
|
||||
if (!ctxt)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -763,7 +788,8 @@ static int ice_vsi_init(struct ice_vsi *vsi)
|
|||
ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
|
||||
break;
|
||||
default:
|
||||
return -ENODEV;
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ice_set_dflt_vsi_ctx(ctxt);
|
||||
|
@ -772,11 +798,24 @@ static int ice_vsi_init(struct ice_vsi *vsi)
|
|||
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
|
||||
|
||||
/* Set LUT type and HASH type if RSS is enabled */
|
||||
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
|
||||
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
|
||||
ice_set_rss_vsi_ctx(ctxt, vsi);
|
||||
/* if updating VSI context, make sure to set valid_section:
|
||||
* to indicate which section of VSI context being updated
|
||||
*/
|
||||
if (!init_vsi)
|
||||
ctxt->info.valid_sections |=
|
||||
cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
|
||||
}
|
||||
|
||||
ctxt->info.sw_id = vsi->port_info->sw_id;
|
||||
ice_vsi_setup_q_map(vsi, ctxt);
|
||||
if (!init_vsi) /* means VSI being updated */
|
||||
/* must to indicate which section of VSI context are
|
||||
* being modified
|
||||
*/
|
||||
ctxt->info.valid_sections |=
|
||||
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
|
||||
|
||||
/* Enable MAC Antispoof with new VSI being initialized or updated */
|
||||
if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
|
||||
|
@ -793,11 +832,20 @@ static int ice_vsi_init(struct ice_vsi *vsi)
|
|||
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
|
||||
}
|
||||
|
||||
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Add VSI failed, err %d\n", ret);
|
||||
return -EIO;
|
||||
if (init_vsi) {
|
||||
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
|
||||
if (ret) {
|
||||
dev_err(dev, "Add VSI failed, err %d\n", ret);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
|
||||
if (ret) {
|
||||
dev_err(dev, "Update VSI failed, err %d\n", ret);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* keep context for update VSI operations */
|
||||
|
@ -806,7 +854,8 @@ static int ice_vsi_init(struct ice_vsi *vsi)
|
|||
/* record VSI number returned */
|
||||
vsi->vsi_num = ctxt->vsi_num;
|
||||
|
||||
devm_kfree(&pf->pdev->dev, ctxt);
|
||||
out:
|
||||
kfree(ctxt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -823,14 +872,16 @@ static int ice_vsi_init(struct ice_vsi *vsi)
|
|||
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
u16 num_q_vectors;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
/* SRIOV doesn't grab irq_tracker entries for each VSI */
|
||||
if (vsi->type == ICE_VSI_VF)
|
||||
return 0;
|
||||
|
||||
if (vsi->base_vector) {
|
||||
dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
|
||||
dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
|
||||
vsi->vsi_num, vsi->base_vector);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
@ -840,7 +891,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
|
|||
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
|
||||
vsi->idx);
|
||||
if (vsi->base_vector < 0) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
|
||||
num_q_vectors, vsi->vsi_num, vsi->base_vector);
|
||||
return -ENOENT;
|
||||
|
@ -883,8 +934,10 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
|
|||
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
int i;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
/* Allocate Tx rings */
|
||||
for (i = 0; i < vsi->alloc_txq; i++) {
|
||||
struct ice_ring *ring;
|
||||
|
@ -899,7 +952,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
|
|||
ring->reg_idx = vsi->txq_map[i];
|
||||
ring->ring_active = false;
|
||||
ring->vsi = vsi;
|
||||
ring->dev = &pf->pdev->dev;
|
||||
ring->dev = dev;
|
||||
ring->count = vsi->num_tx_desc;
|
||||
vsi->tx_rings[i] = ring;
|
||||
}
|
||||
|
@ -918,7 +971,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
|
|||
ring->ring_active = false;
|
||||
ring->vsi = vsi;
|
||||
ring->netdev = vsi->netdev;
|
||||
ring->dev = &pf->pdev->dev;
|
||||
ring->dev = dev;
|
||||
ring->count = vsi->num_rx_desc;
|
||||
vsi->rx_rings[i] = ring;
|
||||
}
|
||||
|
@ -944,8 +997,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
|
|||
int err = 0;
|
||||
u8 *lut;
|
||||
|
||||
lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
|
||||
GFP_KERNEL);
|
||||
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
|
||||
if (!lut)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -958,7 +1010,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
|
|||
}
|
||||
|
||||
err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
|
||||
devm_kfree(&vsi->back->pdev->dev, lut);
|
||||
kfree(lut);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -971,12 +1023,14 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
|
|||
struct ice_aqc_get_set_rss_keys *key;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
int err = 0;
|
||||
u8 *lut;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
|
||||
|
||||
lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
|
||||
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
|
||||
if (!lut)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -989,13 +1043,12 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
|
|||
vsi->rss_table_size);
|
||||
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"set_rss_lut failed, error %d\n", status);
|
||||
dev_err(dev, "set_rss_lut failed, error %d\n", status);
|
||||
err = -EIO;
|
||||
goto ice_vsi_cfg_rss_exit;
|
||||
}
|
||||
|
||||
key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
|
||||
key = kzalloc(sizeof(*key), GFP_KERNEL);
|
||||
if (!key) {
|
||||
err = -ENOMEM;
|
||||
goto ice_vsi_cfg_rss_exit;
|
||||
|
@ -1012,14 +1065,13 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
|
|||
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
|
||||
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
|
||||
status);
|
||||
dev_err(dev, "set_rss_key failed, error %d\n", status);
|
||||
err = -EIO;
|
||||
}
|
||||
|
||||
devm_kfree(&pf->pdev->dev, key);
|
||||
kfree(key);
|
||||
ice_vsi_cfg_rss_exit:
|
||||
devm_kfree(&pf->pdev->dev, lut);
|
||||
kfree(lut);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1039,7 +1091,7 @@ int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
|
|||
struct ice_fltr_list_entry *tmp;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
|
||||
tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
|
||||
tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
|
||||
if (!tmp)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1131,9 +1183,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
|
|||
struct ice_pf *pf = vsi->back;
|
||||
LIST_HEAD(tmp_add_list);
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
int err = 0;
|
||||
|
||||
tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
|
||||
dev = ice_pf_to_dev(pf);
|
||||
tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
|
||||
if (!tmp)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1150,11 +1204,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
|
|||
status = ice_add_vlan(&pf->hw, &tmp_add_list);
|
||||
if (status) {
|
||||
err = -ENODEV;
|
||||
dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
|
||||
vid, vsi->vsi_num);
|
||||
dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
|
||||
vsi->vsi_num);
|
||||
}
|
||||
|
||||
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
|
||||
ice_free_fltr_list(dev, &tmp_add_list);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1171,9 +1225,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
|
|||
struct ice_pf *pf = vsi->back;
|
||||
LIST_HEAD(tmp_add_list);
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
int err = 0;
|
||||
|
||||
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
|
||||
dev = ice_pf_to_dev(pf);
|
||||
list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
|
||||
if (!list)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1189,17 +1245,17 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
|
|||
|
||||
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
|
||||
if (status == ICE_ERR_DOES_NOT_EXIST) {
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
|
||||
vid, vsi->vsi_num, status);
|
||||
} else if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"Error removing VLAN %d on vsi %i error: %d\n",
|
||||
vid, vsi->vsi_num, status);
|
||||
err = -EIO;
|
||||
}
|
||||
|
||||
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
|
||||
ice_free_fltr_list(dev, &tmp_add_list);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1397,13 +1453,12 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
|
|||
*/
|
||||
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
|
||||
{
|
||||
struct device *dev = &vsi->back->pdev->dev;
|
||||
struct ice_hw *hw = &vsi->back->hw;
|
||||
struct ice_vsi_ctx *ctxt;
|
||||
enum ice_status status;
|
||||
int ret = 0;
|
||||
|
||||
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
|
||||
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
|
||||
if (!ctxt)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1421,7 +1476,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
|
|||
|
||||
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
|
||||
if (status) {
|
||||
dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
|
||||
dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
|
||||
status, hw->adminq.sq_last_status);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
|
@ -1429,7 +1484,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
|
|||
|
||||
vsi->info.vlan_flags = ctxt->info.vlan_flags;
|
||||
out:
|
||||
devm_kfree(dev, ctxt);
|
||||
kfree(ctxt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1440,13 +1495,12 @@ out:
|
|||
*/
|
||||
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
|
||||
{
|
||||
struct device *dev = &vsi->back->pdev->dev;
|
||||
struct ice_hw *hw = &vsi->back->hw;
|
||||
struct ice_vsi_ctx *ctxt;
|
||||
enum ice_status status;
|
||||
int ret = 0;
|
||||
|
||||
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
|
||||
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
|
||||
if (!ctxt)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1468,7 +1522,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
|
|||
|
||||
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
|
||||
if (status) {
|
||||
dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
|
||||
dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
|
||||
ena, status, hw->adminq.sq_last_status);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
|
@ -1476,7 +1530,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
|
|||
|
||||
vsi->info.vlan_flags = ctxt->info.vlan_flags;
|
||||
out:
|
||||
devm_kfree(dev, ctxt);
|
||||
kfree(ctxt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1569,7 +1623,6 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
|
|||
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
|
||||
{
|
||||
struct ice_vsi_ctx *ctxt;
|
||||
struct device *dev;
|
||||
struct ice_pf *pf;
|
||||
int status;
|
||||
|
||||
|
@ -1577,8 +1630,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
|
|||
return -EINVAL;
|
||||
|
||||
pf = vsi->back;
|
||||
dev = &pf->pdev->dev;
|
||||
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
|
||||
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
|
||||
if (!ctxt)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1612,11 +1664,11 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
|
|||
vsi->info.sec_flags = ctxt->info.sec_flags;
|
||||
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
|
||||
|
||||
devm_kfree(dev, ctxt);
|
||||
kfree(ctxt);
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
devm_kfree(dev, ctxt);
|
||||
kfree(ctxt);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -1685,8 +1737,10 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
|
|||
struct ice_pf *pf = vsi->back;
|
||||
LIST_HEAD(tmp_add_list);
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
|
||||
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
|
||||
dev = ice_pf_to_dev(pf);
|
||||
list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
|
||||
if (!list)
|
||||
return;
|
||||
|
||||
|
@ -1706,11 +1760,11 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
|
|||
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
|
||||
|
||||
if (status)
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
|
||||
vsi->vsi_num, status);
|
||||
|
||||
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
|
||||
ice_free_fltr_list(dev, &tmp_add_list);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1725,8 +1779,10 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
|
|||
struct ice_pf *pf = vsi->back;
|
||||
LIST_HEAD(tmp_add_list);
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
|
||||
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
|
||||
dev = ice_pf_to_dev(pf);
|
||||
list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
|
||||
if (!list)
|
||||
return;
|
||||
|
||||
|
@ -1753,12 +1809,11 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
|
|||
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
|
||||
|
||||
if (status)
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Fail %s %s LLDP rule on VSI %i error: %d\n",
|
||||
dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
|
||||
create ? "adding" : "removing", tx ? "TX" : "RX",
|
||||
vsi->vsi_num, status);
|
||||
|
||||
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
|
||||
ice_free_fltr_list(dev, &tmp_add_list);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1780,7 +1835,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
|
|||
enum ice_vsi_type type, u16 vf_id)
|
||||
{
|
||||
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
enum ice_status status;
|
||||
struct ice_vsi *vsi;
|
||||
int ret, i;
|
||||
|
@ -1816,7 +1871,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
|
|||
ice_vsi_set_tc_cfg(vsi);
|
||||
|
||||
/* create the VSI */
|
||||
ret = ice_vsi_init(vsi);
|
||||
ret = ice_vsi_init(vsi, true);
|
||||
if (ret)
|
||||
goto unroll_get_qs;
|
||||
|
||||
|
@ -1889,8 +1944,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
|
|||
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
|
||||
max_txqs);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"VSI %d failed lan queue config, error %d\n",
|
||||
dev_err(dev, "VSI %d failed lan queue config, error %d\n",
|
||||
vsi->vsi_num, status);
|
||||
goto unroll_vector_base;
|
||||
}
|
||||
|
@ -2002,8 +2056,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
|
|||
/* clear the affinity_mask in the IRQ descriptor */
|
||||
irq_set_affinity_hint(irq_num, NULL);
|
||||
synchronize_irq(irq_num);
|
||||
devm_free_irq(&pf->pdev->dev, irq_num,
|
||||
vsi->q_vectors[i]);
|
||||
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2189,7 +2242,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
|
|||
return -EINVAL;
|
||||
|
||||
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(ice_pf_to_dev(pf),
|
||||
"param err: needed=%d, num_entries = %d id=0x%04x\n",
|
||||
needed, res->num_entries, id);
|
||||
return -EINVAL;
|
||||
|
@ -2351,10 +2404,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
|
|||
/**
|
||||
* ice_vsi_rebuild - Rebuild VSI after reset
|
||||
* @vsi: VSI to be rebuild
|
||||
* @init_vsi: is this an initialization or a reconfigure of the VSI
|
||||
*
|
||||
* Returns 0 on success and negative value on failure
|
||||
*/
|
||||
int ice_vsi_rebuild(struct ice_vsi *vsi)
|
||||
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
|
||||
{
|
||||
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
|
||||
struct ice_vf *vf = NULL;
|
||||
|
@ -2406,7 +2460,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
|
|||
ice_vsi_set_tc_cfg(vsi);
|
||||
|
||||
/* Initialize VSI struct elements and create VSI in FW */
|
||||
ret = ice_vsi_init(vsi);
|
||||
ret = ice_vsi_init(vsi, init_vsi);
|
||||
if (ret < 0)
|
||||
goto err_vsi;
|
||||
|
||||
|
@ -2471,10 +2525,15 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
|
|||
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
|
||||
max_txqs);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(ice_pf_to_dev(pf),
|
||||
"VSI %d failed lan queue config, error %d\n",
|
||||
vsi->vsi_num, status);
|
||||
goto err_vectors;
|
||||
if (init_vsi) {
|
||||
ret = -EIO;
|
||||
goto err_vectors;
|
||||
} else {
|
||||
return ice_schedule_reset(pf, ICE_RESET_PFR);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
|
@ -2534,9 +2593,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
|
|||
struct ice_vsi_ctx *ctx;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
int i, ret = 0;
|
||||
u8 num_tc = 0;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
ice_for_each_traffic_class(i) {
|
||||
/* build bitmap of enabled TCs */
|
||||
if (ena_tc & BIT(i))
|
||||
|
@ -2548,7 +2610,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
|
|||
vsi->tc_cfg.ena_tc = ena_tc;
|
||||
vsi->tc_cfg.numtc = num_tc;
|
||||
|
||||
ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
|
||||
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -2561,7 +2623,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
|
|||
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
|
||||
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
|
||||
if (status) {
|
||||
dev_info(&pf->pdev->dev, "Failed VSI Update\n");
|
||||
dev_info(dev, "Failed VSI Update\n");
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
@ -2570,8 +2632,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
|
|||
max_txqs);
|
||||
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"VSI %d failed TC config, error %d\n",
|
||||
dev_err(dev, "VSI %d failed TC config, error %d\n",
|
||||
vsi->vsi_num, status);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
|
@ -2581,7 +2642,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
|
|||
|
||||
ice_vsi_cfg_netdev_tc(vsi, ena_tc);
|
||||
out:
|
||||
devm_kfree(&pf->pdev->dev, ctx);
|
||||
kfree(ctx);
|
||||
return ret;
|
||||
}
|
||||
#endif /* CONFIG_DCB */
|
||||
|
|
|
@ -73,7 +73,7 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
|
|||
int
|
||||
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
|
||||
|
||||
int ice_vsi_rebuild(struct ice_vsi *vsi);
|
||||
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
|
||||
|
||||
bool ice_is_reset_in_progress(unsigned long *state);
|
||||
|
||||
|
|
|
@ -44,6 +44,7 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
|
|||
static struct workqueue_struct *ice_wq;
|
||||
static const struct net_device_ops ice_netdev_safe_mode_ops;
|
||||
static const struct net_device_ops ice_netdev_ops;
|
||||
static int ice_vsi_open(struct ice_vsi *vsi);
|
||||
|
||||
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
|
||||
|
||||
|
@ -161,7 +162,7 @@ unregister:
|
|||
* had an error
|
||||
*/
|
||||
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(ice_pf_to_dev(pf),
|
||||
"Could not add MAC filters error %d. Unregistering device\n",
|
||||
status);
|
||||
unregister_netdev(vsi->netdev);
|
||||
|
@ -495,7 +496,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
|
|||
*/
|
||||
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
|
||||
{
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
|
||||
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
|
||||
|
@ -724,7 +725,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
|
|||
an = "False";
|
||||
|
||||
/* Get FEC mode requested based on PHY caps last SW configuration */
|
||||
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
|
||||
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
|
||||
if (!caps) {
|
||||
fec_req = "Unknown";
|
||||
goto done;
|
||||
|
@ -744,7 +745,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
|
|||
else
|
||||
fec_req = "NONE";
|
||||
|
||||
devm_kfree(&vsi->back->pdev->dev, caps);
|
||||
kfree(caps);
|
||||
|
||||
done:
|
||||
netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
|
||||
|
@ -792,6 +793,7 @@ static int
|
|||
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
|
||||
u16 link_speed)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_phy_info *phy_info;
|
||||
struct ice_vsi *vsi;
|
||||
u16 old_link_speed;
|
||||
|
@ -809,7 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
|
|||
*/
|
||||
result = ice_update_link_info(pi);
|
||||
if (result)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"Failed to update link status and re-enable link events for port %d\n",
|
||||
pi->lport);
|
||||
|
||||
|
@ -828,7 +830,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
|
|||
|
||||
result = ice_aq_set_link_restart_an(pi, false, NULL);
|
||||
if (result) {
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"Failed to set link down, VSI %d error %d\n",
|
||||
vsi->vsi_num, result);
|
||||
return result;
|
||||
|
@ -924,7 +926,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
|
|||
!!(link_data->link_info & ICE_AQ_LINK_UP),
|
||||
le16_to_cpu(link_data->link_speed));
|
||||
if (status)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(ice_pf_to_dev(pf),
|
||||
"Could not process link event, error %d\n", status);
|
||||
|
||||
return status;
|
||||
|
@ -937,6 +939,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
|
|||
*/
|
||||
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_rq_event_info event;
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
struct ice_ctl_q_info *cq;
|
||||
|
@ -958,8 +961,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
|||
qtype = "Mailbox";
|
||||
break;
|
||||
default:
|
||||
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
|
||||
q_type);
|
||||
dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -971,15 +973,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
|||
PF_FW_ARQLEN_ARQCRIT_M)) {
|
||||
oldval = val;
|
||||
if (val & PF_FW_ARQLEN_ARQVFE_M)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"%s Receive Queue VF Error detected\n", qtype);
|
||||
dev_dbg(dev, "%s Receive Queue VF Error detected\n",
|
||||
qtype);
|
||||
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"%s Receive Queue Overflow Error detected\n",
|
||||
qtype);
|
||||
}
|
||||
if (val & PF_FW_ARQLEN_ARQCRIT_M)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"%s Receive Queue Critical Error detected\n",
|
||||
qtype);
|
||||
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
|
||||
|
@ -993,16 +995,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
|||
PF_FW_ATQLEN_ATQCRIT_M)) {
|
||||
oldval = val;
|
||||
if (val & PF_FW_ATQLEN_ATQVFE_M)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"%s Send Queue VF Error detected\n", qtype);
|
||||
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"%s Send Queue Overflow Error detected\n",
|
||||
dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
|
||||
qtype);
|
||||
}
|
||||
if (val & PF_FW_ATQLEN_ATQCRIT_M)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"%s Send Queue Critical Error detected\n",
|
||||
dev_dbg(dev, "%s Send Queue Critical Error detected\n",
|
||||
qtype);
|
||||
val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
|
||||
PF_FW_ATQLEN_ATQCRIT_M);
|
||||
|
@ -1011,8 +1011,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
|||
}
|
||||
|
||||
event.buf_len = cq->rq_buf_size;
|
||||
event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
|
||||
GFP_KERNEL);
|
||||
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
|
||||
if (!event.msg_buf)
|
||||
return 0;
|
||||
|
||||
|
@ -1024,8 +1023,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
|||
if (ret == ICE_ERR_AQ_NO_WORK)
|
||||
break;
|
||||
if (ret) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"%s Receive Queue event error %d\n", qtype,
|
||||
dev_err(dev, "%s Receive Queue event error %d\n", qtype,
|
||||
ret);
|
||||
break;
|
||||
}
|
||||
|
@ -1035,8 +1033,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
|||
switch (opcode) {
|
||||
case ice_aqc_opc_get_link_status:
|
||||
if (ice_handle_link_event(pf, &event))
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Could not handle link event\n");
|
||||
dev_err(dev, "Could not handle link event\n");
|
||||
break;
|
||||
case ice_mbx_opc_send_msg_to_pf:
|
||||
ice_vc_process_vf_msg(pf, &event);
|
||||
|
@ -1048,14 +1045,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
|
|||
ice_dcb_process_lldp_set_mib_change(pf, &event);
|
||||
break;
|
||||
default:
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(dev,
|
||||
"%s Receive Queue unknown event 0x%04x ignored\n",
|
||||
qtype, opcode);
|
||||
break;
|
||||
}
|
||||
} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
|
||||
|
||||
devm_kfree(&pf->pdev->dev, event.msg_buf);
|
||||
kfree(event.msg_buf);
|
||||
|
||||
return pending && (i == ICE_DFLT_IRQ_WORK);
|
||||
}
|
||||
|
@ -1199,6 +1196,7 @@ static void ice_service_timer(struct timer_list *t)
|
|||
*/
|
||||
static void ice_handle_mdd_event(struct ice_pf *pf)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
bool mdd_detected = false;
|
||||
u32 reg;
|
||||
|
@ -1220,7 +1218,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
GL_MDET_TX_PQM_QNUM_S);
|
||||
|
||||
if (netif_msg_tx_err(pf))
|
||||
dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
|
||||
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
|
||||
event, queue, pf_num, vf_num);
|
||||
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
|
||||
mdd_detected = true;
|
||||
|
@ -1238,7 +1236,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
GL_MDET_TX_TCLAN_QNUM_S);
|
||||
|
||||
if (netif_msg_rx_err(pf))
|
||||
dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
|
||||
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
|
||||
event, queue, pf_num, vf_num);
|
||||
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
|
||||
mdd_detected = true;
|
||||
|
@ -1256,7 +1254,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
GL_MDET_RX_QNUM_S);
|
||||
|
||||
if (netif_msg_rx_err(pf))
|
||||
dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
|
||||
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
|
||||
event, queue, pf_num, vf_num);
|
||||
wr32(hw, GL_MDET_RX, 0xffffffff);
|
||||
mdd_detected = true;
|
||||
|
@ -1268,21 +1266,21 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
reg = rd32(hw, PF_MDET_TX_PQM);
|
||||
if (reg & PF_MDET_TX_PQM_VALID_M) {
|
||||
wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
|
||||
dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
|
||||
dev_info(dev, "TX driver issue detected, PF reset issued\n");
|
||||
pf_mdd_detected = true;
|
||||
}
|
||||
|
||||
reg = rd32(hw, PF_MDET_TX_TCLAN);
|
||||
if (reg & PF_MDET_TX_TCLAN_VALID_M) {
|
||||
wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
|
||||
dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
|
||||
dev_info(dev, "TX driver issue detected, PF reset issued\n");
|
||||
pf_mdd_detected = true;
|
||||
}
|
||||
|
||||
reg = rd32(hw, PF_MDET_RX);
|
||||
if (reg & PF_MDET_RX_VALID_M) {
|
||||
wr32(hw, PF_MDET_RX, 0xFFFF);
|
||||
dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
|
||||
dev_info(dev, "RX driver issue detected, PF reset issued\n");
|
||||
pf_mdd_detected = true;
|
||||
}
|
||||
/* Queue belongs to the PF initiate a reset */
|
||||
|
@ -1302,7 +1300,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
if (reg & VP_MDET_TX_PQM_VALID_M) {
|
||||
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
|
||||
vf_mdd_detected = true;
|
||||
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
|
||||
dev_info(dev, "TX driver issue detected on VF %d\n",
|
||||
i);
|
||||
}
|
||||
|
||||
|
@ -1310,7 +1308,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
|
||||
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
|
||||
vf_mdd_detected = true;
|
||||
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
|
||||
dev_info(dev, "TX driver issue detected on VF %d\n",
|
||||
i);
|
||||
}
|
||||
|
||||
|
@ -1318,7 +1316,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
if (reg & VP_MDET_TX_TDPU_VALID_M) {
|
||||
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
|
||||
vf_mdd_detected = true;
|
||||
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
|
||||
dev_info(dev, "TX driver issue detected on VF %d\n",
|
||||
i);
|
||||
}
|
||||
|
||||
|
@ -1326,7 +1324,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
if (reg & VP_MDET_RX_VALID_M) {
|
||||
wr32(hw, VP_MDET_RX(i), 0xFFFF);
|
||||
vf_mdd_detected = true;
|
||||
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
|
||||
dev_info(dev, "RX driver issue detected on VF %d\n",
|
||||
i);
|
||||
}
|
||||
|
||||
|
@ -1334,7 +1332,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
|
|||
vf->num_mdd_events++;
|
||||
if (vf->num_mdd_events &&
|
||||
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
|
||||
dev_info(&pf->pdev->dev,
|
||||
dev_info(dev,
|
||||
"VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
|
||||
i, vf->num_mdd_events);
|
||||
}
|
||||
|
@ -1370,7 +1368,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
|
|||
|
||||
pi = vsi->port_info;
|
||||
|
||||
pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
|
||||
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
|
||||
if (!pcaps)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1389,7 +1387,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
|
|||
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
|
||||
goto out;
|
||||
|
||||
cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
|
||||
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
|
||||
if (!cfg) {
|
||||
retcode = -ENOMEM;
|
||||
goto out;
|
||||
|
@ -1414,9 +1412,9 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
|
|||
retcode = -EIO;
|
||||
}
|
||||
|
||||
devm_kfree(dev, cfg);
|
||||
kfree(cfg);
|
||||
out:
|
||||
devm_kfree(dev, pcaps);
|
||||
kfree(pcaps);
|
||||
return retcode;
|
||||
}
|
||||
|
||||
|
@ -1527,6 +1525,44 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
|
|||
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_schedule_reset - schedule a reset
|
||||
* @pf: board private structure
|
||||
* @reset: reset being requested
|
||||
*/
|
||||
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
|
||||
/* bail out if earlier reset has failed */
|
||||
if (test_bit(__ICE_RESET_FAILED, pf->state)) {
|
||||
dev_dbg(dev, "earlier reset has failed\n");
|
||||
return -EIO;
|
||||
}
|
||||
/* bail if reset/recovery already in progress */
|
||||
if (ice_is_reset_in_progress(pf->state)) {
|
||||
dev_dbg(dev, "Reset already in progress\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
switch (reset) {
|
||||
case ICE_RESET_PFR:
|
||||
set_bit(__ICE_PFR_REQ, pf->state);
|
||||
break;
|
||||
case ICE_RESET_CORER:
|
||||
set_bit(__ICE_CORER_REQ, pf->state);
|
||||
break;
|
||||
case ICE_RESET_GLOBR:
|
||||
set_bit(__ICE_GLOBR_REQ, pf->state);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ice_service_task_schedule(pf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_irq_affinity_notify - Callback for affinity changes
|
||||
* @notify: context as to what irq was changed
|
||||
|
@ -1581,11 +1617,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
|
|||
int q_vectors = vsi->num_q_vectors;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
int base = vsi->base_vector;
|
||||
struct device *dev;
|
||||
int rx_int_idx = 0;
|
||||
int tx_int_idx = 0;
|
||||
int vector, err;
|
||||
int irq_num;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
for (vector = 0; vector < q_vectors; vector++) {
|
||||
struct ice_q_vector *q_vector = vsi->q_vectors[vector];
|
||||
|
||||
|
@ -1605,8 +1643,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
|
|||
/* skip this unused q_vector */
|
||||
continue;
|
||||
}
|
||||
err = devm_request_irq(&pf->pdev->dev, irq_num,
|
||||
vsi->irq_handler, 0,
|
||||
err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
|
||||
q_vector->name, q_vector);
|
||||
if (err) {
|
||||
netdev_err(vsi->netdev,
|
||||
|
@ -1632,7 +1669,7 @@ free_q_irqs:
|
|||
irq_num = pf->msix_entries[base + vector].vector,
|
||||
irq_set_affinity_notifier(irq_num, NULL);
|
||||
irq_set_affinity_hint(irq_num, NULL);
|
||||
devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]);
|
||||
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
@ -1721,9 +1758,11 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
|
|||
.mapping_mode = ICE_VSI_MAP_CONTIG
|
||||
};
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
int i, v_idx;
|
||||
|
||||
vsi->xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq,
|
||||
dev = ice_pf_to_dev(pf);
|
||||
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
|
||||
sizeof(*vsi->xdp_rings), GFP_KERNEL);
|
||||
if (!vsi->xdp_rings)
|
||||
return -ENOMEM;
|
||||
|
@ -1770,8 +1809,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
|
|||
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
|
||||
max_txqs);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Failed VSI LAN queue config for XDP, error:%d\n",
|
||||
dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
|
||||
status);
|
||||
goto clear_xdp_rings;
|
||||
}
|
||||
|
@ -1793,7 +1831,7 @@ err_map_xdp:
|
|||
}
|
||||
mutex_unlock(&pf->avail_q_mutex);
|
||||
|
||||
devm_kfree(&pf->pdev->dev, vsi->xdp_rings);
|
||||
devm_kfree(dev, vsi->xdp_rings);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -1846,7 +1884,7 @@ free_qmap:
|
|||
vsi->xdp_rings[i] = NULL;
|
||||
}
|
||||
|
||||
devm_kfree(&pf->pdev->dev, vsi->xdp_rings);
|
||||
devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
|
||||
vsi->xdp_rings = NULL;
|
||||
|
||||
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
|
||||
|
@ -1993,8 +2031,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
|
|||
struct ice_pf *pf = (struct ice_pf *)data;
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
struct device *dev;
|
||||
u32 oicr, ena_mask;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
|
||||
set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
|
||||
|
||||
|
@ -2030,8 +2070,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
|
|||
else if (reset == ICE_RESET_EMPR)
|
||||
pf->empr_count++;
|
||||
else
|
||||
dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
|
||||
reset);
|
||||
dev_dbg(dev, "Invalid reset type %d\n", reset);
|
||||
|
||||
/* If a reset cycle isn't already in progress, we set a bit in
|
||||
* pf->state so that the service task can start a reset/rebuild.
|
||||
|
@ -2065,8 +2104,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
|
|||
|
||||
if (oicr & PFINT_OICR_HMC_ERR_M) {
|
||||
ena_mask &= ~PFINT_OICR_HMC_ERR_M;
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
"HMC Error interrupt - info 0x%x, data 0x%x\n",
|
||||
dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
|
||||
rd32(hw, PFHMC_ERRORINFO),
|
||||
rd32(hw, PFHMC_ERRORDATA));
|
||||
}
|
||||
|
@ -2074,8 +2112,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
|
|||
/* Report any remaining unexpected interrupts */
|
||||
oicr &= ena_mask;
|
||||
if (oicr) {
|
||||
dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
|
||||
oicr);
|
||||
dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
|
||||
/* If a critical error is pending there is no choice but to
|
||||
* reset the device.
|
||||
*/
|
||||
|
@ -2133,7 +2170,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
|
|||
|
||||
if (pf->msix_entries) {
|
||||
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
|
||||
devm_free_irq(&pf->pdev->dev,
|
||||
devm_free_irq(ice_pf_to_dev(pf),
|
||||
pf->msix_entries[pf->oicr_idx].vector, pf);
|
||||
}
|
||||
|
||||
|
@ -2177,13 +2214,13 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
|
|||
*/
|
||||
static int ice_req_irq_msix_misc(struct ice_pf *pf)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
int oicr_idx, err = 0;
|
||||
|
||||
if (!pf->int_name[0])
|
||||
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
|
||||
dev_driver_string(&pf->pdev->dev),
|
||||
dev_name(&pf->pdev->dev));
|
||||
dev_driver_string(dev), dev_name(dev));
|
||||
|
||||
/* Do not request IRQ but do enable OICR interrupt since settings are
|
||||
* lost during reset. Note that this function is called only during
|
||||
|
@ -2200,12 +2237,10 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
|
|||
pf->num_avail_sw_msix -= 1;
|
||||
pf->oicr_idx = oicr_idx;
|
||||
|
||||
err = devm_request_irq(&pf->pdev->dev,
|
||||
pf->msix_entries[pf->oicr_idx].vector,
|
||||
err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
|
||||
ice_misc_intr, 0, pf->int_name, pf);
|
||||
if (err) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"devm_request_irq for %s failed: %d\n",
|
||||
dev_err(dev, "devm_request_irq for %s failed: %d\n",
|
||||
pf->int_name, err);
|
||||
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
|
||||
pf->num_avail_sw_msix += 1;
|
||||
|
@ -2338,7 +2373,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
|
|||
ice_set_ops(netdev);
|
||||
|
||||
if (vsi->type == ICE_VSI_PF) {
|
||||
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
|
||||
SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
|
||||
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
|
||||
ether_addr_copy(netdev->dev_addr, mac_addr);
|
||||
ether_addr_copy(netdev->perm_addr, mac_addr);
|
||||
|
@ -2665,7 +2700,7 @@ static int ice_init_pf(struct ice_pf *pf)
|
|||
|
||||
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
|
||||
if (!pf->avail_rxqs) {
|
||||
devm_kfree(&pf->pdev->dev, pf->avail_txqs);
|
||||
devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
|
||||
pf->avail_txqs = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -2682,6 +2717,7 @@ static int ice_init_pf(struct ice_pf *pf)
|
|||
*/
|
||||
static int ice_ena_msix_range(struct ice_pf *pf)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
int v_left, v_actual, v_budget = 0;
|
||||
int needed, err, i;
|
||||
|
||||
|
@ -2702,7 +2738,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
|
|||
v_budget += needed;
|
||||
v_left -= needed;
|
||||
|
||||
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
|
||||
pf->msix_entries = devm_kcalloc(dev, v_budget,
|
||||
sizeof(*pf->msix_entries), GFP_KERNEL);
|
||||
|
||||
if (!pf->msix_entries) {
|
||||
|
@ -2718,13 +2754,13 @@ static int ice_ena_msix_range(struct ice_pf *pf)
|
|||
ICE_MIN_MSIX, v_budget);
|
||||
|
||||
if (v_actual < 0) {
|
||||
dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
|
||||
dev_err(dev, "unable to reserve MSI-X vectors\n");
|
||||
err = v_actual;
|
||||
goto msix_err;
|
||||
}
|
||||
|
||||
if (v_actual < v_budget) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
dev_warn(dev,
|
||||
"not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
|
||||
v_budget, v_actual);
|
||||
/* 2 vectors for LAN (traffic + OICR) */
|
||||
|
@ -2743,11 +2779,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
|
|||
return v_actual;
|
||||
|
||||
msix_err:
|
||||
devm_kfree(&pf->pdev->dev, pf->msix_entries);
|
||||
devm_kfree(dev, pf->msix_entries);
|
||||
goto exit_err;
|
||||
|
||||
no_hw_vecs_left_err:
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"not enough device MSI-X vectors. requested = %d, available = %d\n",
|
||||
needed, v_left);
|
||||
err = -ERANGE;
|
||||
|
@ -2763,7 +2799,7 @@ exit_err:
|
|||
static void ice_dis_msix(struct ice_pf *pf)
|
||||
{
|
||||
pci_disable_msix(pf->pdev);
|
||||
devm_kfree(&pf->pdev->dev, pf->msix_entries);
|
||||
devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
|
||||
pf->msix_entries = NULL;
|
||||
}
|
||||
|
||||
|
@ -2776,7 +2812,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
|
|||
ice_dis_msix(pf);
|
||||
|
||||
if (pf->irq_tracker) {
|
||||
devm_kfree(&pf->pdev->dev, pf->irq_tracker);
|
||||
devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
|
||||
pf->irq_tracker = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -2796,7 +2832,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
|
|||
|
||||
/* set up vector assignment tracking */
|
||||
pf->irq_tracker =
|
||||
devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
|
||||
devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
|
||||
(sizeof(u16) * vectors), GFP_KERNEL);
|
||||
if (!pf->irq_tracker) {
|
||||
ice_dis_msix(pf);
|
||||
|
@ -2811,6 +2847,52 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_recfg_qs - Change the number of queues on a VSI
|
||||
* @vsi: VSI being changed
|
||||
* @new_rx: new number of Rx queues
|
||||
* @new_tx: new number of Tx queues
|
||||
*
|
||||
* Only change the number of queues if new_tx, or new_rx is non-0.
|
||||
*
|
||||
* Returns 0 on success.
|
||||
*/
|
||||
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
int err = 0, timeout = 50;
|
||||
|
||||
if (!new_rx && !new_tx)
|
||||
return -EINVAL;
|
||||
|
||||
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
|
||||
timeout--;
|
||||
if (!timeout)
|
||||
return -EBUSY;
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
|
||||
if (new_tx)
|
||||
vsi->req_txq = new_tx;
|
||||
if (new_rx)
|
||||
vsi->req_rxq = new_rx;
|
||||
|
||||
/* set for the next time the netdev is started */
|
||||
if (!netif_running(vsi->netdev)) {
|
||||
ice_vsi_rebuild(vsi, false);
|
||||
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
ice_vsi_close(vsi);
|
||||
ice_vsi_rebuild(vsi, false);
|
||||
ice_pf_dcb_recfg(pf);
|
||||
ice_vsi_open(vsi);
|
||||
done:
|
||||
clear_bit(__ICE_CFG_BUSY, pf->state);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_log_pkg_init - log result of DDP package load
|
||||
* @hw: pointer to hardware info
|
||||
|
@ -2820,7 +2902,7 @@ static void
|
|||
ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
|
||||
{
|
||||
struct ice_pf *pf = (struct ice_pf *)hw->back;
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
|
||||
switch (*status) {
|
||||
case ICE_SUCCESS:
|
||||
|
@ -2939,7 +3021,7 @@ static void
|
|||
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
|
||||
{
|
||||
enum ice_status status = ICE_ERR_PARAM;
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
|
||||
/* Load DDP Package */
|
||||
|
@ -2979,7 +3061,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
|
|||
static void ice_verify_cacheline_size(struct ice_pf *pf)
|
||||
{
|
||||
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
|
||||
dev_warn(&pf->pdev->dev,
|
||||
dev_warn(ice_pf_to_dev(pf),
|
||||
"%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
|
||||
ICE_CACHE_LINE_BYTES);
|
||||
}
|
||||
|
@ -3049,7 +3131,7 @@ static void ice_request_fw(struct ice_pf *pf)
|
|||
{
|
||||
char *opt_fw_filename = ice_get_opt_fw_name(pf);
|
||||
const struct firmware *firmware = NULL;
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
int err = 0;
|
||||
|
||||
/* optional device-specific DDP (if present) overrides the default DDP
|
||||
|
@ -3240,7 +3322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
|
|||
|
||||
err = ice_setup_pf_sw(pf);
|
||||
if (err) {
|
||||
dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
|
||||
dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
|
||||
goto err_alloc_sw_unroll;
|
||||
}
|
||||
|
||||
|
@ -3288,7 +3370,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
|
|||
err_alloc_sw_unroll:
|
||||
set_bit(__ICE_SERVICE_DIS, pf->state);
|
||||
set_bit(__ICE_DOWN, pf->state);
|
||||
devm_kfree(&pf->pdev->dev, pf->first_sw);
|
||||
devm_kfree(dev, pf->first_sw);
|
||||
err_msix_misc_unroll:
|
||||
ice_free_irq_msix_misc(pf);
|
||||
err_init_interrupt_unroll:
|
||||
|
@ -4410,7 +4492,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
|
|||
goto err_setup_rx;
|
||||
|
||||
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
|
||||
dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
|
||||
dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
|
||||
err = ice_vsi_req_irq_msix(vsi, int_name);
|
||||
if (err)
|
||||
goto err_setup_rx;
|
||||
|
@ -4459,7 +4541,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
|
|||
|
||||
err = ice_vsi_release(pf->vsi[i]);
|
||||
if (err)
|
||||
dev_dbg(&pf->pdev->dev,
|
||||
dev_dbg(ice_pf_to_dev(pf),
|
||||
"Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
|
||||
i, err, pf->vsi[i]->vsi_num);
|
||||
}
|
||||
|
@ -4474,6 +4556,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
|
|||
*/
|
||||
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
enum ice_status status;
|
||||
int i, err;
|
||||
|
||||
|
@ -4484,9 +4567,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
|
|||
continue;
|
||||
|
||||
/* rebuild the VSI */
|
||||
err = ice_vsi_rebuild(vsi);
|
||||
err = ice_vsi_rebuild(vsi, true);
|
||||
if (err) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"rebuild VSI failed, err %d, VSI index %d, type %s\n",
|
||||
err, vsi->idx, ice_vsi_type_str(type));
|
||||
return err;
|
||||
|
@ -4495,7 +4578,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
|
|||
/* replay filters for the VSI */
|
||||
status = ice_replay_vsi(&pf->hw, vsi->idx);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"replay VSI failed, status %d, VSI index %d, type %s\n",
|
||||
status, vsi->idx, ice_vsi_type_str(type));
|
||||
return -EIO;
|
||||
|
@ -4509,14 +4592,14 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
|
|||
/* enable the VSI */
|
||||
err = ice_ena_vsi(vsi, false);
|
||||
if (err) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
dev_err(dev,
|
||||
"enable VSI failed, err %d, VSI index %d, type %s\n",
|
||||
err, vsi->idx, ice_vsi_type_str(type));
|
||||
return err;
|
||||
}
|
||||
|
||||
dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %s\n",
|
||||
vsi->idx, ice_vsi_type_str(type));
|
||||
dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
|
||||
ice_vsi_type_str(type));
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -4555,7 +4638,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
|
|||
*/
|
||||
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
|
||||
{
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
enum ice_status ret;
|
||||
int err;
|
||||
|
@ -4601,7 +4684,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
|
|||
|
||||
err = ice_update_link_info(hw->port_info);
|
||||
if (err)
|
||||
dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
|
||||
dev_err(dev, "Get link status error %d\n", err);
|
||||
|
||||
/* start misc vector */
|
||||
err = ice_req_irq_msix_misc(pf);
|
||||
|
@ -4760,7 +4843,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
|
|||
struct ice_pf *pf = vsi->back;
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (seed) {
|
||||
struct ice_aqc_get_set_rss_keys *buf =
|
||||
(struct ice_aqc_get_set_rss_keys *)seed;
|
||||
|
@ -4768,8 +4853,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
|
|||
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
|
||||
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot set RSS key, err %d aq_err %d\n",
|
||||
dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
|
||||
status, hw->adminq.rq_last_status);
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -4779,8 +4863,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
|
|||
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
|
||||
lut, lut_size);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot set RSS lut, err %d aq_err %d\n",
|
||||
dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
|
||||
status, hw->adminq.rq_last_status);
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -4803,15 +4886,16 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
|
|||
struct ice_pf *pf = vsi->back;
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
enum ice_status status;
|
||||
struct device *dev;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (seed) {
|
||||
struct ice_aqc_get_set_rss_keys *buf =
|
||||
(struct ice_aqc_get_set_rss_keys *)seed;
|
||||
|
||||
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot get RSS key, err %d aq_err %d\n",
|
||||
dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
|
||||
status, hw->adminq.rq_last_status);
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -4821,8 +4905,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
|
|||
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
|
||||
lut, lut_size);
|
||||
if (status) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot get RSS lut, err %d aq_err %d\n",
|
||||
dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
|
||||
status, hw->adminq.rq_last_status);
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -4866,7 +4949,6 @@ ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
|
|||
*/
|
||||
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
|
||||
{
|
||||
struct device *dev = &vsi->back->pdev->dev;
|
||||
struct ice_aqc_vsi_props *vsi_props;
|
||||
struct ice_hw *hw = &vsi->back->hw;
|
||||
struct ice_vsi_ctx *ctxt;
|
||||
|
@ -4875,7 +4957,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
|
|||
|
||||
vsi_props = &vsi->info;
|
||||
|
||||
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
|
||||
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
|
||||
if (!ctxt)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -4891,7 +4973,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
|
|||
|
||||
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
|
||||
if (status) {
|
||||
dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
|
||||
dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
|
||||
bmode, status, hw->adminq.sq_last_status);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
|
@ -4900,7 +4982,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
|
|||
vsi_props->sw_flags = ctxt->info.sw_flags;
|
||||
|
||||
out:
|
||||
devm_kfree(dev, ctxt);
|
||||
kfree(ctxt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -5222,6 +5304,7 @@ static const struct net_device_ops ice_netdev_ops = {
|
|||
.ndo_set_vf_trust = ice_set_vf_trust,
|
||||
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
|
||||
.ndo_set_vf_link_state = ice_set_vf_link_state,
|
||||
.ndo_get_vf_stats = ice_get_vf_stats,
|
||||
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
|
||||
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
|
||||
.ndo_set_features = ice_set_features,
|
||||
|
|
|
@ -798,8 +798,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
|
|||
hw->layer_info = NULL;
|
||||
}
|
||||
|
||||
if (hw->port_info)
|
||||
ice_sched_clear_port(hw->port_info);
|
||||
ice_sched_clear_port(hw->port_info);
|
||||
|
||||
hw->num_tx_sched_layers = 0;
|
||||
hw->num_tx_sched_phys_layers = 0;
|
||||
|
|
|
@ -2428,7 +2428,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
|
|||
if (!ice_is_vsi_valid(hw, vsi_handle))
|
||||
return ICE_ERR_PARAM;
|
||||
|
||||
if (vid)
|
||||
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
|
||||
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
|
||||
else
|
||||
recipe_id = ICE_SW_LKUP_PROMISC;
|
||||
|
@ -2440,13 +2440,18 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
|
|||
|
||||
mutex_lock(rule_lock);
|
||||
list_for_each_entry(itr, rule_head, list_entry) {
|
||||
struct ice_fltr_info *fltr_info;
|
||||
u8 fltr_promisc_mask = 0;
|
||||
|
||||
if (!ice_vsi_uses_fltr(itr, vsi_handle))
|
||||
continue;
|
||||
fltr_info = &itr->fltr_info;
|
||||
|
||||
fltr_promisc_mask |=
|
||||
ice_determine_promisc_mask(&itr->fltr_info);
|
||||
if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
|
||||
vid != fltr_info->l_data.mac_vlan.vlan_id)
|
||||
continue;
|
||||
|
||||
fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
|
||||
|
||||
/* Skip if filter is not completely specified by given mask */
|
||||
if (fltr_promisc_mask & ~promisc_mask)
|
||||
|
@ -2454,7 +2459,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
|
|||
|
||||
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
|
||||
&remove_list_head,
|
||||
&itr->fltr_info);
|
||||
fltr_info);
|
||||
if (status) {
|
||||
mutex_unlock(rule_lock);
|
||||
goto free_fltr_list;
|
||||
|
|
|
@ -202,6 +202,7 @@ struct ice_hw_dev_caps {
|
|||
struct ice_hw_common_caps common_cap;
|
||||
u32 num_vfs_exposed; /* Total number of VFs exposed */
|
||||
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
|
||||
u32 num_funcs;
|
||||
};
|
||||
|
||||
/* MAC info */
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -122,6 +122,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
|
|||
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
|
||||
|
||||
void ice_set_vf_state_qs_dis(struct ice_vf *vf);
|
||||
int
|
||||
ice_get_vf_stats(struct net_device *netdev, int vf_id,
|
||||
struct ifla_vf_stats *vf_stats);
|
||||
#else /* CONFIG_PCI_IOV */
|
||||
#define ice_process_vflr_event(pf) do {} while (0)
|
||||
#define ice_free_vfs(pf) do {} while (0)
|
||||
|
@ -194,5 +197,13 @@ ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
|
|||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
ice_get_vf_stats(struct net_device __always_unused *netdev,
|
||||
int __always_unused vf_id,
|
||||
struct ifla_vf_stats __always_unused *vf_stats)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif /* CONFIG_PCI_IOV */
|
||||
#endif /* _ICE_VIRTCHNL_PF_H_ */
|
||||
|
|
|
@ -331,7 +331,7 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
|
|||
struct device *dev;
|
||||
unsigned int i;
|
||||
|
||||
dev = &pf->pdev->dev;
|
||||
dev = ice_pf_to_dev(pf);
|
||||
for (i = 0; i < umem->npgs; i++) {
|
||||
dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
|
||||
PAGE_SIZE,
|
||||
|
@ -369,7 +369,7 @@ static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
|
|||
struct device *dev;
|
||||
unsigned int i;
|
||||
|
||||
dev = &pf->pdev->dev;
|
||||
dev = ice_pf_to_dev(pf);
|
||||
for (i = 0; i < umem->npgs; i++) {
|
||||
dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
|
||||
DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
|
||||
|
|
Loading…
Reference in New Issue