Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-01-26

This series contains updates to i40e and i40evf.

Michal updates the driver to pass critical errors from the firmware to
the caller.
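
A condensed sketch of the admin queue change (as it appears in the i40e and
i40evf hunks below): when a send fails to complete, the driver now checks the
critical-error bit in the ATQLEN register and reports a distinct status
instead of always returning a timeout (PF-side mask shown; the VF path uses
I40E_VF_ATQLEN1_ATQCRIT_MASK).

	if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
		/* firmware raised a critical error on the admin queue */
		status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
	} else {
		/* plain writeback timeout, as before */
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}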

Patryk fixes an issue where multiple identical filters could be created with
the same location, by reordering the code so that we remove the existing
filter before adding the new one.
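
In outline (condensed from the i40e_add_fdir_ethtool() hunk below), the new
ordering rejects a duplicate up front, updates the software filter list, and
only then programs the hardware:

	/* refuse a filter whose match criteria duplicate an existing one */
	ret = i40e_disallow_matching_filters(vsi, input);
	if (ret)
		goto free_filter_memory;

	/* replace any previous entry at this location, then program HW */
	i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
	ret = i40e_add_del_fdir(vsi, input, true);
	if (ret)
		goto remove_sw_rule;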

Paweł adds back the ability to turn off offloads when a VLAN is set for the
VF driver.  He also fixes an issue where the number of TC queue pairs could
exceed the MSI-X vector count, causing messages about an invalid TC mapping
and a wrongly selected Tx queue.
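
The queue-pair clamp is a one-line guard in i40e_vsi_setup_queue_map(), shown
in the hunk below:

	/* Do not allow more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);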

Alex cleans up i40e/i40evf_set_itr_per_queue() by dropping all the unneeded
pointer chases.  He also puts the previously unused reg_idx value to work, so
that we no longer have to recompute the vector index throughout the driver.
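
The effect, condensed from the hunks below: the register index is computed
once when rings are mapped to vectors, and the interrupt hot paths use it
directly instead of rederiving it from v_idx and base_vector.

	/* i40e_vsi_map_rings_to_vectors(): computed once per vector */
	q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;

	/* hot path */
	wr32(hw, INTREG(q_vector->v_idx + vsi->base_vector - 1), txval);	/* before */
	wr32(hw, INTREG(q_vector->reg_idx), txval);				/* after  */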

Upasana enables the driver to display LLDP information in the vSphere Web
Client by exposing DCB parameters.
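
The DCB parameters are requested through a new direct admin queue command
(0x0303); a minimal sketch of the call added in probe/rebuild (see the hunks
below):

	/* Enable FW to write its default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);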

Alice converts our flags from 32-bit to 64-bit values, since we have run out
of room for new flags in the 32-bit field.
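
Condensed from the i40e.h and ethtool hunks below: the flag word becomes a
u64, the existing flags switch from BIT() to BIT_ULL(), and flag updates move
to cmpxchg64(), which makes room for flags above bit 31 such as the new
disable-FW-LLDP flag.

	u64 flags;					/* was u32 */
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER	BIT_ULL(31)	/* last flag that fit in 32 bits */
#define I40E_FLAG_DISABLE_FW_LLDP	BIT_ULL(32)	/* needs the wider field */

	if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags)
		return -EAGAIN;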

Dave implements a private ethtool flag to disable the processing of LLDP
packets by the firmware, so that the firmware does not consume LLDPDUs and
they can instead be sent up the stack.
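
The flag surfaces as the "disable-fw-lldp" ethtool private flag; when set it
stops the firmware LLDP agent and resets the local DCBX configuration, and
when cleared it restarts the agent (condensed from the i40e_ethtool.c hunks
below).  In practice it would be toggled with something like
"ethtool --set-priv-flags <iface> disable-fw-lldp on", followed by the PF
reset the driver issues for the change to take effect.

	I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),

	if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
		i40e_aq_stop_lldp(&pf->hw, true, NULL);
		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
		/* local_dcbx_config reset to defaults, see hunk below */
	} else {
		i40e_aq_start_lldp(&pf->hw, NULL);
	}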

Alan adds a mechanism for detecting and storing the state of the flag that
controls firmware processing of LLDP packets, so that its current setting
persists across reboots and reloads of the driver.
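
Persistence is inferred rather than stored: at DCB init, a query that fails
with EPERM means the firmware agent is already stopped, so the flag is set
again (condensed from the i40e_init_pf_dcb() hunk below).

	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
	}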

Avinash fixes kdump with i40e under resource constraints.  We were enabling
VMDq and iWARP even when only a single CPU is online, which starved the kdump
kernel of IRQs.
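
The fix simply skips VMDq and iWARP when only one CPU is online, leaving
enough vectors for the kdump kernel (condensed from the i40e_sw_init() hunk
below; the rest of each block is unchanged):

	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1)
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;

	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1)
		pf->flags |= I40E_FLAG_IWARP_ENABLED;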

Jake adds support for programming the fragmented IPv4 input set PCTYPE.  He
also fixes the reported masks to properly report that the entire field is
masked, since the mask values for the IPv4 addresses had accidentally been
swapped with those for the L4 port numbers.
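
Both pieces are visible in the ethtool and main hunks below: the IPv4/Other
input set is mirrored into the IPv4/Fragmented PCTYPE, and the reported masks
now use the right field widths (32-bit addresses, 16-bit ports).

	/* keep IPv4/Other and IPv4/Fragmented input sets in sync */
	if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
					new_mask);

	/* full-field masks reported with the correct widths */
	fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
	fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
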
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller <davem@davemloft.net>, 2018-01-28 21:26:34 -05:00
commit 5abe9ead9a
17 changed files with 342 additions and 106 deletions


@ -508,39 +508,40 @@ struct i40e_pf {
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
u32 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
#define I40E_FLAG_MSI_ENABLED BIT(1)
#define I40E_FLAG_MSIX_ENABLED BIT(2)
#define I40E_FLAG_RSS_ENABLED BIT(3)
#define I40E_FLAG_VMDQ_ENABLED BIT(4)
#define I40E_FLAG_FILTER_SYNC BIT(5)
#define I40E_FLAG_SRIOV_ENABLED BIT(6)
#define I40E_FLAG_DCB_CAPABLE BIT(7)
#define I40E_FLAG_DCB_ENABLED BIT(8)
#define I40E_FLAG_FD_SB_ENABLED BIT(9)
#define I40E_FLAG_FD_ATR_ENABLED BIT(10)
#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11)
#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12)
#define I40E_FLAG_MFP_ENABLED BIT(13)
#define I40E_FLAG_UDP_FILTER_SYNC BIT(14)
#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15)
#define I40E_FLAG_VEB_MODE_ENABLED BIT(16)
#define I40E_FLAG_VEB_STATS_ENABLED BIT(17)
#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19)
#define I40E_FLAG_TEMP_LINK_POLLING BIT(20)
#define I40E_FLAG_LEGACY_RX BIT(21)
#define I40E_FLAG_PTP BIT(22)
#define I40E_FLAG_IWARP_ENABLED BIT(23)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24)
#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25)
#define I40E_FLAG_CLIENT_RESET BIT(26)
#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
#define I40E_FLAG_TC_MQPRIO BIT(29)
#define I40E_FLAG_FD_SB_INACTIVE BIT(30)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31)
u64 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(3)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(5)
#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6)
#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7)
#define I40E_FLAG_DCB_ENABLED BIT_ULL(8)
#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9)
#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10)
#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11)
#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12)
#define I40E_FLAG_MFP_ENABLED BIT_ULL(13)
#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14)
#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16)
#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17)
#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19)
#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20)
#define I40E_FLAG_LEGACY_RX BIT_ULL(21)
#define I40E_FLAG_PTP BIT_ULL(22)
#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24)
#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25)
#define I40E_FLAG_CLIENT_RESET BIT_ULL(26)
#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27)
#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28)
#define I40E_FLAG_TC_MQPRIO BIT_ULL(29)
#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31)
#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;


@ -907,10 +907,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
}
asq_send_command_error:
@ -971,7 +976,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;


@ -205,6 +205,7 @@ enum i40e_admin_queue_opc {
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@ -2496,6 +2497,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Set DCB (direct 0x0303) */
struct i40e_aqc_set_dcb_parameters {
u8 command;
#define I40E_AQ_DCB_SET_AGENT 0x1
#define I40E_DCB_VALID 0x1
u8 valid_flags;
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response


@ -278,6 +278,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@ -3639,7 +3641,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40e_aq_set_dcb_parameters
* @hw: pointer to the hw struct
* @dcb_enable: True if DCB configuration needs to be applied
* @cmd_details: pointer to command details structure or NULL
*
**/
enum i40e_status_code
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_dcb_parameters *cmd =
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
if (dcb_enable) {
cmd->valid_flags = I40E_DCB_VALID;
cmd->command = I40E_AQ_DCB_SET_AGENT;
}
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;


@ -233,6 +233,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@ -2305,6 +2306,8 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_ring *rx_ring = vsi->rx_rings[queue];
struct i40e_ring *tx_ring = vsi->tx_rings[queue];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_q_vector *q_vector;
@ -2312,26 +2315,26 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
if (ec->use_adaptive_rx_coalesce)
vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
else
vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
else
vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
q_vector = vsi->rx_rings[queue]->q_vector;
q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
q_vector = rx_ring->q_vector;
q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
q_vector = vsi->tx_rings[queue]->q_vector;
q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
q_vector = tx_ring->q_vector;
q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
@ -2746,16 +2749,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
no_input_set:
if (input_set & I40E_L3_SRC_MASK)
fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
if (input_set & I40E_L3_DST_MASK)
fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
if (input_set & I40E_L4_SRC_MASK)
fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
if (input_set & I40E_L4_DST_MASK)
fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
@ -3806,6 +3809,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
i40e_write_fd_input_set(pf, index, new_mask);
/* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
* frames. If we're programming the input set for IPv4/Other, we also
* need to program the IPv4/Fragmented input set. Since we don't have
* separate support, we'll always assume and enforce that the two flow
* types must have matching input sets.
*/
if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
new_mask);
/* Add the new offset and update table, if necessary */
if (new_flex_offset) {
err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
@ -3827,6 +3840,87 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return 0;
}
/**
* i40e_match_fdir_filter - Return true if two filters match
* @a: pointer to filter struct
* @b: pointer to filter struct
*
* Returns true if the two filters match exactly the same criteria. I.e. they
* match the same flow type and have the same parameters. We don't need to
* check any input-set since all filters of the same flow type must use the
* same input set.
**/
static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
struct i40e_fdir_filter *b)
{
/* The filters do not match if any of these criteria differ. */
if (a->dst_ip != b->dst_ip ||
a->src_ip != b->src_ip ||
a->dst_port != b->dst_port ||
a->src_port != b->src_port ||
a->flow_type != b->flow_type ||
a->ip4_proto != b->ip4_proto)
return false;
return true;
}
/**
* i40e_disallow_matching_filters - Check that new filters differ
* @vsi: pointer to the targeted VSI
* @input: new filter to check
*
* Due to hardware limitations, it is not possible for two filters that match
* similar criteria to be programmed at the same time. This is true for a few
* reasons:
*
* (a) all filters matching a particular flow type must use the same input
* set, that is they must match the same criteria.
* (b) different flow types will never match the same packet, as the flow type
* is decided by hardware before checking which rules apply.
* (c) hardware has no way to distinguish which order filters apply in.
*
* Due to this, we can't really support using the location data to order
* filters in the hardware parsing. It is technically possible for the user to
* request two filters matching the same criteria but which select different
* queues. In this case, rather than keep both filters in the list, we reject
* the 2nd filter when the user requests adding it.
*
* This avoids needing to track location for programming the filter to
* hardware, and ensures that we avoid some strange scenarios involving
* deleting filters which match the same criteria.
**/
static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input)
{
struct i40e_pf *pf = vsi->back;
struct i40e_fdir_filter *rule;
struct hlist_node *node2;
/* Loop through every filter, and check that it doesn't match */
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
/* Don't check the filters match if they share the same fd_id,
* since the new filter is actually just updating the target
* of the old filter.
*/
if (rule->fd_id == input->fd_id)
continue;
/* If any filters match, then print a warning message to the
* kernel message buffer and bail out.
*/
if (i40e_match_fdir_filter(rule, input)) {
dev_warn(&pf->pdev->dev,
"Existing user defined filter %d already matches this flow.\n",
rule->fd_id);
return -EINVAL;
}
}
return 0;
}
/**
* i40e_add_fdir_ethtool - Add/Remove Flow Director filters
* @vsi: pointer to the targeted VSI
@ -3939,19 +4033,25 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->flex_offset = userdef.flex_offset;
}
ret = i40e_add_del_fdir(vsi, input, true);
/* Avoid programming two filters with identical match criteria. */
ret = i40e_disallow_matching_filters(vsi, input);
if (ret)
goto free_input;
goto free_filter_memory;
/* Add the input filter to the fdir_input_list, possibly replacing
* a previous filter. Do not free the input structure after adding it
* to the list as this would cause a use-after-free bug.
*/
i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
ret = i40e_add_del_fdir(vsi, input, true);
if (ret)
goto remove_sw_rule;
return 0;
free_input:
remove_sw_rule:
hlist_del(&input->fdir_node);
pf->fdir_pf_active_filters--;
free_filter_memory:
kfree(input);
return ret;
}
@ -4264,7 +4364,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 orig_flags, new_flags, changed_flags;
u64 orig_flags, new_flags, changed_flags;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@ -4315,13 +4415,32 @@ flags_complete:
!(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
return -EOPNOTSUPP;
/* Disable FW LLDP not supported if NPAR active or if FW
* API version < 1.7
*/
if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (pf->hw.func_caps.npar_enable) {
dev_warn(&pf->pdev->dev,
"Unable to stop FW LLDP if NPAR active\n");
return -EOPNOTSUPP;
}
if (pf->hw.aq.api_maj_ver < 1 ||
(pf->hw.aq.api_maj_ver == 1 &&
pf->hw.aq.api_min_ver < 7)) {
dev_warn(&pf->pdev->dev,
"FW ver does not support stopping FW LLDP\n");
return -EOPNOTSUPP;
}
}
/* Compare and exchange the new flags into place. If we failed, that
* is if cmpxchg returns anything but the old value, this means that
* something else has modified the flags variable since we copied it
* originally. We'll just punt with an error and log something in the
* message buffer.
*/
if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&pf->pdev->dev,
"Unable to update pf->flags as it was modified by another thread...\n");
return -EAGAIN;
@ -4360,12 +4479,37 @@ flags_complete:
}
}
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
int i;
i40e_aq_stop_lldp(&pf->hw, true, NULL);
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
/* reset local_dcbx_config to default */
dcbcfg = &pf->hw.local_dcbx_config;
dcbcfg->etscfg.willing = 1;
dcbcfg->etscfg.maxtcs = 0;
dcbcfg->etscfg.tcbwtable[0] = 100;
for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
dcbcfg->etscfg.tcbwtable[i] = 0;
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
dcbcfg->etscfg.prioritytable[i] = 0;
dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
dcbcfg->pfc.willing = 1;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
} else {
i40e_aq_start_lldp(&pf->hw, NULL);
}
}
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
I40E_FLAG_LEGACY_RX |
I40E_FLAG_SOURCE_PRUNING_DISABLED))
I40E_FLAG_SOURCE_PRUNING_DISABLED |
I40E_FLAG_DISABLE_FW_LLDP))
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;


@ -1818,6 +1818,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
/* Do not allow use more TC queue pairs than MSI-X vectors exist */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
@ -4122,6 +4126,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
q_vector->num_ringpairs = num_ringpairs;
q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
q_vector->rx.count = 0;
q_vector->tx.count = 0;
@ -6320,8 +6325,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
* Also do not enable DCBx if FW LLDP agent is disabled
*/
if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
(pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
goto out;
/* Get the initial DCB configuration */
@ -6348,6 +6356,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %s aq_err %s\n",
@ -7674,6 +7685,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
/* Reprogram the default input set for Other/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
@ -9221,6 +9235,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
/* Enable FW to write a default DCB config on link-up */
i40e_aq_set_dcb_parameters(hw, true, NULL);
#ifdef CONFIG_I40E_DCB
ret = i40e_init_pf_dcb(pf);
if (ret) {
@ -11060,13 +11077,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.aq.fw_maj_ver >= 6)
pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
if (pf->hw.func_caps.vmdq) {
if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
if (pf->hw.func_caps.iwarp) {
if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
pf->flags |= I40E_FLAG_IWARP_ENABLED;
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
@ -13543,6 +13560,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
/* Enable FW to write default DCB config on link-up */
i40e_aq_set_dcb_parameters(hw, true, NULL);
#ifdef CONFIG_I40E_DCB
err = i40e_init_pf_dcb(pf);
if (err) {


@ -225,6 +225,10 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
bool dcb_enable,
struct i40e_asq_cmd_details
*cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,


@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */


@ -956,7 +956,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
val);
} else {
val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
@ -983,8 +983,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */
wr32(&vsi->back->hw,
I40E_PFINT_DYN_CTLN(q_vector->v_idx +
vsi->base_vector - 1), val);
I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
} else {
u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
@ -2311,7 +2310,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
@ -2321,8 +2319,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
return;
}
vector = (q_vector->v_idx + vsi->base_vector);
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@ -2371,12 +2367,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
wr32(hw, INTREG(vector - 1), rxval);
wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;


@ -837,10 +837,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
}
asq_send_command_error:
@ -901,7 +906,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;


@ -205,6 +205,7 @@ enum i40e_admin_queue_opc {
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@ -2461,6 +2462,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Set DCB (direct 0x0303) */
struct i40e_aqc_set_dcb_parameters {
u8 command;
#define I40E_AQ_DCB_SET_AGENT 0x1
#define I40E_DCB_VALID 0x1
u8 valid_flags;
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
/* Apply MIB changes (0x0A07)
* uses the generic struc as it contains no data
*/


@ -284,6 +284,8 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);


@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */


@ -369,8 +369,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
vsi->base_vector - 1), val);
I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
q_vector->arm_wb_state = true;
}
@ -389,7 +388,7 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */
wr32(&vsi->back->hw,
I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
val);
}
@ -1498,12 +1497,9 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@ -1552,12 +1548,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
wr32(hw, INTREG(vector - 1), rxval);
wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;


@ -114,14 +114,14 @@ struct i40e_q_vector {
struct i40evf_adapter *adapter;
struct i40e_vsi *vsi;
struct napi_struct napi;
unsigned long reg_idx;
struct i40e_ring_container rx;
struct i40e_ring_container tx;
u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 or 1 update ITR */
int v_idx; /* vector index in list */
u16 v_idx; /* index in the vsi->q_vector array. */
u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
cpumask_t affinity_mask;


@ -512,31 +512,31 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
u16 vector;
adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
if (ec->use_adaptive_rx_coalesce)
adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
else
adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
if (!ec->use_adaptive_rx_coalesce)
rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
else
adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
if (!ec->use_adaptive_tx_coalesce)
tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC;
q_vector = adapter->rx_rings[queue].q_vector;
q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
q_vector = rx_ring->q_vector;
q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
q_vector = adapter->tx_rings[queue].q_vector;
q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
q_vector = tx_ring->q_vector;
q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);


@ -1387,6 +1387,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
i40evf_napi_poll, NAPI_POLL_WEIGHT);
@ -2344,13 +2345,19 @@ static int i40evf_set_features(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
if (!VLAN_ALLOWED(adapter))
/* Don't allow changing VLAN_RX flag when VLAN is set for VF
* and return an error in this case
*/
if (VLAN_ALLOWED(adapter)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
adapter->aq_required |=
I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
else
adapter->aq_required |=
I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
return -EINVAL;
if (features & NETIF_F_HW_VLAN_CTAG_RX)
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
else
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
}
return 0;
}