Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-03-03

This series contains updates to fm10k, i40e and i40evf.

Matthew updates the fm10k driver by cleaning up code comments and whitespace issues. He also makes the tunnel header length check more robust by calculating the inner L4 header length based on whether it is TCP or UDP, and implements ndo_features_check(), which allows drivers to report their offload capabilities per-skb.

Neerav updates the i40e driver to skip over priority tagging if DCB is not enabled, and fixes an issue where the driver was not flushing out the DCBNL app table for applications that are not present in the local DCBX application configuration TLVs. He also fixes i40e so that, in MFP mode, the driver no longer returns the incorrect number of traffic classes for partitions that are not enabled for iSCSI; even though the driver was not configuring these traffic classes in the transmit scheduler for the NIC partitions, it does use this map to set up the queue mappings.

Shannon updates i40e/i40evf to include the firmware build number in the formatted firmware version string.

Akeem adds a safety net (a 'default' case) for possible unmatched switch calls.

Mitch updates i40e to not automatically disable PF loopback at runtime, now that we have the functionality to enable and disable PF loopback; this cleans up a bogus error message when removing the PF module with VFs enabled. He also adds an extra check to make sure that the indirection table pointer is valid before dereferencing it.

Anjali updates i40e to enable more than the max RSS qps when running in single TC mode for the main VSI; it is now possible to enable as many as num_online_cpus(). She adds a firmware check to ensure that DCB is disabled for firmware versions older than 4.33, adds the missing packet types for VXLAN offload to i40e/i40evf, and updates i40e to handle a varying RSS table size for each VSI, since not all VSIs have the same RSS table size.

v2: Dropped previous patch #9 "i40e/i40evf: Add capability to gather VEB per TC stats" since the stats should be in ethtool and not debugfs.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
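For reference, the per-skb offload capability hook this series adds to fm10k has roughly the shape below. This is a condensed sketch of the fm10k_features_check() code in the diff further down, shown only to illustrate how ndo_features_check() is used; it is not a drop-in patch.

    static netdev_features_t fm10k_features_check(struct sk_buff *skb,
                                                  struct net_device *dev,
                                                  netdev_features_t features)
    {
            /* fm10k_tx_encap_offload() returns the inner EtherType when the
             * hardware can offload the tunnel (combined inner and outer
             * headers within FM10K_TUNNEL_HEADER_LENGTH), and 0 otherwise.
             */
            if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
                    return features;

            /* fall back to software checksum/GSO for frames we cannot offload */
            return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
    }

The hook is wired up through the net_device_ops table (.ndo_features_check = fm10k_features_check), as shown in the fm10k netdev hunk below.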
commit b97526f3ff

@@ -439,6 +439,7 @@ extern char fm10k_driver_name[];
extern const char fm10k_driver_version[];
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
__be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);

@@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
/* Retrieve RX Owner Data */
id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));

/* Process RX Ring*/
/* Process RX Ring */
do {
rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
&q->rx_drops);
@@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
* Function invalidates the index values for the queues so any updates that
* may have happened are ignored and the base for the queue stats is reset.
**/

void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
{
u32 i;

@@ -1019,7 +1019,7 @@ static int fm10k_set_channels(struct net_device *dev,
}

static int fm10k_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
struct ethtool_ts_info *info)
{
struct fm10k_intfc *interface = netdev_priv(dev);

@@ -275,7 +275,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
if (vf_idx >= iov_data->num_vfs)
return FM10K_ERR_PARAM;

/* determine if an update has occured and if so notify the VF */
/* determine if an update has occurred and if so notify the VF */
vf_info = &iov_data->vf_info[vf_idx];
if (vf_info->sw_vid != pvid) {
vf_info->sw_vid = pvid;

@@ -711,10 +711,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
return NULL;

/* verify protocol is transparent Ethernet bridging */
if (nvgre_hdr->proto != htons(ETH_P_TEB))
return NULL;

/* report start of ethernet header */
if (nvgre_hdr->flags & NVGRE_TNI)
return (struct ethhdr *)(nvgre_hdr + 1);
@@ -722,15 +718,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
return (struct ethhdr *)(&nvgre_hdr->tni);
}

static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
struct ethhdr *eth_hdr;
u8 l4_hdr = 0;

/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164
if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB))
return 0;

switch (vlan_get_protocol(skb)) {
@@ -760,12 +754,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)

switch (eth_hdr->h_proto) {
case htons(ETH_P_IP):
inner_l4_hdr = inner_ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
break;
default:
return 0;
}

switch (inner_l4_hdr) {
case IPPROTO_TCP:
inner_l4_hlen = inner_tcp_hdrlen(skb);
break;
case IPPROTO_UDP:
inner_l4_hlen = 8;
break;
default:
return 0;
}

/* The hardware allows tunnel offloads only if the combined inner and
* outer header is 184 bytes or less
*/
if (skb_inner_transport_header(skb) + inner_l4_hlen -
skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
return 0;

return eth_hdr->h_proto;
}

@@ -934,10 +949,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

/* Memory barrier before checking head and tail */
smp_mb();

/* We need to check again in a case another CPU has just
* made room available. */
/* Check again in a case another CPU has just made room available */
if (likely(fm10k_desc_unused(tx_ring) < size))
return -EBUSY;

@@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
* @fifo: pointer to FIFO
* @offset: offset to add to head
*
* This function returns the indicies into the fifo based on head + offset
* This function returns the indices into the fifo based on head + offset
**/
static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
* @fifo: pointer to FIFO
* @offset: offset to add to tail
*
* This function returns the indicies into the fifo based on tail + offset
* This function returns the indices into the fifo based on tail + offset
**/
static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -326,7 +326,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
* fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
* @mbx: pointer to mailbox
*
* This function will take a seciton of the Rx FIFO and copy it into the
* This function will take a section of the Rx FIFO and copy it into the
mbx->tail--;
* mailbox memory. The offset in mbmem is based on the lower bits of the
* tail and len determines the length to copy.
@@ -418,7 +418,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw,
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
* This function will take a seciton of the mailbox memory and copy it
* This function will take a section of the mailbox memory and copy it
* into the Rx FIFO. The offset is based on the lower bits of the
* head and len determines the length to copy.
**/
@@ -464,7 +464,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw,
* @tail: tail index of message
*
* This function will first validate the tail index and size for the
* incoming message. It then updates the acknowlegment number and
* incoming message. It then updates the acknowledgment number and
* copies the data into the FIFO. It will return the number of messages
* dequeued on success and a negative value on error.
**/
@@ -761,7 +761,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
err = fm10k_fifo_enqueue(&mbx->tx, msg);
}

/* if we failed trhead the error */
/* if we failed treat the error */
if (err) {
mbx->timeout = 0;
mbx->tx_busy++;
@@ -815,7 +815,7 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
u32 mbmem = mbx->mbmem_reg;

/* write new msg header to notify recepient of change */
/* write new msg header to notify recipient of change */
fm10k_write_reg(hw, mbmem, mbx->mbx_hdr);

/* write mailbox to sent interrupt */
@@ -1251,7 +1251,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
/* we will need to pull all of the fields for verification */
head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);

/* we only have lower 10 bits of error number os add upper bits */
/* we only have lower 10 bits of error number so add upper bits */
err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);

@@ -1548,7 +1548,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
mbx->timeout = 0;
mbx->udelay = FM10K_MBX_INIT_DELAY;

/* initalize tail and head */
/* initialize tail and head */
mbx->tail = 1;
mbx->head = 1;

@@ -1627,7 +1627,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
mbx->local = FM10K_SM_MBX_VERSION;
mbx->remote = 0;

/* initalize tail and head */
/* initialize tail and head */
mbx->tail = 1;
mbx->head = 1;

@@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
* fm10k_request_glort_range - Request GLORTs for use in configuring rules
* @interface: board private structure
*
* This function allocates a range of glorts for this inteface to use.
* This function allocates a range of glorts for this interface to use.
**/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
@@ -781,7 +781,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)

fm10k_mbx_lock(interface);

/* only need to update the VLAN if not in promiscous mode */
/* only need to update the VLAN if not in promiscuous mode */
if (!(netdev->flags & IFF_PROMISC)) {
err = hw->mac.ops.update_vlan(hw, vid, 0, set);
if (err)
@@ -970,7 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev)

fm10k_mbx_lock(interface);

/* syncronize all of the addresses */
/* synchronize all of the addresses */
if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
@@ -1051,7 +1051,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
vid, true, 0);
}

/* syncronize all of the addresses */
/* synchronize all of the addresses */
if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
@@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
}
}

static netdev_features_t fm10k_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
return features;

return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
}

static const struct net_device_ops fm10k_netdev_ops = {
.ndo_open = fm10k_open,
.ndo_stop = fm10k_close,
@@ -1372,6 +1382,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_do_ioctl = fm10k_ioctl,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
.ndo_features_check = fm10k_features_check,
};

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

@@ -648,7 +648,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* Configure the Rx buffer size for one buff without split */
srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

/* Configure the Rx ring to supress loopback packets */
/* Configure the Rx ring to suppress loopback packets */
srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

@@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
vid = (vid << 17) >> 17;

/* verify the reserved 0 fields are 0 */
if (len >= FM10K_VLAN_TABLE_VID_MAX ||
vid >= FM10K_VLAN_TABLE_VID_MAX)
if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
return FM10K_ERR_PARAM;

/* Loop through the table updating all required VLANs */
@@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
}

/**
* fm10k_update_uc_addr_pf - Update device unicast addresss
* fm10k_update_xc_addr_pf - Update device addresses
* @hw: pointer to the HW structure
* @glort: base resource tag for this request
* @mac: MAC address to add/remove from table
@@ -356,7 +355,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
}

/**
* fm10k_update_uc_addr_pf - Update device unicast addresss
* fm10k_update_uc_addr_pf - Update device unicast addresses
* @hw: pointer to the HW structure
* @glort: base resource tag for this request
* @mac: MAC address to add/remove from table
@@ -454,7 +453,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
break;
}

/* always reset VFITR2[0] to point to last enabled PF vector*/
/* always reset VFITR2[0] to point to last enabled PF vector */
fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);

/* reset ITR2[0] to point to last enabled PF vector */
@@ -812,7 +811,7 @@ static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
if (vf_idx >= hw->iov.num_vfs)
return FM10K_ERR_PARAM;

/* determine vector offset and count*/
/* determine vector offset and count */
vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);

@@ -951,7 +950,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
if (vf_info->mbx.ops.disconnect)
vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);

/* determine vector offset and count*/
/* determine vector offset and count */
vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);

@@ -1035,7 +1034,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
((u32)vf_info->mac[2]);
}

/* map queue pairs back to VF from last to first*/
/* map queue pairs back to VF from last to first */
for (i = queues_per_pool; i--;) {
fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
@@ -1141,7 +1140,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
*
* This function is a default handler for MSI-X requests from the VF. The
* assumption is that in this case it is acceptable to just directly
* hand off the message form the VF to the underlying shared code.
* hand off the message from the VF to the underlying shared code.
**/
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
@@ -1160,7 +1159,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
*
* This function is a default handler for MAC/VLAN requests from the VF.
* The assumption is that in this case it is acceptable to just directly
* hand off the message form the VF to the underlying shared code.
* hand off the message from the VF to the underlying shared code.
**/
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
@@ -1404,7 +1403,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
&stats->vlan_drop);
loopback_drop = fm10k_read_hw_stats_32b(hw,
FM10K_STATS_LOOPBACK_DROP,
&stats->loopback_drop);
&stats->loopback_drop);
nodesc_drop = fm10k_read_hw_stats_32b(hw,
FM10K_STATS_NODESC_DROP,
&stats->nodesc_drop);
@@ -1573,7 +1572,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
s32 ret_val = 0;
u32 dma_ctrl2;

/* verify the switch is ready for interraction */
/* verify the switch is ready for interaction */
dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
goto out;

@@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
/**
* fm10k_tlv_msg_test - Validate all results on test message receive
* @hw: Pointer to hardware structure
* @results: Pointer array to attributes in the mesage
* @results: Pointer array to attributes in the message
* @mbx: Pointer to mailbox information structure
*
* This function does a check to verify all attributes match what the test

@@ -356,6 +356,9 @@ struct fm10k_hw;
#define FM10K_QUEUE_DISABLE_TIMEOUT 100
#define FM10K_RESET_TIMEOUT 150

/* Maximum supported combined inner and outer header length for encapsulation */
#define FM10K_TUNNEL_HEADER_LENGTH 184

/* VF registers */
#define FM10K_VFCTRL 0x00000
#define FM10K_VFCTRL_RST 0x00000008
@@ -593,7 +596,7 @@ struct fm10k_vf_info {
u16 sw_vid; /* Switch API assigned VLAN */
u16 pf_vid; /* PF assigned Default VLAN */
u8 mac[ETH_ALEN]; /* PF Default MAC address */
u8 vsi; /* VSI idenfifier */
u8 vsi; /* VSI identifier */
u8 vf_idx; /* which VF this is */
u8 vf_flags; /* flags indicating what modes
* are supported for the port

@@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
if (err)
return err;

/* If permenant address is set then we need to restore it */
/* If permanent address is set then we need to restore it */
if (is_valid_ether_addr(perm_addr)) {
bal = (((u32)perm_addr[3]) << 24) |
(((u32)perm_addr[4]) << 16) |
@@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
* fm10k_reset_hw_vf - VF hardware reset
* @hw: pointer to hardware structure
*
* This function should return the hardare to a state similar to the
* This function should return the hardware to a state similar to the
* one it is in after just being initialized.
**/
static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
@@ -252,7 +252,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
}

/**
* fm10k_update_uc_addr_vf - Update device unicast address
* fm10k_update_uc_addr_vf - Update device unicast addresses
* @hw: pointer to the HW structure
* @glort: unused
* @mac: MAC address to add/remove from table
@@ -282,7 +282,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
return FM10K_ERR_PARAM;

/* add bit to notify us if this is a set of clear operation */
/* add bit to notify us if this is a set or clear operation */
if (!add)
vid |= FM10K_VLAN_CLEAR;

@@ -295,7 +295,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
}

/**
* fm10k_update_mc_addr_vf - Update device multicast address
* fm10k_update_mc_addr_vf - Update device multicast addresses
* @hw: pointer to the HW structure
* @glort: unused
* @mac: MAC address to add/remove from table
@@ -319,7 +319,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
if (!is_multicast_ether_addr(mac))
return FM10K_ERR_PARAM;

/* add bit to notify us if this is a set of clear operation */
/* add bit to notify us if this is a set or clear operation */
if (!add)
vid |= FM10K_VLAN_CLEAR;

@@ -515,7 +515,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
* @hw: pointer to the hardware structure
*
* Function reads the content of 2 registers, combined to represent a 64 bit
* value measured in nanosecods. In order to guarantee the value is accurate
* value measured in nanoseconds. In order to guarantee the value is accurate
* we check the 32 most significant bits both before and after reading the
* 32 least significant bits to verify they didn't change as we were reading
* the registers.

@@ -471,6 +471,9 @@ struct i40e_vsi {
u16 rx_itr_setting;
u16 tx_itr_setting;

u16 rss_table_size;
u16 rss_size;

u16 max_frame;
u16 rx_hdr_len;
u16 rx_buf_len;
@@ -488,6 +491,7 @@ struct i40e_vsi {

u16 base_queue; /* vsi's first queue in hw array */
u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
u16 req_queue_pairs; /* User requested queue pairs */
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
@@ -557,14 +561,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];

snprintf(buf, sizeof(buf),
"f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
"f%d.%d.%05d a%d.%d n%x.%02x e%x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
(hw->nvm.eetrack & 0xffffff));

return buf;
}
@@ -725,6 +729,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
void i40e_dcbnl_setup(struct i40e_vsi *vsi);

@@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
ret_code = i40e_aq_get_firmware_version(hw,
&hw->aq.fw_maj_ver,
&hw->aq.fw_min_ver,
&hw->aq.fw_build,
&hw->aq.api_maj_ver,
&hw->aq.api_min_ver,
NULL);

@@ -93,6 +93,7 @@ struct i40e_adminq_info {
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_release_on_done;

@@ -1737,6 +1737,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
* @fw_minor_version: firmware minor version
* @fw_build: firmware build number
* @api_major_version: major queue version
* @api_minor_version: minor queue version
* @cmd_details: pointer to command details structure or NULL
@@ -1745,6 +1746,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
**/
i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -1758,13 +1760,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

if (!status) {
if (fw_major_version != NULL)
if (fw_major_version)
*fw_major_version = le16_to_cpu(resp->fw_major);
if (fw_minor_version != NULL)
if (fw_minor_version)
*fw_minor_version = le16_to_cpu(resp->fw_minor);
if (api_major_version != NULL)
if (fw_build)
*fw_build = le32_to_cpu(resp->fw_build);
if (api_major_version)
*api_major_version = le16_to_cpu(resp->api_major);
if (api_minor_version != NULL)
if (api_minor_version)
*api_minor_version = le16_to_cpu(resp->api_minor);
}

@@ -269,22 +269,21 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
/**
* i40e_dcbnl_flush_apps - Delete all removed APPs
* @pf: the corresponding pf
* @old_cfg: old DCBX configuration data
* @new_cfg: new DCBX configuration data
*
* Find and delete all APPs that are not present in the passed
* DCB configuration
**/
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg)
{
struct i40e_dcb_app_priority_table app;
struct i40e_dcbx_config *dcbxcfg;
struct i40e_hw *hw = &pf->hw;
int i;

dcbxcfg = &hw->local_dcbx_config;
for (i = 0; i < dcbxcfg->numapps; i++) {
app = dcbxcfg->app[i];
for (i = 0; i < old_cfg->numapps; i++) {
app = old_cfg->app[i];
/* The APP is not available anymore delete it */
if (!i40e_dcbnl_find_app(new_cfg, &app))
i40e_dcbnl_del_app(pf, &app);
@@ -306,9 +305,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return;

/* Do not setup DCB NL ops for MFP mode */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
dev->dcbnl_ops = &dcbnl_ops;
dev->dcbnl_ops = &dcbnl_ops;

/* Set initial IEEE DCB settings */
i40e_dcbnl_set_all(vsi);

@@ -1413,6 +1413,8 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
data += ETH_GSTRING_LEN;
}
break;
default:
break;
}
}

@@ -1654,6 +1656,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
case ETHTOOL_ID_INACTIVE:
i40e_led_set(hw, pf->led_status, false);
break;
default:
break;
}

return 0;
@@ -2344,10 +2348,6 @@ static int i40e_set_channels(struct net_device *dev,
/* update feature limits from largest to smallest supported values */
/* TODO: Flow director limit, DCB etc */

/* cap RSS limit */
if (count > pf->rss_size_max)
count = pf->rss_size_max;

/* use rss_reconfig to rebuild with new queue count and update traffic
* class queue mapping
*/

@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 9
#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1566,6 +1566,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,

/* Set actual Tx/Rx queue pairs */
vsi->num_queue_pairs = offset;
if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
else
vsi->num_queue_pairs = pf->num_lan_msix;
}

/* Scheduler section valid can only be set for ADD VSI */
if (is_add) {
@@ -4101,7 +4107,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
enabled_tc = i40e_get_iscsi_tc_map(pf);
else
enabled_tc = pf->hw.func_caps.enabled_tcmap;
return 1; /* Only TC0 */

/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -4151,11 +4157,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

/* MPF enabled and iSCSI PF type */
/* MFP enabled and iSCSI PF type */
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
return pf->hw.func_caps.enabled_tcmap;
return i40e_pf_get_default_tc(pf);
}

/**
@@ -4545,6 +4551,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;

/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4))
goto out;

/* Get the initial DCB configuration */
err = i40e_init_dcb(hw);
if (!err) {
@@ -5155,7 +5166,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_aqc_lldp_get_mib *mib =
(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
struct i40e_hw *hw = &pf->hw;
struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
int ret = 0;
@@ -5188,8 +5198,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,

memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
/* Store the old configuration */
tmp_dcbx_cfg = *dcbx_cfg;
memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg));

/* Reset the old DCBx configuration data */
memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
@@ -5198,20 +5210,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
}

/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
sizeof(tmp_dcbx_cfg))) {
dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
goto exit;
}

need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
&hw->local_dcbx_config);

i40e_dcbnl_flush_apps(pf, dcbx_cfg);
i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

if (!need_reconfig)
goto exit;

/* Enable DCB tagging only when more than one TC */
if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
@@ -6305,13 +6319,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}

msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret) {
dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
pf->hw.aq.asq_last_status);
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
pf->hw.aq.asq_last_status);
}

/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
@@ -6698,6 +6713,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->idx = vsi_idx;
vsi->rx_itr_setting = pf->rx_itr_default;
vsi->tx_itr_setting = pf->tx_itr_default;
vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
@@ -6921,7 +6938,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_lan_msix = min_t(int, num_online_cpus(),
hw->func_caps.num_msix_vectors);
pf->num_vmdq_msix = pf->num_vmdq_qps;
other_vecs = 1;
other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
@@ -7189,6 +7207,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
static int i40e_config_rss(struct i40e_pf *pf)
{
u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u32 lut = 0;
int i, j;
@@ -7206,6 +7225,8 @@ static int i40e_config_rss(struct i40e_pf *pf)
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);

/* Check capability and Set table size and register per hw expectation*/
reg_val = rd32(hw, I40E_PFQF_CTL_0);
if (hw->func_caps.rss_table_size == 512) {
@@ -7227,7 +7248,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
* If LAN VSI is the only consumer for RSS then this requirement
* is not necessary.
*/
if (j == pf->rss_size)
if (j == vsi->rss_size)
j = 0;
/* lut = 4-byte sliding window of 4 lut entries */
lut = (lut << 8) | (j &
@@ -7251,15 +7272,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int new_rss_size;

if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;

queue_count = min_t(int, queue_count, pf->rss_size_max);
new_rss_size = min_t(int, queue_count, pf->rss_size_max);

if (queue_count != pf->rss_size) {
if (queue_count != vsi->num_queue_pairs) {
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);

pf->rss_size = queue_count;
pf->rss_size = new_rss_size;

i40e_reset_and_rebuild(pf, true);
i40e_config_rss(pf);
@@ -7432,6 +7457,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
*/
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
pf->rss_size = 1;
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
@@ -9258,7 +9284,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = pf->rss_size_max;
pf->num_lan_qps = max_t(int, pf->rss_size_max,
num_online_cpus());
pf->num_lan_qps = min_t(int, pf->num_lan_qps,
pf->hw.func_caps.num_tx_qp);

queues_left -= pf->num_lan_qps;
}

@@ -9662,13 +9692,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);

msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err) {
dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
pf->hw.aq.asq_last_status);
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
pf->hw.aq.asq_last_status);
}

/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.

@@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);

i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,

@@ -1354,10 +1354,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct iphdr *iph;
__sum16 csum;

ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
(rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

skb->ip_summed = CHECKSUM_NONE;

@@ -2043,6 +2043,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
}

if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
goto out;

/* Insert 802.1p priority into VLAN header */
if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
(skb->priority != TC_PRIO_CONTROL)) {
@@ -2063,6 +2066,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
}
}

out:
*flags = tx_flags;
return 0;
}

@@ -832,7 +832,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
}
i40e_disable_pf_switch_lb(pf);
} else {
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");

@@ -93,6 +93,7 @@ struct i40e_adminq_info {
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_release_on_done;

@@ -852,10 +852,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct iphdr *iph;
__sum16 csum;

ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
(rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

skb->ip_summed = CHECKSUM_NONE;

@@ -642,12 +642,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!indir)
return 0;

for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
indir[j++] = hlut_val & 0xff;
indir[j++] = (hlut_val >> 8) & 0xff;
indir[j++] = (hlut_val >> 16) & 0xff;
indir[j++] = (hlut_val >> 24) & 0xff;
if (indir) {
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
indir[j++] = hlut_val & 0xff;
indir[j++] = (hlut_val >> 8) & 0xff;
indir[j++] = (hlut_val >> 16) & 0xff;
indir[j++] = (hlut_val >> 24) & 0xff;
}
}
return 0;
}

@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";

#define DRV_VERSION "1.2.3"
#define DRV_VERSION "1.2.4"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";