Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says: ==================== Intel Wired LAN Driver Updates 2014-10-02 This series contains updates to fm10k, igb, ixgbe and i40e. Alex provides two updates to the fm10k driver. First reduces the buffer size to 2k for all page sizes, since most frames only have a 1500 MTU so supporting a buffer size larger than this is somewhat wasteful. Second fixes an issue where the number of transmit queues was not being updated, so added the lines necessary to update the number of transmit queues. Rick Jones provides two patches to convert ixgbe, igb and i40e to use dev_consume_skb_any(). Emil provides two patches for ixgbe, first cleans up a couple of wait loops on auto-negotiation that were not needed. Second fixes an issue reported by Fujitsu/Red Hat, which consolidates the logic behind the dynamic setting of TXDCTL.WTHRESH depending on interrupt throttle rate (ITR) setting regardless of BQL. Ethan Zhao provides a cleanup patch for ixgbe where he noticed a duplicate define. Bernhard Kaindl provides a patch for igb to remove a source of latency spikes by not calling code that uses mdelay() for feeding a PHY stat while being called with a spinlock held. Todd bumps the igb version based on the recent changes. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
579899a9ea
|
@ -47,13 +47,9 @@
|
|||
#define FM10K_DEFAULT_TX_WORK 256
|
||||
|
||||
#define FM10K_RXBUFFER_256 256
|
||||
#define FM10K_RXBUFFER_16384 16384
|
||||
#define FM10K_RX_HDR_LEN FM10K_RXBUFFER_256
|
||||
#if PAGE_SIZE <= FM10K_RXBUFFER_16384
|
||||
#define FM10K_RX_BUFSZ (PAGE_SIZE / 2)
|
||||
#else
|
||||
#define FM10K_RX_BUFSZ FM10K_RXBUFFER_16384
|
||||
#endif
|
||||
#define FM10K_RXBUFFER_2048 2048
|
||||
#define FM10K_RX_BUFSZ FM10K_RXBUFFER_2048
|
||||
|
||||
/* How many Rx Buffers do we bundle into one write to the hardware ? */
|
||||
#define FM10K_RX_BUFFER_WRITE 16 /* Must be power of 2 */
|
||||
|
|
|
@ -546,6 +546,10 @@ int fm10k_open(struct net_device *netdev)
|
|||
fm10k_request_glort_range(interface);
|
||||
|
||||
/* Notify the stack of the actual queue counts */
|
||||
err = netif_set_real_num_tx_queues(netdev,
|
||||
interface->num_tx_queues);
|
||||
if (err)
|
||||
goto err_set_queues;
|
||||
|
||||
err = netif_set_real_num_rx_queues(netdev,
|
||||
interface->num_rx_queues);
|
||||
|
@ -601,7 +605,7 @@ int fm10k_close(struct net_device *netdev)
|
|||
static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct fm10k_intfc *interface = netdev_priv(dev);
|
||||
unsigned int r_idx = 0;
|
||||
unsigned int r_idx = skb->queue_mapping;
|
||||
int err;
|
||||
|
||||
if ((skb->protocol == htons(ETH_P_8021Q)) &&
|
||||
|
|
|
@ -702,7 +702,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
|
|||
total_packets += tx_buf->gso_segs;
|
||||
|
||||
/* free the skb */
|
||||
dev_kfree_skb_any(tx_buf->skb);
|
||||
dev_consume_skb_any(tx_buf->skb);
|
||||
|
||||
/* unmap skb header data */
|
||||
dma_unmap_single(tx_ring->dev,
|
||||
|
|
|
@ -265,11 +265,6 @@ struct e1000_hw_stats {
|
|||
u64 b2ogprc;
|
||||
};
|
||||
|
||||
struct e1000_phy_stats {
|
||||
u32 idle_errors;
|
||||
u32 receive_errors;
|
||||
};
|
||||
|
||||
struct e1000_host_mng_dhcp_cookie {
|
||||
u32 signature;
|
||||
u8 status;
|
||||
|
|
|
@ -403,7 +403,6 @@ struct igb_adapter {
|
|||
struct e1000_hw hw;
|
||||
struct e1000_hw_stats stats;
|
||||
struct e1000_phy_info phy_info;
|
||||
struct e1000_phy_stats phy_stats;
|
||||
|
||||
u32 test_icr;
|
||||
struct igb_ring test_tx_ring;
|
||||
|
|
|
@ -58,7 +58,7 @@
|
|||
|
||||
#define MAJ 5
|
||||
#define MIN 2
|
||||
#define BUILD 13
|
||||
#define BUILD 15
|
||||
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
|
||||
__stringify(BUILD) "-k"
|
||||
char igb_driver_name[] = "igb";
|
||||
|
@ -5206,14 +5206,11 @@ void igb_update_stats(struct igb_adapter *adapter,
|
|||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
u32 reg, mpc;
|
||||
u16 phy_tmp;
|
||||
int i;
|
||||
u64 bytes, packets;
|
||||
unsigned int start;
|
||||
u64 _bytes, _packets;
|
||||
|
||||
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
|
||||
|
||||
/* Prevent stats update while adapter is being reset, or if the pci
|
||||
* connection is down.
|
||||
*/
|
||||
|
@ -5374,15 +5371,6 @@ void igb_update_stats(struct igb_adapter *adapter,
|
|||
|
||||
/* Tx Dropped needs to be maintained elsewhere */
|
||||
|
||||
/* Phy Stats */
|
||||
if (hw->phy.media_type == e1000_media_type_copper) {
|
||||
if ((adapter->link_speed == SPEED_1000) &&
|
||||
(!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
|
||||
phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
|
||||
adapter->phy_stats.idle_errors += phy_tmp;
|
||||
}
|
||||
}
|
||||
|
||||
/* Management Stats */
|
||||
adapter->stats.mgptc += rd32(E1000_MGTPTC);
|
||||
adapter->stats.mgprc += rd32(E1000_MGTPRC);
|
||||
|
@ -6386,7 +6374,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
|
|||
total_packets += tx_buffer->gso_segs;
|
||||
|
||||
/* free the skb */
|
||||
dev_kfree_skb_any(tx_buffer->skb);
|
||||
dev_consume_skb_any(tx_buffer->skb);
|
||||
|
||||
/* unmap skb header data */
|
||||
dma_unmap_single(tx_ring->dev,
|
||||
|
|
|
@ -307,7 +307,6 @@ enum ixgbe_ring_f_enum {
|
|||
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
|
||||
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
|
||||
#define IXGBE_MAX_L2A_QUEUES 4
|
||||
#define IXGBE_MAX_L2A_QUEUES 4
|
||||
#define IXGBE_BAD_L2A_QUEUE 3
|
||||
#define IXGBE_MAX_MACVLANS 31
|
||||
#define IXGBE_MAX_DCBMACVLANS 8
|
||||
|
|
|
@ -2267,7 +2267,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
|
|||
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
|
||||
adapter->tx_itr_setting = adapter->rx_itr_setting;
|
||||
|
||||
#if IS_ENABLED(CONFIG_BQL)
|
||||
/* detect ITR changes that require update of TXDCTL.WTHRESH */
|
||||
if ((adapter->tx_itr_setting != 1) &&
|
||||
(adapter->tx_itr_setting < IXGBE_100K_ITR)) {
|
||||
|
@ -2279,7 +2278,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
|
|||
(tx_itr_prev < IXGBE_100K_ITR))
|
||||
need_reset = true;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* check the old value and enable RSC if necessary */
|
||||
need_reset |= ixgbe_update_rsc(adapter);
|
||||
|
||||
|
|
|
@ -1094,7 +1094,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
|
|||
total_packets += tx_buffer->gso_segs;
|
||||
|
||||
/* free the skb */
|
||||
dev_kfree_skb_any(tx_buffer->skb);
|
||||
dev_consume_skb_any(tx_buffer->skb);
|
||||
|
||||
/* unmap skb header data */
|
||||
dma_unmap_single(tx_ring->dev,
|
||||
|
@ -2982,11 +2982,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
|
|||
* to or less than the number of on chip descriptors, which is
|
||||
* currently 40.
|
||||
*/
|
||||
#if IS_ENABLED(CONFIG_BQL)
|
||||
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
|
||||
#else
|
||||
if (!ring->q_vector || (ring->q_vector->itr < 8))
|
||||
#endif
|
||||
txdctl |= (1 << 16); /* WTHRESH = 1 */
|
||||
else
|
||||
txdctl |= (8 << 16); /* WTHRESH = 8 */
|
||||
|
|
|
@ -445,8 +445,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
|
|||
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
|
||||
{
|
||||
s32 status = 0;
|
||||
u32 time_out;
|
||||
u32 max_time_out = 10;
|
||||
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
|
||||
bool autoneg = false;
|
||||
ixgbe_link_speed speed;
|
||||
|
@ -514,25 +512,6 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
|
|||
hw->phy.ops.write_reg(hw, MDIO_CTRL1,
|
||||
MDIO_MMD_AN, autoneg_reg);
|
||||
|
||||
/* Wait for autonegotiation to finish */
|
||||
for (time_out = 0; time_out < max_time_out; time_out++) {
|
||||
udelay(10);
|
||||
/* Restart PHY autonegotiation and wait for completion */
|
||||
status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
|
||||
MDIO_MMD_AN,
|
||||
&autoneg_reg);
|
||||
|
||||
autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
|
||||
if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (time_out == max_time_out) {
|
||||
hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
|
||||
return IXGBE_ERR_LINK_SETUP;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -657,8 +636,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
|
|||
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
|
||||
{
|
||||
s32 status;
|
||||
u32 time_out;
|
||||
u32 max_time_out = 10;
|
||||
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
|
||||
bool autoneg = false;
|
||||
ixgbe_link_speed speed;
|
||||
|
@ -724,24 +701,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
|
|||
hw->phy.ops.write_reg(hw, MDIO_CTRL1,
|
||||
MDIO_MMD_AN, autoneg_reg);
|
||||
|
||||
/* Wait for autonegotiation to finish */
|
||||
for (time_out = 0; time_out < max_time_out; time_out++) {
|
||||
udelay(10);
|
||||
/* Restart PHY autonegotiation and wait for completion */
|
||||
status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
|
||||
MDIO_MMD_AN,
|
||||
&autoneg_reg);
|
||||
|
||||
autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
|
||||
if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
|
||||
break;
|
||||
}
|
||||
|
||||
if (time_out == max_time_out) {
|
||||
hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
|
||||
return IXGBE_ERR_LINK_SETUP;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue