Merge branch 'intel'
Jeff Kirsher says:

====================
This series contains updates to ixgbevf, ixgbe and igb.

Don provides 3 patches for ixgbevf where he cleans up a redundant read
mailbox failure check, adds a new function to wait for receive queues to
be disabled before disabling NAPI, and moves the API negotiation so that
it occurs in the reset path. This will allow the PF to be informed of
the API version earlier.

Jacob provides an ixgbevf and an ixgbe patch. His ixgbevf patch removes
the use of hw_dbg when the ixgbe_get_regs function is called in ethtool.
The ixgbe patch renames LL_EXTENDED_STATS and some of the functions
required to implement busy polling, in order to remove the marketing
"low latency" blurb which hides what the code actually does.

Leonardo provides an ixgbe patch to add support for a DCB registers dump
using ethtool for the 82599 and X540 ethernet controllers.

I (Jeff) provide an ixgbe patch to clean up whitespace issues seen in a
code review.

Todd provides an igb patch to add support for i354 in the ethtool
offline tests.

Laura provides an igb patch to add the ethtool callbacks necessary to
configure the number of RSS queues.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5229432f15
@@ -487,6 +487,7 @@ int igb_up(struct igb_adapter *);
 void igb_down(struct igb_adapter *);
 void igb_reinit_locked(struct igb_adapter *);
 void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
 void igb_write_rss_indir_tbl(struct igb_adapter *);
 int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
 int igb_setup_tx_resources(struct igb_ring *);

@@ -1656,7 +1656,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+	    (hw->device_id == E1000_DEV_ID_I354_SGMII)) {

 		/* Enable DH89xxCC MPHY for near end loopback */
 		reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1722,7 +1723,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
 	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+	    (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
 		u32 reg;

 		/* Disable near end loopback on DH89xxCC */
@@ -2872,6 +2874,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
 	return 0;
 }

+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int max_combined = 0;
+
+	switch (hw->mac.type) {
+	case e1000_i211:
+		max_combined = IGB_MAX_RX_QUEUES_I211;
+		break;
+	case e1000_82575:
+	case e1000_i210:
+		max_combined = IGB_MAX_RX_QUEUES_82575;
+		break;
+	case e1000_i350:
+		if (!!adapter->vfs_allocated_count) {
+			max_combined = 1;
+			break;
+		}
+		/* fall through */
+	case e1000_82576:
+		if (!!adapter->vfs_allocated_count) {
+			max_combined = 2;
+			break;
+		}
+		/* fall through */
+	case e1000_82580:
+	case e1000_i354:
+	default:
+		max_combined = IGB_MAX_RX_QUEUES;
+		break;
+	}
+
+	return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+			     struct ethtool_channels *ch)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	/* Report maximum channels */
+	ch->max_combined = igb_max_channels(adapter);
+
+	/* Report info for other vector */
+	if (adapter->msix_entries) {
+		ch->max_other = NON_Q_VECTORS;
+		ch->other_count = NON_Q_VECTORS;
+	}
+
+	ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+			    struct ethtool_channels *ch)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	unsigned int count = ch->combined_count;
+
+	/* Verify they are not requesting separate vectors */
+	if (!count || ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	/* Verify other_count is valid and has not been changed */
+	if (ch->other_count != NON_Q_VECTORS)
+		return -EINVAL;
+
+	/* Verify the number of channels doesn't exceed hw limits */
+	if (count > igb_max_channels(adapter))
+		return -EINVAL;
+
+	if (count != adapter->rss_queues) {
+		adapter->rss_queues = count;
+
+		/* Hardware has to reinitialize queues and interrupts to
+		 * match the new configuration.
+		 */
+		return igb_reinit_queues(adapter);
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
 	.get_settings		= igb_get_settings,
 	.set_settings		= igb_set_settings,
@@ -2908,6 +2992,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
 	.get_rxfh_indir_size	= igb_get_rxfh_indir_size,
 	.get_rxfh_indir		= igb_get_rxfh_indir,
 	.set_rxfh_indir		= igb_set_rxfh_indir,
+	.get_channels		= igb_get_channels,
+	.set_channels		= igb_set_channels,
 	.begin			= igb_ethtool_begin,
 	.complete		= igb_ethtool_complete,
 };

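The new get_channels/set_channels hooks above are reached from user space through the SIOCETHTOOL ioctl (ethtool -l / ethtool -L on the command line). Below is a minimal sketch of such a caller; the interface name "eth0" is an assumption for illustration, while struct ethtool_channels and the ETHTOOL_GCHANNELS/ETHTOOL_SCHANNELS commands are the standard <linux/ethtool.h> definitions.

/* Hedged user-space sketch, not part of the series. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed device */
	ifr.ifr_data = (void *)&ch;

	/* Read the limits reported by igb_get_channels() */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCHANNELS");
		close(fd);
		return 1;
	}
	printf("combined: %u (max %u), other: %u\n",
	       ch.combined_count, ch.max_combined, ch.other_count);

	/* Request two RSS queues; igb_set_channels() rejects separate
	 * rx/tx counts and out-of-range values with -EINVAL, then
	 * reinitializes the queues on success. Reusing ch preserves
	 * other_count, which the driver requires to stay unchanged. */
	ch.cmd = ETHTOOL_SCHANNELS;
	ch.combined_count = 2;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCHANNELS");

	close(fd);
	return 0;
}
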
@@ -7838,4 +7838,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 	return E1000_SUCCESS;

 }
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	if (netif_running(netdev))
+		igb_close(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	if (netif_running(netdev))
+		err = igb_open(netdev);
+
+	return err;
+}
 /* igb_main.c */

@@ -55,7 +55,7 @@
 #include <net/busy_poll.h>

 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
@@ -187,11 +187,11 @@ struct ixgbe_rx_buffer {
 struct ixgbe_queue_stats {
 	u64 packets;
 	u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 	u64 yields;
 	u64 misses;
 	u64 cleaned;
-#endif  /* LL_EXTENDED_STATS */
+#endif  /* BP_EXTENDED_STATS */
 };

 struct ixgbe_tx_queue_stats {
@@ -399,7 +399,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
 		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
 		rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		q_vector->tx.ring->stats.yields++;
 #endif
 	} else
@@ -432,7 +432,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
 	if ((q_vector->state & IXGBE_QV_LOCKED)) {
 		q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
 		rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		q_vector->rx.ring->stats.yields++;
 #endif
 	} else
@@ -457,7 +457,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 }

 /* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
 	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
 	return q_vector->state & IXGBE_QV_USER_PEND;
@@ -487,7 +487,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 	return false;
 }

-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
 	return false;
 }

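The hunks above make the intent of the rename visible: the guarded counters only exist when the kernel is built with CONFIG_NET_RX_BUSY_POLL, so swapping LL_EXTENDED_STATS for BP_EXTENDED_STATS changes naming only, never the compiled layout. A stand-alone toy sketch of the same compile-time pattern (not driver code; the demo config macro is an assumption):

#include <stdint.h>
#include <stdio.h>

#define CONFIG_NET_RX_BUSY_POLL 1	/* pretend kernel config for the demo */

#ifdef CONFIG_NET_RX_BUSY_POLL
#define BP_EXTENDED_STATS
#endif

struct queue_stats {
	uint64_t packets;
	uint64_t bytes;
#ifdef BP_EXTENDED_STATS
	uint64_t yields;	/* lock contention between NAPI and busy poll */
	uint64_t misses;	/* busy poll found no work */
	uint64_t cleaned;	/* packets reaped while busy polling */
#endif
};

int main(void)
{
	/* The extra fields appear or vanish with the config option. */
	printf("sizeof(struct queue_stats) = %zu\n",
	       sizeof(struct queue_stats));
	return 0;
}
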
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)

 static int ixgbe_get_regs_len(struct net_device *netdev)
 {
-#define IXGBE_REGS_LEN  1129
+#define IXGBE_REGS_LEN  1139
 	return IXGBE_REGS_LEN * sizeof(u32);
 }

@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
 	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

 	/* DCB */
-	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
-	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
-	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
-	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
-	for (i = 0; i < 8; i++)
-		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
+	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+		for (i = 0; i < 8; i++)
+			regs_buff[833 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[841 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[849 + i] =
+				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[857 + i] =
+				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+		for (i = 0; i < 8; i++)
+			regs_buff[833 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[841 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[849 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[857 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+		break;
+	default:
+		break;
+	}
+
+	for (i = 0; i < 8; i++)
+		regs_buff[865 + i] =
+			IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
+	for (i = 0; i < 8; i++)
+		regs_buff[873 + i] =
+			IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

 	/* Statistics */
 	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,

 	/* 82599 X540 specific registers  */
 	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+	/* 82599 X540 specific DCB registers  */
+	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+	for (i = 0; i < 4; i++)
+		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+					/* same as RTTQCNRM */
+	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+					/* same as RTTQCNRR */
+
+	/* X540 specific DCB registers  */
+	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
 }

 static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_misses", i);
+			sprintf(p, "tx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_cleaned", i);
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_misses", i);
+			sprintf(p, "rx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_cleaned", i);
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
 			sprintf(p, "tx_pb_%u_pxon", i);

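The enlarged register dump above (IXGBE_REGS_LEN grows to 1139 words to cover the DCB registers) is what ethtool -d retrieves. A hedged user-space sketch of the same two-step ioctl sequence: query regdump_len via ETHTOOL_GDRVINFO, then fetch the buffer with ETHTOOL_GREGS (the device name "eth0" is assumed):

/* Illustrative sketch, not part of the series. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_regs *regs;
	struct ifreq ifr;
	unsigned int i;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed device */

	/* regdump_len comes from the driver's get_regs_len() callback */
	ifr.ifr_data = (void *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	regs = calloc(1, sizeof(*regs) + drvinfo.regdump_len);
	if (!regs)
		return 1;
	regs->cmd = ETHTOOL_GREGS;
	regs->len = drvinfo.regdump_len;

	/* The kernel fills regs->data from the driver's get_regs() */
	ifr.ifr_data = (void *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < regs->len / 4; i++)
			printf("reg[%u] = 0x%08x\n", i,
			       ((__u32 *)regs->data)[i]);

	free(regs);
	close(fd);
	return 0;
}
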
@@ -1585,7 +1585,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;

-	if (ixgbe_qv_ll_polling(q_vector))
+	if (ixgbe_qv_busy_polling(q_vector))
 		netif_receive_skb(skb);
 	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 		napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2097,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)

 	ixgbe_for_each_ring(ring, q_vector->rx) {
 		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		if (found)
 			ring->stats.cleaned += found;
 		else

@@ -57,28 +57,28 @@
 #define IXGBE_SFF_QSFP_DEVICE_TECH	0x93

 /* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
-#define IXGBE_SFF_1GBASET_CAPABLE            0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK        0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G         0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G          0x0
-#define IXGBE_SFF_ADDRESSING_MODE            0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE       0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE      0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE		0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE		0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING	0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE		0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE		0x2
+#define IXGBE_SFF_1GBASET_CAPABLE		0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE		0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE		0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK		0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G		0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G		0x0
+#define IXGBE_SFF_ADDRESSING_MODE		0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE		0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE		0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE	0x23
 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL	0x0
-#define IXGBE_I2C_EEPROM_READ_MASK           0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+#define IXGBE_I2C_EEPROM_READ_MASK		0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK		0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION	0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS		0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL		0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3

 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE	0x400

@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTDQSEL	0x04904
 #define IXGBE_RTTDT1C	0x04908
 #define IXGBE_RTTDT1S	0x0490C
+#define IXGBE_RTTQCNCR	0x08B00
+#define IXGBE_RTTQCNTG	0x04A90
+#define IXGBE_RTTBCNRD	0x0498C
+#define IXGBE_RTTQCNRR	0x0498C
 #define IXGBE_RTTDTECC	0x04990
 #define IXGBE_RTTDTECC_NO_BCN	0x00000100
 #define IXGBE_RTTBCNRC	0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTBCNRC_RF_INT_MASK \
 	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
 #define IXGBE_RTTBCNRM	0x04980
+#define IXGBE_RTTQCNRM	0x04980

 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL	0x02410 /* FC User Desc. PTR Low */

@@ -32,12 +32,12 @@
 #include "ixgbe.h"
 #include "ixgbe_phy.h"

-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES   128
-#define IXGBE_X540_MC_TBL_SIZE   128
-#define IXGBE_X540_VFT_TBL_SIZE  128
-#define IXGBE_X540_RX_PB_SIZE    384
+#define IXGBE_X540_MAX_TX_QUEUES	128
+#define IXGBE_X540_MAX_RX_QUEUES	128
+#define IXGBE_X540_RAR_ENTRIES		128
+#define IXGBE_X540_MC_TBL_SIZE		128
+#define IXGBE_X540_VFT_TBL_SIZE		128
+#define IXGBE_X540_RX_PB_SIZE		384

 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);

@@ -140,58 +140,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)

 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

-static char *ixgbevf_reg_names[] = {
-	"IXGBE_VFCTRL",
-	"IXGBE_VFSTATUS",
-	"IXGBE_VFLINKS",
-	"IXGBE_VFRXMEMWRAP",
-	"IXGBE_VFFRTIMER",
-	"IXGBE_VTEICR",
-	"IXGBE_VTEICS",
-	"IXGBE_VTEIMS",
-	"IXGBE_VTEIMC",
-	"IXGBE_VTEIAC",
-	"IXGBE_VTEIAM",
-	"IXGBE_VTEITR",
-	"IXGBE_VTIVAR",
-	"IXGBE_VTIVAR_MISC",
-	"IXGBE_VFRDBAL0",
-	"IXGBE_VFRDBAL1",
-	"IXGBE_VFRDBAH0",
-	"IXGBE_VFRDBAH1",
-	"IXGBE_VFRDLEN0",
-	"IXGBE_VFRDLEN1",
-	"IXGBE_VFRDH0",
-	"IXGBE_VFRDH1",
-	"IXGBE_VFRDT0",
-	"IXGBE_VFRDT1",
-	"IXGBE_VFRXDCTL0",
-	"IXGBE_VFRXDCTL1",
-	"IXGBE_VFSRRCTL0",
-	"IXGBE_VFSRRCTL1",
-	"IXGBE_VFPSRTYPE",
-	"IXGBE_VFTDBAL0",
-	"IXGBE_VFTDBAL1",
-	"IXGBE_VFTDBAH0",
-	"IXGBE_VFTDBAH1",
-	"IXGBE_VFTDLEN0",
-	"IXGBE_VFTDLEN1",
-	"IXGBE_VFTDH0",
-	"IXGBE_VFTDH1",
-	"IXGBE_VFTDT0",
-	"IXGBE_VFTDT1",
-	"IXGBE_VFTXDCTL0",
-	"IXGBE_VFTXDCTL1",
-	"IXGBE_VFTDWBAL0",
-	"IXGBE_VFTDWBAL1",
-	"IXGBE_VFTDWBAH0",
-	"IXGBE_VFTDWBAH1"
-};
-
-
 static int ixgbevf_get_regs_len(struct net_device *netdev)
 {
-	return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+	return IXGBE_REGS_LEN * sizeof(u32);
 }

 static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +216,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
 		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
 	for (i = 0; i < 2; i++)
 		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
-	for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
-		hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
 }

 static void ixgbevf_get_drvinfo(struct net_device *netdev,

@@ -756,37 +756,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
 	struct ixgbevf_adapter *adapter = data;
-	struct pci_dev *pdev = adapter->pdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 msg;
-	bool got_ack = false;

 	hw->mac.get_link_status = 1;
-	if (!hw->mbx.ops.check_for_ack(hw))
-		got_ack = true;
-
-	if (!hw->mbx.ops.check_for_msg(hw)) {
-		hw->mbx.ops.read(hw, &msg, 1);
-
-		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
-			mod_timer(&adapter->watchdog_timer,
-				  round_jiffies(jiffies + 1));
-			adapter->link_up = false;
-		}
-
-		if (msg & IXGBE_VT_MSGTYPE_NACK)
-			dev_info(&pdev->dev,
-				 "Last Request of type %2.2x to PF Nacked\n",
-				 msg & 0xFF);
-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-	}
-
-	/* checking for the ack clears the PFACK bit.  Place
-	 * it back in the v2p_mailbox cache so that anyone
-	 * polling for an ack will not miss it
-	 */
-	if (got_ack)
-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer, jiffies);

 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1327,27 +1302,51 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 	}
 }

-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-						int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+					 int rxr)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
 	int j = adapter->rx_ring[rxr].reg_idx;
-	int k;

-	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
-		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-			break;
-		else
-			msleep(1);
-	}
-	if (k >= IXGBE_MAX_RX_DESC_POLL) {
-		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
-		       "not set within the polling period\n", rxr);
-	}
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+		       rxr);

-	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
-				adapter->rx_ring[rxr].count - 1);
+	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+				(adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+		       reg_idx);
 }

 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1544,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;

-	ixgbevf_negotiate_api(adapter);
-
 	ixgbevf_reset_queues(adapter);

 	ixgbevf_configure(adapter);
@@ -1679,7 +1676,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)

 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBEVF_DOWN, &adapter->state);
-	/* disable receives */
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);

 	netif_tx_disable(netdev);
@@ -1733,10 +1733,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;

-	if (hw->mac.ops.reset_hw(hw))
+	if (hw->mac.ops.reset_hw(hw)) {
 		hw_dbg(hw, "PF still resetting\n");
-	else
+	} else {
 		hw->mac.ops.init_hw(hw);
+		ixgbevf_negotiate_api(adapter);
+	}

 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -2072,6 +2074,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 	hw->mac.max_tx_queues = 2;
 	hw->mac.max_rx_queues = 2;

+	/* lock to protect mailbox accesses */
+	spin_lock_init(&adapter->mbx_lock);
+
 	err = hw->mac.ops.reset_hw(hw);
 	if (err) {
 		dev_info(&pdev->dev,
@@ -2082,6 +2087,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 		pr_err("init_shared_code failed: %d\n", err);
 		goto out;
 	}
+	ixgbevf_negotiate_api(adapter);
 	err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
 	if (err)
 		dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2103,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
 	}

-	/* lock to protect mailbox accesses */
-	spin_lock_init(&adapter->mbx_lock);
-
 	/* Enable dynamic interrupt throttling rates */
 	adapter->rx_itr_setting = 1;
 	adapter->tx_itr_setting = 1;
@@ -2620,8 +2623,6 @@ static int ixgbevf_open(struct net_device *netdev)
 		}
 	}

-	ixgbevf_negotiate_api(adapter);
-
 	/* setup queue reg_idx and Rx queue count */
 	err = ixgbevf_setup_queues(adapter);
 	if (err)
@@ -3216,6 +3217,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
 	}
 	pci_set_master(pdev);

+	ixgbevf_reset(adapter);
+
 	rtnl_lock();
 	err = ixgbevf_init_interrupt_scheme(adapter);
 	rtnl_unlock();
@@ -3224,8 +3227,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
 		return err;
 	}

-	ixgbevf_reset(adapter);
-
 	if (netif_running(netdev)) {
 		err = ixgbevf_open(netdev);
 		if (err)