Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to ixgbevf and e1000e.

Alex's ixgbevf patch addresses several race issues that become possible because next_to_watch could be set to a value indicating that a descriptor is done when it is not. To correct that, next_to_watch is instead made a pointer that is set to NULL during cleanup and set to the eop_desc only after the descriptor rings have been written.

Stephen's ixgbevf patch makes the PCI id table a const and reformats the table to match what the ixgbe driver does.

The remaining 13 patches from Bruce are cleanup patches for e1000e that resolve checkpatch.pl warnings/errors, remove blank lines where necessary, and fix code formatting.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
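The race-fix approach described above can be summarized with a small sketch. This is an illustration only: the structure, field, and function names below are assumptions made for the example and are not the actual ixgbevf code, which additionally uses memory barriers and ring bookkeeping omitted here.

/* Minimal sketch of tracking Tx completion through a descriptor pointer
 * instead of an index.  All names here are illustrative assumptions.
 */
#include <stdbool.h>
#include <stddef.h>

struct tx_desc {
	unsigned int status;            /* hardware sets a DONE bit here */
};

struct tx_buffer {
	struct tx_desc *next_to_watch;  /* NULL until the frame is fully armed */
};

#define TX_DESC_DONE 0x1u

/* Transmit path: write every descriptor of the frame first, then publish
 * the end-of-packet descriptor last (a real driver issues a write barrier
 * before this store).  Until then, cleanup sees NULL and cannot mistake a
 * half-written frame for a completed one.
 */
static void tx_arm(struct tx_buffer *first, struct tx_desc *eop_desc)
{
	first->next_to_watch = eop_desc;
}

/* Cleanup path: only trust the DONE bit once next_to_watch is non-NULL,
 * and clear the pointer so the slot cannot be re-checked while stale.
 */
static bool tx_clean_one(struct tx_buffer *buf)
{
	struct tx_desc *eop = buf->next_to_watch;

	if (!eop || !(eop->status & TX_DESC_DONE))
		return false;

	buf->next_to_watch = NULL;
	return true;
}

The ordering (fill the descriptors, then store the pointer) closes the window in which an index-based next_to_watch could already appear "done".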
This commit is contained in: commit 0bcf6aa72b
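Most of the e1000e diff below applies a few recurring checkpatch.pl-driven cleanups; the most frequent is replacing fixed udelay() busy-waits with usleep_range() where the code is allowed to sleep. The sketch below only illustrates that pattern: the function names are hypothetical, and only udelay()/usleep_range() are real kernel APIs.

/* Illustrative sketch of the recurring conversion in the hunks below,
 * assuming process context where sleeping is permitted.
 */
#include <linux/delay.h>

static void mdic_settle_before(void)
{
	udelay(200);            /* busy-waits the CPU for ~200us */
}

static void mdic_settle_after(void)
{
	usleep_range(200, 400); /* sleeps, giving the scheduler a 200-400us window */
}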
|
@ -37,7 +37,9 @@
 * "index + 5".
 */
static const u16 e1000_gg82563_cable_length_table[] = {
	 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
	0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
};

#define GG82563_CABLE_LENGTH_TABLE_SIZE \
	ARRAY_SIZE(e1000_gg82563_cable_length_table)

@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
|
|||
nvm->type = e1000_nvm_eeprom_spi;
|
||||
|
||||
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
|
||||
E1000_EECD_SIZE_EX_SHIFT);
|
||||
E1000_EECD_SIZE_EX_SHIFT);
|
||||
|
||||
/* Added to a constant, "size" becomes the left-shift value
|
||||
* for setting word_size.
|
||||
|
@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
|
|||
* before the device has completed the "Page Select" MDI
|
||||
* transaction. So we wait 200us after each MDI command...
|
||||
*/
|
||||
udelay(200);
|
||||
usleep_range(200, 400);
|
||||
|
||||
/* ...and verify the command was successful. */
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
|
||||
|
@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
|
|||
return -E1000_ERR_PHY;
|
||||
}
|
||||
|
||||
udelay(200);
|
||||
usleep_range(200, 400);
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw,
|
||||
MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
|
||||
udelay(200);
|
||||
usleep_range(200, 400);
|
||||
} else {
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw,
|
||||
MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
}
|
||||
|
||||
e1000_release_phy_80003es2lan(hw);
|
||||
|
@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
|
|||
* before the device has completed the "Page Select" MDI
|
||||
* transaction. So we wait 200us after each MDI command...
|
||||
*/
|
||||
udelay(200);
|
||||
usleep_range(200, 400);
|
||||
|
||||
/* ...and verify the command was successful. */
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
|
||||
|
@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
|
|||
return -E1000_ERR_PHY;
|
||||
}
|
||||
|
||||
udelay(200);
|
||||
usleep_range(200, 400);
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
MAX_PHY_REG_ADDRESS &
|
||||
offset, data);
|
||||
|
||||
udelay(200);
|
||||
usleep_range(200, 400);
|
||||
} else {
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
MAX_PHY_REG_ADDRESS &
|
||||
offset, data);
|
||||
}
|
||||
|
||||
e1000_release_phy_80003es2lan(hw);
|
||||
|
@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
|
|||
e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
|
||||
|
||||
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
|
||||
100000, &link);
|
||||
100000, &link);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
|
|||
|
||||
/* Try once more */
|
||||
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
|
||||
100000, &link);
|
||||
100000, &link);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
|
|||
s32 ret_val;
|
||||
|
||||
if (hw->phy.media_type == e1000_media_type_copper) {
|
||||
ret_val = e1000e_get_speed_and_duplex_copper(hw,
|
||||
speed,
|
||||
duplex);
|
||||
ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
|
||||
hw->phy.ops.cfg_on_link_up(hw);
|
||||
} else {
|
||||
ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
|
||||
speed,
|
||||
duplex);
|
||||
speed,
|
||||
duplex);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
|
@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
|
|||
|
||||
/* Initialize identification LED */
|
||||
ret_val = mac->ops.id_led_init(hw);
|
||||
/* An error is not fatal and we should not stop init due to this */
|
||||
if (ret_val)
|
||||
e_dbg("Error initializing identification LED\n");
|
||||
/* This is not fatal and we should not stop init due to this */
|
||||
|
||||
/* Disabling VLAN filtering */
|
||||
e_dbg("Initializing the IEEE VLAN\n");
|
||||
|
@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
|
|||
|
||||
/* Set the transmit descriptor write-back policy */
|
||||
reg_data = er32(TXDCTL(0));
|
||||
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
|
||||
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
|
||||
ew32(TXDCTL(0), reg_data);
|
||||
|
||||
/* ...for both queues. */
|
||||
reg_data = er32(TXDCTL(1));
|
||||
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
|
||||
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
|
||||
ew32(TXDCTL(1), reg_data);
|
||||
|
||||
/* Enable retransmit on late collisions */
|
||||
|
@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
|
|||
/* default to true to enable the MDIC W/A */
|
||||
hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
|
||||
|
||||
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET >>
|
||||
E1000_KMRNCTRLSTA_OFFSET_SHIFT,
|
||||
&i);
|
||||
ret_val =
|
||||
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
|
||||
E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
|
||||
if (!ret_val) {
|
||||
if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
|
||||
E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
|
||||
E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
|
||||
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
|
||||
}
|
||||
|
||||
|
@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
|
|||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val;
|
||||
u32 ctrl_ext;
|
||||
u32 reg;
|
||||
u16 data;
|
||||
|
||||
ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
|
||||
|
@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
|
|||
}
|
||||
|
||||
/* Bypass Rx and Tx FIFO's */
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
|
||||
E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
|
||||
E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
|
||||
reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
|
||||
data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
|
||||
E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
|
||||
&data);
|
||||
reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
|
||||
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
|
||||
data);
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
ctrl_ext = er32(CTRL_EXT);
|
||||
ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
|
||||
ew32(CTRL_EXT, ctrl_ext);
|
||||
reg = er32(CTRL_EXT);
|
||||
reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
|
||||
ew32(CTRL_EXT, reg);
|
||||
|
||||
ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
|
||||
if (ret_val)
|
||||
|
@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
|
|||
* polling the phy; this fixes erroneous timeouts at 10Mbps.
|
||||
*/
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
|
||||
0xFFFF);
|
||||
0xFFFF);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
|
||||
&reg_data);
|
||||
&reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
reg_data |= 0x3F;
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
|
||||
reg_data);
|
||||
reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
|
||||
&reg_data);
|
||||
ret_val =
|
||||
e1000_read_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
|
||||
&reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
|
||||
reg_data);
|
||||
ret_val =
|
||||
e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
|
||||
reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
|
|||
|
||||
if (hw->phy.media_type == e1000_media_type_copper) {
|
||||
ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
|
||||
&duplex);
|
||||
&duplex);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
|
|||
u16 reg_data, reg_data2;
|
||||
|
||||
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
|
||||
reg_data);
|
||||
ret_val =
|
||||
e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
|
||||
reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
|
|||
u32 i = 0;
|
||||
|
||||
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
|
||||
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
|
||||
reg_data);
|
||||
ret_val =
|
||||
e1000_write_kmrn_reg_80003es2lan(hw,
|
||||
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
|
||||
reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
|
|||
return ret_val;
|
||||
|
||||
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
|
||||
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
|
||||
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
|
||||
ew32(KMRNCTRLSTA, kmrnctrlsta);
|
||||
e1e_flush();
|
||||
|
||||
|
@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
|
|||
return ret_val;
|
||||
|
||||
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
|
||||
E1000_KMRNCTRLSTA_OFFSET) | data;
|
||||
E1000_KMRNCTRLSTA_OFFSET) | data;
|
||||
ew32(KMRNCTRLSTA, kmrnctrlsta);
|
||||
e1e_flush();
|
||||
|
||||
|
@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = {
|
|||
.phy_ops = &es2_phy_ops,
|
||||
.nvm_ops = &es2_nvm_ops,
|
||||
};
@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
|
|||
default:
|
||||
nvm->type = e1000_nvm_eeprom_spi;
|
||||
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
|
||||
E1000_EECD_SIZE_EX_SHIFT);
|
||||
E1000_EECD_SIZE_EX_SHIFT);
|
||||
/* Added to a constant, "size" becomes the left-shift value
|
||||
* for setting word_size.
|
||||
*/
|
||||
|
@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
|
|||
return ret_val;
|
||||
|
||||
phy->id = (u32)(phy_id << 16);
|
||||
udelay(20);
|
||||
usleep_range(20, 40);
|
||||
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
|
|||
if (!(swsm & E1000_SWSM_SMBI))
|
||||
break;
|
||||
|
||||
udelay(50);
|
||||
usleep_range(50, 100);
|
||||
i++;
|
||||
}
|
||||
|
||||
|
@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
|
|||
if (er32(SWSM) & E1000_SWSM_SWESMBI)
|
||||
break;
|
||||
|
||||
udelay(50);
|
||||
usleep_range(50, 100);
|
||||
}
|
||||
|
||||
if (i == fw_timeout) {
|
||||
|
@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
|
|||
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
|
||||
ew32(SWSM, swsm);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
|
|||
}
|
||||
|
||||
for (i = 0; i < words; i++) {
|
||||
eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
|
||||
((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
|
||||
E1000_NVM_RW_REG_START;
|
||||
eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
|
||||
((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
|
||||
E1000_NVM_RW_REG_START);
|
||||
|
||||
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
|
||||
if (ret_val)
|
||||
|
@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
|
|||
s32 timeout = PHY_CFG_TIMEOUT;
|
||||
|
||||
while (timeout) {
|
||||
if (er32(EEMNGCTL) &
|
||||
E1000_NVM_CFG_DONE_PORT_0)
|
||||
if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
|
||||
break;
|
||||
usleep_range(1000, 2000);
|
||||
timeout--;
|
||||
|
@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
|
|||
}
|
||||
|
||||
if (hw->nvm.type == e1000_nvm_flash_hw) {
|
||||
udelay(10);
|
||||
usleep_range(10, 20);
|
||||
ctrl_ext = er32(CTRL_EXT);
|
||||
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
|
||||
ew32(CTRL_EXT, ctrl_ext);
|
||||
|
@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
|
|||
|
||||
/* Initialize identification LED */
|
||||
ret_val = mac->ops.id_led_init(hw);
|
||||
/* An error is not fatal and we should not stop init due to this */
|
||||
if (ret_val)
|
||||
e_dbg("Error initializing identification LED\n");
|
||||
/* This is not fatal and we should not stop init due to this */
|
||||
|
||||
/* Disabling VLAN filtering */
|
||||
e_dbg("Initializing the IEEE VLAN\n");
|
||||
|
@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
|
|||
|
||||
/* Set the transmit descriptor write-back policy */
|
||||
reg_data = er32(TXDCTL(0));
|
||||
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB |
|
||||
E1000_TXDCTL_COUNT_DESC;
|
||||
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
|
||||
ew32(TXDCTL(0), reg_data);
|
||||
|
||||
/* ...for both queues. */
|
||||
|
@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
|
|||
break;
|
||||
default:
|
||||
reg_data = er32(TXDCTL(1));
|
||||
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB |
|
||||
E1000_TXDCTL_COUNT_DESC;
|
||||
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB |
|
||||
E1000_TXDCTL_COUNT_DESC);
|
||||
ew32(TXDCTL(1), reg_data);
|
||||
break;
|
||||
}
|
||||
|
@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
|
|||
status = er32(STATUS);
|
||||
er32(RXCW);
|
||||
/* SYNCH bit and IV bit are sticky */
|
||||
udelay(10);
|
||||
usleep_range(10, 20);
|
||||
rxcw = er32(RXCW);
|
||||
|
||||
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
|
||||
|
@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
|
|||
* the IV bit and restart Autoneg
|
||||
*/
|
||||
for (i = 0; i < AN_RETRY_COUNT; i++) {
|
||||
udelay(10);
|
||||
usleep_range(10, 20);
|
||||
rxcw = er32(RXCW);
|
||||
if ((rxcw & E1000_RXCW_SYNCH) &&
|
||||
(rxcw & E1000_RXCW_C))
|
||||
|
@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = {
|
|||
.phy_ops = &e82_phy_ops_bm,
|
||||
.nvm_ops = &e82571_nvm_ops,
|
||||
};
@ -44,6 +44,8 @@
|
|||
#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
|
||||
#define E1000_EIAC_MASK_82574 0x01F00000
|
||||
|
||||
#define E1000_IVAR_INT_ALLOC_VALID 0x8
|
||||
|
||||
/* Manageability Operation Mode mask */
|
||||
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
|
||||
|
||||
|
|
|
@ -66,7 +66,7 @@
|
|||
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
|
||||
#define E1000_CTRL_EXT_EIAME 0x01000000
|
||||
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
|
||||
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
|
||||
#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
|
||||
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
|
||||
#define E1000_CTRL_EXT_LSECCK 0x00001000
|
||||
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
|
||||
|
@ -216,6 +216,8 @@
|
|||
#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
|
||||
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
|
||||
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
|
||||
#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
|
||||
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
|
||||
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
|
||||
#define E1000_CTRL_RST 0x04000000 /* Global reset */
|
||||
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
|
||||
|
@ -239,12 +241,11 @@
|
|||
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
|
||||
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
|
||||
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
|
||||
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
|
||||
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
|
||||
|
||||
#define HALF_DUPLEX 1
|
||||
#define FULL_DUPLEX 2
|
||||
|
||||
|
||||
#define ADVERTISE_10_HALF 0x0001
|
||||
#define ADVERTISE_10_FULL 0x0002
|
||||
#define ADVERTISE_100_HALF 0x0004
|
||||
|
@ -311,6 +312,7 @@
|
|||
|
||||
/* SerDes Control */
|
||||
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
|
||||
#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
|
||||
|
||||
/* Receive Checksum Control */
|
||||
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
|
||||
|
@ -400,7 +402,8 @@
|
|||
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
|
||||
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
|
||||
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
|
||||
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
|
||||
/* If this bit asserted, the driver should claim the interrupt */
|
||||
#define E1000_ICR_INT_ASSERTED 0x80000000
|
||||
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
|
||||
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
|
||||
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
|
||||
|
@ -583,13 +586,13 @@
|
|||
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
|
||||
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
|
||||
|
||||
#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
|
||||
#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
|
||||
#define E1000_NVM_RW_REG_START 1 /* Start operation */
|
||||
#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
|
||||
#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
|
||||
#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
|
||||
#define E1000_FLASH_UPDATES 2000
|
||||
#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
|
||||
#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
|
||||
#define E1000_NVM_RW_REG_START 1 /* Start operation */
|
||||
#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
|
||||
#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
|
||||
#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
|
||||
#define E1000_FLASH_UPDATES 2000
|
||||
|
||||
/* NVM Word Offsets */
|
||||
#define NVM_COMPAT 0x0003
|
||||
|
|
|
@ -61,7 +61,6 @@ struct e1000_info;
|
|||
#define e_notice(format, arg...) \
|
||||
netdev_notice(adapter->netdev, format, ## arg)
|
||||
|
||||
|
||||
/* Interrupt modes, as used by the IntMode parameter */
|
||||
#define E1000E_INT_MODE_LEGACY 0
|
||||
#define E1000E_INT_MODE_MSI 1
|
||||
|
@ -239,9 +238,8 @@ struct e1000_adapter {
|
|||
u16 tx_itr;
|
||||
u16 rx_itr;
|
||||
|
||||
/* Tx */
|
||||
struct e1000_ring *tx_ring /* One per active queue */
|
||||
____cacheline_aligned_in_smp;
|
||||
/* Tx - one ring per active queue */
|
||||
struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
|
||||
u32 tx_fifo_limit;
|
||||
|
||||
struct napi_struct napi;
|
||||
|
@ -487,8 +485,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
|
|||
extern void e1000e_free_rx_resources(struct e1000_ring *ring);
|
||||
extern void e1000e_free_tx_resources(struct e1000_ring *ring);
|
||||
extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
|
||||
struct rtnl_link_stats64
|
||||
*stats);
|
||||
struct rtnl_link_stats64
|
||||
*stats);
|
||||
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
|
||||
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
|
||||
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
|
||||
|
@ -558,12 +556,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
|
|||
return hw->nvm.ops.update(hw);
|
||||
}
|
||||
|
||||
static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
|
||||
static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
|
||||
u16 *data)
|
||||
{
|
||||
return hw->nvm.ops.read(hw, offset, words, data);
|
||||
}
|
||||
|
||||
static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
|
||||
static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
|
||||
u16 *data)
|
||||
{
|
||||
return hw->nvm.ops.write(hw, offset, words, data);
|
||||
}
|
||||
|
@ -597,7 +597,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
|
|||
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
|
||||
|
||||
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
|
||||
udelay(50);
|
||||
usleep_range(50, 100);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
|
||||
#include "e1000.h"
|
||||
|
||||
enum {NETDEV_STATS, E1000_STATS};
|
||||
enum { NETDEV_STATS, E1000_STATS };
|
||||
|
||||
struct e1000_stats {
|
||||
char stat_string[ETH_GSTRING_LEN];
|
||||
|
@ -120,6 +120,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
|
|||
"Interrupt test (offline)", "Loopback test (offline)",
|
||||
"Link test (on/offline)"
|
||||
};
|
||||
|
||||
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
|
||||
|
||||
static int e1000_get_settings(struct net_device *netdev,
|
||||
|
@ -196,8 +197,7 @@ static int e1000_get_settings(struct net_device *netdev,
|
|||
/* MDI-X => 2; MDI =>1; Invalid =>0 */
|
||||
if ((hw->phy.media_type == e1000_media_type_copper) &&
|
||||
netif_carrier_ok(netdev))
|
||||
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
|
||||
ETH_TP_MDI;
|
||||
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
|
||||
else
|
||||
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
|
||||
|
||||
|
@ -223,8 +223,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
|
|||
|
||||
/* Fiber NICs only allow 1000 gbps Full duplex */
|
||||
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
|
||||
spd != SPEED_1000 &&
|
||||
dplx != DUPLEX_FULL) {
|
||||
(spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
|
||||
goto err_inval;
|
||||
}
|
||||
|
||||
|
@ -297,12 +296,10 @@ static int e1000_set_settings(struct net_device *netdev,
|
|||
hw->mac.autoneg = 1;
|
||||
if (hw->phy.media_type == e1000_media_type_fiber)
|
||||
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
|
||||
ADVERTISED_FIBRE |
|
||||
ADVERTISED_Autoneg;
|
||||
ADVERTISED_FIBRE | ADVERTISED_Autoneg;
|
||||
else
|
||||
hw->phy.autoneg_advertised = ecmd->advertising |
|
||||
ADVERTISED_TP |
|
||||
ADVERTISED_Autoneg;
|
||||
ADVERTISED_TP | ADVERTISED_Autoneg;
|
||||
ecmd->advertising = hw->phy.autoneg_advertised;
|
||||
if (adapter->fc_autoneg)
|
||||
hw->fc.requested_mode = e1000_fc_default;
|
||||
|
@ -345,7 +342,7 @@ static void e1000_get_pauseparam(struct net_device *netdev,
|
|||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
||||
pause->autoneg =
|
||||
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
|
||||
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
|
||||
|
||||
if (hw->fc.current_mode == e1000_fc_rx_pause) {
|
||||
pause->rx_pause = 1;
|
||||
|
@ -434,7 +431,7 @@ static void e1000_get_regs(struct net_device *netdev,
|
|||
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
|
||||
|
||||
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
|
||||
adapter->pdev->device;
|
||||
adapter->pdev->device;
|
||||
|
||||
regs_buff[0] = er32(CTRL);
|
||||
regs_buff[1] = er32(STATUS);
|
||||
|
@ -502,8 +499,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
|
|||
first_word = eeprom->offset >> 1;
|
||||
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
|
||||
|
||||
eeprom_buff = kmalloc(sizeof(u16) *
|
||||
(last_word - first_word + 1), GFP_KERNEL);
|
||||
eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
|
||||
GFP_KERNEL);
|
||||
if (!eeprom_buff)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -514,7 +511,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
|
|||
} else {
|
||||
for (i = 0; i < last_word - first_word + 1; i++) {
|
||||
ret_val = e1000_read_nvm(hw, first_word + i, 1,
|
||||
&eeprom_buff[i]);
|
||||
&eeprom_buff[i]);
|
||||
if (ret_val)
|
||||
break;
|
||||
}
|
||||
|
@ -552,7 +549,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
|
|||
if (eeprom->len == 0)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
|
||||
if (eeprom->magic !=
|
||||
(adapter->pdev->vendor | (adapter->pdev->device << 16)))
|
||||
return -EFAULT;
|
||||
|
||||
if (adapter->flags & FLAG_READ_ONLY_NVM)
|
||||
|
@ -578,7 +576,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
|
|||
/* need read/modify/write of last changed EEPROM word */
|
||||
/* only the first byte of the word is being modified */
|
||||
ret_val = e1000_read_nvm(hw, last_word, 1,
|
||||
&eeprom_buff[last_word - first_word]);
|
||||
&eeprom_buff[last_word - first_word]);
|
||||
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
@ -617,8 +615,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
|
|||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
strlcpy(drvinfo->driver, e1000e_driver_name,
|
||||
sizeof(drvinfo->driver));
|
||||
strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
|
||||
strlcpy(drvinfo->version, e1000e_driver_version,
|
||||
sizeof(drvinfo->version));
|
||||
|
||||
|
@ -626,10 +623,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
|
|||
* PCI-E controllers
|
||||
*/
|
||||
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d-%d",
|
||||
(adapter->eeprom_vers & 0xF000) >> 12,
|
||||
(adapter->eeprom_vers & 0x0FF0) >> 4,
|
||||
(adapter->eeprom_vers & 0x000F));
|
||||
"%d.%d-%d",
|
||||
(adapter->eeprom_vers & 0xF000) >> 12,
|
||||
(adapter->eeprom_vers & 0x0FF0) >> 4,
|
||||
(adapter->eeprom_vers & 0x000F));
|
||||
|
||||
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
|
||||
sizeof(drvinfo->bus_info));
|
||||
|
@ -755,7 +752,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
|
|||
{
|
||||
u32 pat, val;
|
||||
static const u32 test[] = {
|
||||
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
|
||||
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
|
||||
};
|
||||
for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
|
||||
E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
|
||||
(test[pat] & write));
|
||||
|
@ -785,6 +783,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
|
||||
do { \
|
||||
if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
|
||||
|
@ -812,16 +811,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
|
|||
u32 wlock_mac = 0;
|
||||
|
||||
/* The status register is Read Only, so a write should fail.
|
||||
* Some bits that get toggled are ignored.
|
||||
* Some bits that get toggled are ignored. There are several bits
|
||||
* on newer hardware that are r/w.
|
||||
*/
|
||||
switch (mac->type) {
|
||||
/* there are several bits on newer hardware that are r/w */
|
||||
case e1000_82571:
|
||||
case e1000_82572:
|
||||
case e1000_80003es2lan:
|
||||
toggle = 0x7FFFF3FF;
|
||||
break;
|
||||
default:
|
||||
default:
|
||||
toggle = 0x7FFFF033;
|
||||
break;
|
||||
}
|
||||
|
@ -927,7 +926,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
|
|||
}
|
||||
|
||||
/* If Checksum is not Correct return error else test passed */
|
||||
if ((checksum != (u16) NVM_SUM) && !(*data))
|
||||
if ((checksum != (u16)NVM_SUM) && !(*data))
|
||||
*data = 2;
|
||||
|
||||
return *data;
|
||||
|
@ -935,7 +934,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
|
|||
|
||||
static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
|
||||
{
|
||||
struct net_device *netdev = (struct net_device *) data;
|
||||
struct net_device *netdev = (struct net_device *)data;
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
||||
|
@ -968,8 +967,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
|
|||
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
|
||||
netdev)) {
|
||||
shared_int = 0;
|
||||
} else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
|
||||
netdev->name, netdev)) {
|
||||
} else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
|
||||
netdev)) {
|
||||
*data = 1;
|
||||
ret_val = -1;
|
||||
goto out;
|
||||
|
@ -1079,28 +1078,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
|
|||
struct e1000_ring *tx_ring = &adapter->test_tx_ring;
|
||||
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
struct e1000_buffer *buffer_info;
|
||||
int i;
|
||||
|
||||
if (tx_ring->desc && tx_ring->buffer_info) {
|
||||
for (i = 0; i < tx_ring->count; i++) {
|
||||
if (tx_ring->buffer_info[i].dma)
|
||||
buffer_info = &tx_ring->buffer_info[i];
|
||||
|
||||
if (buffer_info->dma)
|
||||
dma_unmap_single(&pdev->dev,
|
||||
tx_ring->buffer_info[i].dma,
|
||||
tx_ring->buffer_info[i].length,
|
||||
DMA_TO_DEVICE);
|
||||
if (tx_ring->buffer_info[i].skb)
|
||||
dev_kfree_skb(tx_ring->buffer_info[i].skb);
|
||||
buffer_info->dma,
|
||||
buffer_info->length,
|
||||
DMA_TO_DEVICE);
|
||||
if (buffer_info->skb)
|
||||
dev_kfree_skb(buffer_info->skb);
|
||||
}
|
||||
}
|
||||
|
||||
if (rx_ring->desc && rx_ring->buffer_info) {
|
||||
for (i = 0; i < rx_ring->count; i++) {
|
||||
if (rx_ring->buffer_info[i].dma)
|
||||
buffer_info = &rx_ring->buffer_info[i];
|
||||
|
||||
if (buffer_info->dma)
|
||||
dma_unmap_single(&pdev->dev,
|
||||
rx_ring->buffer_info[i].dma,
|
||||
2048, DMA_FROM_DEVICE);
|
||||
if (rx_ring->buffer_info[i].skb)
|
||||
dev_kfree_skb(rx_ring->buffer_info[i].skb);
|
||||
buffer_info->dma,
|
||||
2048, DMA_FROM_DEVICE);
|
||||
if (buffer_info->skb)
|
||||
dev_kfree_skb(buffer_info->skb);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1137,8 +1141,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
|||
tx_ring->count = E1000_DEFAULT_TXD;
|
||||
|
||||
tx_ring->buffer_info = kcalloc(tx_ring->count,
|
||||
sizeof(struct e1000_buffer),
|
||||
GFP_KERNEL);
|
||||
sizeof(struct e1000_buffer), GFP_KERNEL);
|
||||
if (!tx_ring->buffer_info) {
|
||||
ret_val = 1;
|
||||
goto err_nomem;
|
||||
|
@ -1155,8 +1158,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
|||
tx_ring->next_to_use = 0;
|
||||
tx_ring->next_to_clean = 0;
|
||||
|
||||
ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
|
||||
ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
|
||||
ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
|
||||
ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
|
||||
ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
|
||||
ew32(TDH(0), 0);
|
||||
ew32(TDT(0), 0);
|
||||
|
@ -1178,8 +1181,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
|||
tx_ring->buffer_info[i].skb = skb;
|
||||
tx_ring->buffer_info[i].length = skb->len;
|
||||
tx_ring->buffer_info[i].dma =
|
||||
dma_map_single(&pdev->dev, skb->data, skb->len,
|
||||
DMA_TO_DEVICE);
|
||||
dma_map_single(&pdev->dev, skb->data, skb->len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(&pdev->dev,
|
||||
tx_ring->buffer_info[i].dma)) {
|
||||
ret_val = 4;
|
||||
|
@ -1199,8 +1202,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
|||
rx_ring->count = E1000_DEFAULT_RXD;
|
||||
|
||||
rx_ring->buffer_info = kcalloc(rx_ring->count,
|
||||
sizeof(struct e1000_buffer),
|
||||
GFP_KERNEL);
|
||||
sizeof(struct e1000_buffer), GFP_KERNEL);
|
||||
if (!rx_ring->buffer_info) {
|
||||
ret_val = 5;
|
||||
goto err_nomem;
|
||||
|
@ -1219,16 +1221,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
|||
rctl = er32(RCTL);
|
||||
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
|
||||
ew32(RCTL, rctl & ~E1000_RCTL_EN);
|
||||
ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
|
||||
ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
|
||||
ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
|
||||
ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
|
||||
ew32(RDLEN(0), rx_ring->size);
|
||||
ew32(RDH(0), 0);
|
||||
ew32(RDT(0), 0);
|
||||
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
|
||||
E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
|
||||
E1000_RCTL_SBP | E1000_RCTL_SECRC |
|
||||
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
|
||||
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
|
||||
E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
|
||||
E1000_RCTL_SBP | E1000_RCTL_SECRC |
|
||||
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
|
||||
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
|
||||
ew32(RCTL, rctl);
|
||||
|
||||
for (i = 0; i < rx_ring->count; i++) {
|
||||
|
@ -1243,8 +1245,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
|||
skb_reserve(skb, NET_IP_ALIGN);
|
||||
rx_ring->buffer_info[i].skb = skb;
|
||||
rx_ring->buffer_info[i].dma =
|
||||
dma_map_single(&pdev->dev, skb->data, 2048,
|
||||
DMA_FROM_DEVICE);
|
||||
dma_map_single(&pdev->dev, skb->data, 2048,
|
||||
DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(&pdev->dev,
|
||||
rx_ring->buffer_info[i].dma)) {
|
||||
ret_val = 8;
|
||||
|
@ -1295,7 +1297,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
|
|||
|
||||
ew32(CTRL, ctrl_reg);
|
||||
e1e_flush();
|
||||
udelay(500);
|
||||
usleep_range(500, 1000);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1321,7 +1323,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
|
|||
e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
|
||||
/* Assert SW reset for above settings to take effect */
|
||||
hw->phy.ops.commit(hw);
|
||||
mdelay(1);
|
||||
usleep_range(1000, 2000);
|
||||
/* Force Full Duplex */
|
||||
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
|
||||
e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
|
||||
|
@ -1362,7 +1364,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
|
|||
|
||||
/* force 1000, set loopback */
|
||||
e1e_wphy(hw, MII_BMCR, 0x4140);
|
||||
mdelay(250);
|
||||
msleep(250);
|
||||
|
||||
/* Now set up the MAC to the same speed/duplex as the PHY. */
|
||||
ctrl_reg = er32(CTRL);
|
||||
|
@ -1394,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
|
|||
if (hw->phy.type == e1000_phy_m88)
|
||||
e1000_phy_disable_receiver(adapter);
|
||||
|
||||
udelay(500);
|
||||
usleep_range(500, 1000);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1430,8 +1432,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
|
|||
/* special write to serdes control register to enable SerDes analog
|
||||
* loopback
|
||||
*/
|
||||
#define E1000_SERDES_LB_ON 0x410
|
||||
ew32(SCTL, E1000_SERDES_LB_ON);
|
||||
ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
|
||||
e1e_flush();
|
||||
usleep_range(10000, 20000);
|
||||
|
||||
|
@ -1525,8 +1526,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
|
|||
case e1000_82572:
|
||||
if (hw->phy.media_type == e1000_media_type_fiber ||
|
||||
hw->phy.media_type == e1000_media_type_internal_serdes) {
|
||||
#define E1000_SERDES_LB_OFF 0x400
|
||||
ew32(SCTL, E1000_SERDES_LB_OFF);
|
||||
ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
|
||||
e1e_flush();
|
||||
usleep_range(10000, 20000);
|
||||
break;
|
||||
|
@ -1563,7 +1563,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
|
|||
frame_size &= ~1;
|
||||
if (*(skb->data + 3) == 0xFF)
|
||||
if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
|
||||
(*(skb->data + frame_size / 2 + 12) == 0xAF))
|
||||
(*(skb->data + frame_size / 2 + 12) == 0xAF))
|
||||
return 0;
|
||||
return 13;
|
||||
}
|
||||
|
@ -1574,6 +1574,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
|
|||
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct e1000_buffer *buffer_info;
|
||||
int i, j, k, l;
|
||||
int lc;
|
||||
int good_cnt;
|
||||
|
@ -1594,14 +1595,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
|
|||
|
||||
k = 0;
|
||||
l = 0;
|
||||
for (j = 0; j <= lc; j++) { /* loop count loop */
|
||||
for (i = 0; i < 64; i++) { /* send the packets */
|
||||
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
|
||||
1024);
|
||||
/* loop count loop */
|
||||
for (j = 0; j <= lc; j++) {
|
||||
/* send the packets */
|
||||
for (i = 0; i < 64; i++) {
|
||||
buffer_info = &tx_ring->buffer_info[k];
|
||||
|
||||
e1000_create_lbtest_frame(buffer_info->skb, 1024);
|
||||
dma_sync_single_for_device(&pdev->dev,
|
||||
tx_ring->buffer_info[k].dma,
|
||||
tx_ring->buffer_info[k].length,
|
||||
DMA_TO_DEVICE);
|
||||
buffer_info->dma,
|
||||
buffer_info->length,
|
||||
DMA_TO_DEVICE);
|
||||
k++;
|
||||
if (k == tx_ring->count)
|
||||
k = 0;
|
||||
|
@ -1611,13 +1615,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
|
|||
msleep(200);
|
||||
time = jiffies; /* set the start time for the receive */
|
||||
good_cnt = 0;
|
||||
do { /* receive the sent packets */
|
||||
dma_sync_single_for_cpu(&pdev->dev,
|
||||
rx_ring->buffer_info[l].dma, 2048,
|
||||
DMA_FROM_DEVICE);
|
||||
/* receive the sent packets */
|
||||
do {
|
||||
buffer_info = &rx_ring->buffer_info[l];
|
||||
|
||||
ret_val = e1000_check_lbtest_frame(
|
||||
rx_ring->buffer_info[l].skb, 1024);
|
||||
dma_sync_single_for_cpu(&pdev->dev,
|
||||
buffer_info->dma, 2048,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
ret_val = e1000_check_lbtest_frame(buffer_info->skb,
|
||||
1024);
|
||||
if (!ret_val)
|
||||
good_cnt++;
|
||||
l++;
|
||||
|
@ -1636,7 +1643,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
|
|||
ret_val = 14; /* error code for time out error */
|
||||
break;
|
||||
}
|
||||
} /* end loop count loop */
|
||||
}
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -1695,7 +1702,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
|
|||
/* On some Phy/switch combinations, link establishment
|
||||
* can take a few seconds more than expected.
|
||||
*/
|
||||
msleep(5000);
|
||||
msleep_interruptible(5000);
|
||||
|
||||
if (!(er32(STATUS) & E1000_STATUS_LU))
|
||||
*data = 1;
|
||||
|
@ -1979,12 +1986,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
|
|||
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
|
||||
switch (e1000_gstrings_stats[i].type) {
|
||||
case NETDEV_STATS:
|
||||
p = (char *) &net_stats +
|
||||
e1000_gstrings_stats[i].stat_offset;
|
||||
p = (char *)&net_stats +
|
||||
e1000_gstrings_stats[i].stat_offset;
|
||||
break;
|
||||
case E1000_STATS:
|
||||
p = (char *) adapter +
|
||||
e1000_gstrings_stats[i].stat_offset;
|
||||
p = (char *)adapter +
|
||||
e1000_gstrings_stats[i].stat_offset;
|
||||
break;
|
||||
default:
|
||||
data[i] = 0;
|
||||
|
@ -1992,7 +1999,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
|
|||
}
|
||||
|
||||
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
|
||||
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
|
||||
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -167,7 +167,7 @@ enum e1000_1000t_rx_status {
|
|||
e1000_1000t_rx_status_undefined = 0xFF
|
||||
};
|
||||
|
||||
enum e1000_rev_polarity{
|
||||
enum e1000_rev_polarity {
|
||||
e1000_rev_polarity_normal = 0,
|
||||
e1000_rev_polarity_reversed,
|
||||
e1000_rev_polarity_undefined = 0xFF
|
||||
|
@ -545,7 +545,7 @@ struct e1000_mac_info {
|
|||
u16 mta_reg_count;
|
||||
|
||||
/* Maximum size of the MTA register table in all supported adapters */
|
||||
#define MAX_MTA_REG 128
|
||||
#define MAX_MTA_REG 128
|
||||
u32 mta_shadow[MAX_MTA_REG];
|
||||
u16 rar_entry_count;
|
||||
|
||||
|
|
|
@ -61,15 +61,15 @@
|
|||
/* Offset 04h HSFSTS */
|
||||
union ich8_hws_flash_status {
|
||||
struct ich8_hsfsts {
|
||||
u16 flcdone :1; /* bit 0 Flash Cycle Done */
|
||||
u16 flcerr :1; /* bit 1 Flash Cycle Error */
|
||||
u16 dael :1; /* bit 2 Direct Access error Log */
|
||||
u16 berasesz :2; /* bit 4:3 Sector Erase Size */
|
||||
u16 flcinprog :1; /* bit 5 flash cycle in Progress */
|
||||
u16 reserved1 :2; /* bit 13:6 Reserved */
|
||||
u16 reserved2 :6; /* bit 13:6 Reserved */
|
||||
u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
|
||||
u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
|
||||
u16 flcdone:1; /* bit 0 Flash Cycle Done */
|
||||
u16 flcerr:1; /* bit 1 Flash Cycle Error */
|
||||
u16 dael:1; /* bit 2 Direct Access error Log */
|
||||
u16 berasesz:2; /* bit 4:3 Sector Erase Size */
|
||||
u16 flcinprog:1; /* bit 5 flash cycle in Progress */
|
||||
u16 reserved1:2; /* bit 13:6 Reserved */
|
||||
u16 reserved2:6; /* bit 13:6 Reserved */
|
||||
u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
|
||||
u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
|
||||
} hsf_status;
|
||||
u16 regval;
|
||||
};
|
||||
|
@ -78,11 +78,11 @@ union ich8_hws_flash_status {
|
|||
/* Offset 06h FLCTL */
|
||||
union ich8_hws_flash_ctrl {
|
||||
struct ich8_hsflctl {
|
||||
u16 flcgo :1; /* 0 Flash Cycle Go */
|
||||
u16 flcycle :2; /* 2:1 Flash Cycle */
|
||||
u16 reserved :5; /* 7:3 Reserved */
|
||||
u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
|
||||
u16 flockdn :6; /* 15:10 Reserved */
|
||||
u16 flcgo:1; /* 0 Flash Cycle Go */
|
||||
u16 flcycle:2; /* 2:1 Flash Cycle */
|
||||
u16 reserved:5; /* 7:3 Reserved */
|
||||
u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
|
||||
u16 flockdn:6; /* 15:10 Reserved */
|
||||
} hsf_ctrl;
|
||||
u16 regval;
|
||||
};
|
||||
|
@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl {
|
|||
/* ICH Flash Region Access Permissions */
|
||||
union ich8_hws_flash_regacc {
|
||||
struct ich8_flracc {
|
||||
u32 grra :8; /* 0:7 GbE region Read Access */
|
||||
u32 grwa :8; /* 8:15 GbE region Write Access */
|
||||
u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
|
||||
u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
|
||||
u32 grra:8; /* 0:7 GbE region Read Access */
|
||||
u32 grwa:8; /* 8:15 GbE region Write Access */
|
||||
u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
|
||||
u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
|
||||
} hsf_flregacc;
|
||||
u16 regval;
|
||||
};
|
||||
|
@ -312,7 +312,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
|
|||
mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
|
||||
ew32(CTRL, mac_reg);
|
||||
e1e_flush();
|
||||
udelay(10);
|
||||
usleep_range(10, 20);
|
||||
mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
|
||||
ew32(CTRL, mac_reg);
|
||||
e1e_flush();
|
||||
|
@ -548,8 +548,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
|
|||
/* find total size of the NVM, then cut in half since the total
|
||||
* size represents two separate NVM banks.
|
||||
*/
|
||||
nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
|
||||
<< FLASH_SECTOR_ADDR_SHIFT;
|
||||
nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
|
||||
<< FLASH_SECTOR_ADDR_SHIFT);
|
||||
nvm->flash_bank_size /= 2;
|
||||
/* Adjust to word count */
|
||||
nvm->flash_bank_size /= sizeof(u16);
|
||||
|
@ -1073,9 +1073,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
|
|||
u32 fwsm;
|
||||
|
||||
fwsm = er32(FWSM);
|
||||
return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
|
||||
((fwsm & E1000_FWSM_MODE_MASK) ==
|
||||
(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
|
||||
return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
|
||||
((fwsm & E1000_FWSM_MODE_MASK) ==
|
||||
(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1092,7 +1092,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
|
|||
|
||||
fwsm = er32(FWSM);
|
||||
return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
|
||||
(fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
|
||||
(fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1379,8 +1379,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
|
|||
word_addr = (u16)(cnf_base_addr << 1);
|
||||
|
||||
for (i = 0; i < cnf_size; i++) {
|
||||
ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
|
||||
&reg_data);
|
||||
ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
|
||||
if (ret_val)
|
||||
goto release;
|
||||
|
||||
|
@ -1440,13 +1439,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
|
|||
if (ret_val)
|
||||
goto release;
|
||||
|
||||
status_reg &= BM_CS_STATUS_LINK_UP |
|
||||
BM_CS_STATUS_RESOLVED |
|
||||
BM_CS_STATUS_SPEED_MASK;
|
||||
status_reg &= (BM_CS_STATUS_LINK_UP |
|
||||
BM_CS_STATUS_RESOLVED |
|
||||
BM_CS_STATUS_SPEED_MASK);
|
||||
|
||||
if (status_reg == (BM_CS_STATUS_LINK_UP |
|
||||
BM_CS_STATUS_RESOLVED |
|
||||
BM_CS_STATUS_SPEED_1000))
|
||||
BM_CS_STATUS_RESOLVED |
|
||||
BM_CS_STATUS_SPEED_1000))
|
||||
k1_enable = false;
|
||||
}
|
||||
|
||||
|
@ -1455,13 +1454,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
|
|||
if (ret_val)
|
||||
goto release;
|
||||
|
||||
status_reg &= HV_M_STATUS_LINK_UP |
|
||||
HV_M_STATUS_AUTONEG_COMPLETE |
|
||||
HV_M_STATUS_SPEED_MASK;
|
||||
status_reg &= (HV_M_STATUS_LINK_UP |
|
||||
HV_M_STATUS_AUTONEG_COMPLETE |
|
||||
HV_M_STATUS_SPEED_MASK);
|
||||
|
||||
if (status_reg == (HV_M_STATUS_LINK_UP |
|
||||
HV_M_STATUS_AUTONEG_COMPLETE |
|
||||
HV_M_STATUS_SPEED_1000))
|
||||
HV_M_STATUS_AUTONEG_COMPLETE |
|
||||
HV_M_STATUS_SPEED_1000))
|
||||
k1_enable = false;
|
||||
}
|
||||
|
||||
|
@ -1518,7 +1517,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
udelay(20);
|
||||
usleep_range(20, 40);
|
||||
ctrl_ext = er32(CTRL_EXT);
|
||||
ctrl_reg = er32(CTRL);
|
||||
|
||||
|
@ -1528,11 +1527,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
|
|||
|
||||
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
|
||||
e1e_flush();
|
||||
udelay(20);
|
||||
usleep_range(20, 40);
|
||||
ew32(CTRL, ctrl_reg);
|
||||
ew32(CTRL_EXT, ctrl_ext);
|
||||
e1e_flush();
|
||||
udelay(20);
|
||||
usleep_range(20, 40);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1606,7 +1605,6 @@ release:
|
|||
return ret_val;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -1773,7 +1771,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
|||
* SHRAL/H) and initial CRC values to the MAC
|
||||
*/
|
||||
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
|
||||
u8 mac_addr[ETH_ALEN] = {0};
|
||||
u8 mac_addr[ETH_ALEN] = { 0 };
|
||||
u32 addr_high, addr_low;
|
||||
|
||||
addr_high = er32(RAH(i));
|
||||
|
@ -1804,8 +1802,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
|||
ew32(RCTL, mac_reg);
|
||||
|
||||
ret_val = e1000e_read_kmrn_reg(hw,
|
||||
E1000_KMRNCTRLSTA_CTRL_OFFSET,
|
||||
&data);
|
||||
E1000_KMRNCTRLSTA_CTRL_OFFSET,
|
||||
&data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000e_write_kmrn_reg(hw,
|
||||
|
@ -1814,8 +1812,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000e_read_kmrn_reg(hw,
|
||||
E1000_KMRNCTRLSTA_HD_CTRL,
|
||||
&data);
|
||||
E1000_KMRNCTRLSTA_HD_CTRL,
|
||||
&data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
data &= ~(0xF << 8);
|
||||
|
@ -1862,8 +1860,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
|||
ew32(RCTL, mac_reg);
|
||||
|
||||
ret_val = e1000e_read_kmrn_reg(hw,
|
||||
E1000_KMRNCTRLSTA_CTRL_OFFSET,
|
||||
&data);
|
||||
E1000_KMRNCTRLSTA_CTRL_OFFSET,
|
||||
&data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000e_write_kmrn_reg(hw,
|
||||
|
@ -1872,8 +1870,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000e_read_kmrn_reg(hw,
|
||||
E1000_KMRNCTRLSTA_HD_CTRL,
|
||||
&data);
|
||||
E1000_KMRNCTRLSTA_HD_CTRL,
|
||||
&data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
data &= ~(0xF << 8);
|
||||
|
@ -2039,7 +2037,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
|
|||
do {
|
||||
data = er32(STATUS);
|
||||
data &= E1000_STATUS_LAN_INIT_DONE;
|
||||
udelay(100);
|
||||
usleep_range(100, 200);
|
||||
} while ((!data) && --loop);
|
||||
|
||||
/* If basic configuration is incomplete before the above loop
|
||||
|
@ -2384,7 +2382,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
|
|||
|
||||
/* Check bank 0 */
|
||||
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
|
||||
&sig_byte);
|
||||
&sig_byte);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
|
||||
|
@ -2395,8 +2393,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
|
|||
|
||||
/* Check bank 1 */
|
||||
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
|
||||
bank1_offset,
|
||||
&sig_byte);
|
||||
bank1_offset,
|
||||
&sig_byte);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
|
||||
|
@ -2449,8 +2447,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
|
|||
|
||||
ret_val = 0;
|
||||
for (i = 0; i < words; i++) {
|
||||
if (dev_spec->shadow_ram[offset+i].modified) {
|
||||
data[i] = dev_spec->shadow_ram[offset+i].value;
|
||||
if (dev_spec->shadow_ram[offset + i].modified) {
|
||||
data[i] = dev_spec->shadow_ram[offset + i].value;
|
||||
} else {
|
||||
ret_val = e1000_read_flash_word_ich8lan(hw,
|
||||
act_offset + i,
|
||||
|
@ -2635,8 +2633,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
|
|||
if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
|
||||
return -E1000_ERR_NVM;
|
||||
|
||||
flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
|
||||
hw->nvm.flash_base_addr;
|
||||
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
|
||||
hw->nvm.flash_base_addr);
|
||||
|
||||
do {
|
||||
udelay(1);
|
||||
|
@ -2653,8 +2651,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
|
|||
|
||||
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
|
||||
|
||||
ret_val = e1000_flash_cycle_ich8lan(hw,
|
||||
ICH_FLASH_READ_COMMAND_TIMEOUT);
|
||||
ret_val =
|
||||
e1000_flash_cycle_ich8lan(hw,
|
||||
ICH_FLASH_READ_COMMAND_TIMEOUT);
|
||||
|
||||
/* Check if FCERR is set to 1, if set to 1, clear it
|
||||
* and try the whole sequence a few more times, else
|
||||
|
@ -2713,8 +2712,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
|
|||
nvm->ops.acquire(hw);
|
||||
|
||||
for (i = 0; i < words; i++) {
|
||||
dev_spec->shadow_ram[offset+i].modified = true;
|
||||
dev_spec->shadow_ram[offset+i].value = data[i];
|
||||
dev_spec->shadow_ram[offset + i].modified = true;
|
||||
dev_spec->shadow_ram[offset + i].value = data[i];
|
||||
}
|
||||
|
||||
nvm->ops.release(hw);
|
||||
|
@ -2783,8 +2782,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
data = dev_spec->shadow_ram[i].value;
|
||||
} else {
|
||||
ret_val = e1000_read_flash_word_ich8lan(hw, i +
|
||||
old_bank_offset,
|
||||
&data);
|
||||
old_bank_offset,
|
||||
&data);
|
||||
if (ret_val)
|
||||
break;
|
||||
}
|
||||
|
@ -2802,7 +2801,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
/* Convert offset to bytes. */
|
||||
act_offset = (i + new_bank_offset) << 1;
|
||||
|
||||
udelay(100);
|
||||
usleep_range(100, 200);
|
||||
/* Write the bytes to the new bank. */
|
||||
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
|
||||
act_offset,
|
||||
|
@ -2810,10 +2809,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
break;
|
||||
|
||||
udelay(100);
|
||||
usleep_range(100, 200);
|
||||
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
|
||||
act_offset + 1,
|
||||
(u8)(data >> 8));
|
||||
act_offset + 1,
|
||||
(u8)(data >> 8));
|
||||
if (ret_val)
|
||||
break;
|
||||
}
|
||||
|
@ -2989,8 +2988,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
|
|||
offset > ICH_FLASH_LINEAR_ADDR_MASK)
|
||||
return -E1000_ERR_NVM;
|
||||
|
||||
flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
|
||||
hw->nvm.flash_base_addr;
|
||||
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
|
||||
hw->nvm.flash_base_addr);
|
||||
|
||||
do {
|
||||
udelay(1);
|
||||
|
@ -3001,7 +3000,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
|
|||
|
||||
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
|
||||
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
|
||||
hsflctl.hsf_ctrl.fldbcount = size -1;
|
||||
hsflctl.hsf_ctrl.fldbcount = size - 1;
|
||||
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
|
||||
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
|
||||
|
||||
|
@ -3017,8 +3016,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
|
|||
/* check if FCERR is set to 1 , if set to 1, clear it
|
||||
* and try the whole sequence a few more times else done
|
||||
*/
|
||||
ret_val = e1000_flash_cycle_ich8lan(hw,
|
||||
ICH_FLASH_WRITE_COMMAND_TIMEOUT);
|
||||
ret_val =
|
||||
e1000_flash_cycle_ich8lan(hw,
|
||||
ICH_FLASH_WRITE_COMMAND_TIMEOUT);
|
||||
if (!ret_val)
|
||||
break;
|
||||
|
||||
|
@ -3077,7 +3077,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
|
|||
|
||||
for (program_retries = 0; program_retries < 100; program_retries++) {
|
||||
e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
|
||||
udelay(100);
|
||||
usleep_range(100, 200);
|
||||
ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
|
||||
if (!ret_val)
|
||||
break;
|
||||
|
@ -3148,8 +3148,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
|
|||
flash_linear_addr = hw->nvm.flash_base_addr;
|
||||
flash_linear_addr += (bank) ? flash_bank_size : 0;
|
||||
|
||||
for (j = 0; j < iteration ; j++) {
|
||||
for (j = 0; j < iteration; j++) {
|
||||
do {
|
||||
u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
|
||||
|
||||
/* Steps */
|
||||
ret_val = e1000_flash_cycle_init_ich8lan(hw);
|
||||
if (ret_val)
|
||||
|
@ -3169,8 +3171,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
|
|||
flash_linear_addr += (j * sector_size);
|
||||
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
|
||||
|
||||
ret_val = e1000_flash_cycle_ich8lan(hw,
|
||||
ICH_FLASH_ERASE_COMMAND_TIMEOUT);
|
||||
ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
|
||||
if (!ret_val)
|
||||
break;
|
||||
|
||||
|
@ -3209,8 +3210,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
|
|||
return ret_val;
|
||||
}
|
||||
|
||||
if (*data == ID_LED_RESERVED_0000 ||
|
||||
*data == ID_LED_RESERVED_FFFF)
|
||||
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
|
||||
*data = ID_LED_DEFAULT_ICH8LAN;
|
||||
|
||||
return 0;
|
||||
|
@ -3450,9 +3450,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||
|
||||
/* Initialize identification LED */
|
||||
ret_val = mac->ops.id_led_init(hw);
|
||||
/* An error is not fatal and we should not stop init due to this */
|
||||
if (ret_val)
|
||||
e_dbg("Error initializing identification LED\n");
|
||||
/* This is not fatal and we should not stop init due to this */
|
||||
|
||||
/* Setup the receive address. */
|
||||
e1000e_init_rx_addrs(hw, mac->rar_entry_count);
|
||||
|
@ -3480,16 +3480,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||
|
||||
/* Set the transmit descriptor write-back policy for both queues */
|
||||
txdctl = er32(TXDCTL(0));
|
||||
txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB;
|
||||
txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
|
||||
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
|
||||
txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB);
|
||||
txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
|
||||
E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
|
||||
ew32(TXDCTL(0), txdctl);
|
||||
txdctl = er32(TXDCTL(1));
|
||||
txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB;
|
||||
txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
|
||||
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
|
||||
txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
|
||||
E1000_TXDCTL_FULL_TX_DESC_WB);
|
||||
txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
|
||||
E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
|
||||
ew32(TXDCTL(1), txdctl);
|
||||
|
||||
/* ICH8 has opposite polarity of no_snoop bits.
|
||||
|
@ -3498,7 +3498,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||
if (mac->type == e1000_ich8lan)
|
||||
snoop = PCIE_ICH8_SNOOP_ALL;
|
||||
else
|
||||
snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
|
||||
snoop = (u32)~(PCIE_NO_SNOOP_ALL);
|
||||
e1000e_set_pcie_no_snoop(hw, snoop);
|
||||
|
||||
ctrl_ext = er32(CTRL_EXT);
|
||||
|
@ -3514,6 +3514,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -3625,8 +3626,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
|
|||
*/
|
||||
hw->fc.current_mode = hw->fc.requested_mode;
|
||||
|
||||
e_dbg("After fix-ups FlowControl is now = %x\n",
|
||||
hw->fc.current_mode);
|
||||
e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
|
||||
|
||||
/* Continue to configure the copper link. */
|
||||
ret_val = hw->mac.ops.setup_physical_interface(hw);
|
||||
|
@ -3676,12 +3676,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
|
||||
®_data);
|
||||
®_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
reg_data |= 0x3F;
|
||||
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
|
||||
reg_data);
|
||||
reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -3754,8 +3754,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
|
|||
return ret_val;
|
||||
|
||||
if ((hw->mac.type == e1000_ich8lan) &&
|
||||
(hw->phy.type == e1000_phy_igp_3) &&
|
||||
(*speed == SPEED_1000)) {
|
||||
(hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
|
||||
ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
|
||||
}
|
||||
|
||||
|
@ -3838,7 +3837,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
|
|||
* /disabled - false).
|
||||
**/
|
||||
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
|
||||
bool state)
|
||||
bool state)
|
||||
{
|
||||
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
|
||||
|
||||
|
@ -3920,12 +3919,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
|
|||
return;
|
||||
|
||||
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
|
||||
®_data);
|
||||
®_data);
|
||||
if (ret_val)
|
||||
return;
|
||||
reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
|
||||
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
|
||||
reg_data);
|
||||
reg_data);
|
||||
if (ret_val)
|
||||
return;
|
||||
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
|
||||
|
|
|
@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
|
|||
* serdes media type.
|
||||
*/
|
||||
/* SYNCH bit and IV bit are sticky. */
|
||||
udelay(10);
|
||||
usleep_range(10, 20);
|
||||
rxcw = er32(RXCW);
|
||||
if (rxcw & E1000_RXCW_SYNCH) {
|
||||
if (!(rxcw & E1000_RXCW_IV)) {
|
||||
|
@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
|
|||
status = er32(STATUS);
|
||||
if (status & E1000_STATUS_LU) {
|
||||
/* SYNCH bit and IV bit are sticky, so reread rxcw. */
|
||||
udelay(10);
|
||||
usleep_range(10, 20);
|
||||
rxcw = er32(RXCW);
|
||||
if (rxcw & E1000_RXCW_SYNCH) {
|
||||
if (!(rxcw & E1000_RXCW_IV)) {
|
||||
|
@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
|
|||
if (!(swsm & E1000_SWSM_SMBI))
|
||||
break;
|
||||
|
||||
udelay(50);
|
||||
usleep_range(50, 100);
|
||||
i++;
|
||||
}
|
||||
|
||||
|
@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
|
|||
if (er32(SWSM) & E1000_SWSM_SWESMBI)
|
||||
break;
|
||||
|
||||
udelay(50);
|
||||
usleep_range(50, 100);
|
||||
}
|
||||
|
||||
if (i == timeout) {
|
||||
|
@ -1712,7 +1712,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
|
|||
while (timeout) {
|
||||
if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
|
||||
break;
|
||||
udelay(100);
|
||||
usleep_range(100, 200);
|
||||
timeout--;
|
||||
}
|
||||
|
||||
|
|
|
@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
|
|||
if (netdev) {
|
||||
dev_info(&adapter->pdev->dev, "Net device Info\n");
|
||||
pr_info("Device Name state trans_start last_rx\n");
|
||||
pr_info("%-15s %016lX %016lX %016lX\n",
|
||||
netdev->name, netdev->state, netdev->trans_start,
|
||||
netdev->last_rx);
|
||||
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
|
||||
netdev->state, netdev->trans_start, netdev->last_rx);
|
||||
}
|
||||
|
||||
/* Print Registers */
|
||||
|
@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
|
|||
cpu_to_le64(ps_page->dma);
|
||||
}
|
||||
|
||||
skb = __netdev_alloc_skb_ip_align(netdev,
|
||||
adapter->rx_ps_bsize0,
|
||||
skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
|
||||
gfp);
|
||||
|
||||
if (!skb) {
|
||||
|
@ -850,8 +848,8 @@ check_page:
|
|||
|
||||
if (!buffer_info->dma)
|
||||
buffer_info->dma = dma_map_page(&pdev->dev,
|
||||
buffer_info->page, 0,
|
||||
PAGE_SIZE,
|
||||
buffer_info->page, 0,
|
||||
PAGE_SIZE,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
|
||||
|
@ -937,10 +935,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
|
|||
|
||||
cleaned = true;
|
||||
cleaned_count++;
|
||||
dma_unmap_single(&pdev->dev,
|
||||
buffer_info->dma,
|
||||
adapter->rx_buffer_len,
|
||||
DMA_FROM_DEVICE);
|
||||
dma_unmap_single(&pdev->dev, buffer_info->dma,
|
||||
adapter->rx_buffer_len, DMA_FROM_DEVICE);
|
||||
buffer_info->dma = 0;
|
||||
|
||||
length = le16_to_cpu(rx_desc->wb.upper.length);
|
||||
|
@ -1068,8 +1064,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
|
|||
static void e1000_print_hw_hang(struct work_struct *work)
|
||||
{
|
||||
struct e1000_adapter *adapter = container_of(work,
|
||||
struct e1000_adapter,
|
||||
print_hang_task);
|
||||
struct e1000_adapter,
|
||||
print_hang_task);
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
struct e1000_ring *tx_ring = adapter->tx_ring;
|
||||
unsigned int i = tx_ring->next_to_clean;
|
||||
|
@ -1082,8 +1078,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
|
|||
if (test_bit(__E1000_DOWN, &adapter->state))
|
||||
return;
|
||||
|
||||
if (!adapter->tx_hang_recheck &&
|
||||
(adapter->flags2 & FLAG2_DMA_BURST)) {
|
||||
if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
|
||||
/* May be block on write-back, flush and detect again
|
||||
* flush pending descriptor writebacks to memory
|
||||
*/
|
||||
|
@ -1125,19 +1120,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
|
|||
"PHY 1000BASE-T Status <%x>\n"
|
||||
"PHY Extended Status <%x>\n"
|
||||
"PCI Status <%x>\n",
|
||||
readl(tx_ring->head),
|
||||
readl(tx_ring->tail),
|
||||
tx_ring->next_to_use,
|
||||
tx_ring->next_to_clean,
|
||||
tx_ring->buffer_info[eop].time_stamp,
|
||||
eop,
|
||||
jiffies,
|
||||
eop_desc->upper.fields.status,
|
||||
er32(STATUS),
|
||||
phy_status,
|
||||
phy_1000t_status,
|
||||
phy_ext_status,
|
||||
pci_status);
|
||||
readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
|
||||
tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
|
||||
eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
|
||||
phy_status, phy_1000t_status, phy_ext_status, pci_status);
|
||||
|
||||
/* Suggest workaround for known h/w issue */
|
||||
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
|
||||
|
@ -1430,7 +1416,7 @@ copydone:
|
|||
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
|
||||
|
||||
if (rx_desc->wb.upper.header_status &
|
||||
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
|
||||
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
|
||||
adapter->rx_hdr_split++;
|
||||
|
||||
e1000_receive_skb(adapter, netdev, skb, staterr,
|
||||
|
@ -1468,7 +1454,7 @@ next_desc:
|
|||
* e1000_consume_page - helper function
|
||||
**/
|
||||
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
|
||||
u16 length)
|
||||
u16 length)
|
||||
{
|
||||
bi->page = NULL;
|
||||
skb->len += length;
|
||||
|
@ -1495,7 +1481,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
|
|||
unsigned int i;
|
||||
int cleaned_count = 0;
|
||||
bool cleaned = false;
|
||||
unsigned int total_rx_bytes=0, total_rx_packets=0;
|
||||
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
|
||||
struct skb_shared_info *shinfo;
|
||||
|
||||
i = rx_ring->next_to_clean;
|
||||
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
|
||||
|
@ -1541,7 +1528,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
|
|||
rx_ring->rx_skb_top = NULL;
|
||||
goto next_desc;
|
||||
}
|
||||
|
||||
#define rxtop (rx_ring->rx_skb_top)
|
||||
if (!(staterr & E1000_RXD_STAT_EOP)) {
|
||||
/* this descriptor is only the beginning (or middle) */
|
||||
|
@ -1549,12 +1535,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
|
|||
/* this is the beginning of a chain */
|
||||
rxtop = skb;
|
||||
skb_fill_page_desc(rxtop, 0, buffer_info->page,
|
||||
0, length);
|
||||
0, length);
|
||||
} else {
|
||||
/* this is the middle of a chain */
|
||||
skb_fill_page_desc(rxtop,
|
||||
skb_shinfo(rxtop)->nr_frags,
|
||||
buffer_info->page, 0, length);
|
||||
shinfo = skb_shinfo(rxtop);
|
||||
skb_fill_page_desc(rxtop, shinfo->nr_frags,
|
||||
buffer_info->page, 0,
|
||||
length);
|
||||
/* re-use the skb, only consumed the page */
|
||||
buffer_info->skb = skb;
|
||||
}
|
||||
|
@ -1563,9 +1550,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
|
|||
} else {
|
||||
if (rxtop) {
|
||||
/* end of the chain */
|
||||
skb_fill_page_desc(rxtop,
|
||||
skb_shinfo(rxtop)->nr_frags,
|
||||
buffer_info->page, 0, length);
|
||||
shinfo = skb_shinfo(rxtop);
|
||||
skb_fill_page_desc(rxtop, shinfo->nr_frags,
|
||||
buffer_info->page, 0,
|
||||
length);
|
||||
/* re-use the current skb, we only consumed the
|
||||
* page
|
||||
*/
|
||||
|
@ -1590,10 +1578,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
|
|||
skb_put(skb, length);
|
||||
} else {
|
||||
skb_fill_page_desc(skb, 0,
|
||||
buffer_info->page, 0,
|
||||
length);
|
||||
buffer_info->page, 0,
|
||||
length);
|
||||
e1000_consume_page(buffer_info, skb,
|
||||
length);
|
||||
length);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1666,8 +1654,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
|
|||
DMA_FROM_DEVICE);
|
||||
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
|
||||
dma_unmap_page(&pdev->dev, buffer_info->dma,
|
||||
PAGE_SIZE,
|
||||
DMA_FROM_DEVICE);
|
||||
PAGE_SIZE, DMA_FROM_DEVICE);
|
||||
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
|
||||
dma_unmap_single(&pdev->dev, buffer_info->dma,
|
||||
adapter->rx_ps_bsize0,
|
||||
|
@ -1720,7 +1707,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
|
|||
static void e1000e_downshift_workaround(struct work_struct *work)
|
||||
{
|
||||
struct e1000_adapter *adapter = container_of(work,
|
||||
struct e1000_adapter, downshift_task);
|
||||
struct e1000_adapter,
|
||||
downshift_task);
|
||||
|
||||
if (test_bit(__E1000_DOWN, &adapter->state))
|
||||
return;
|
||||
|
@ -1913,7 +1901,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
|
|||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct e1000_ring *tx_ring = adapter->tx_ring;
|
||||
|
||||
|
||||
adapter->total_tx_bytes = 0;
|
||||
adapter->total_tx_packets = 0;
|
||||
|
||||
|
@ -1970,7 +1957,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
|
|||
ew32(RFCTL, rfctl);
|
||||
}
|
||||
|
||||
#define E1000_IVAR_INT_ALLOC_VALID 0x8
|
||||
/* Configure Rx vector */
|
||||
rx_ring->ims_val = E1000_IMS_RXQ0;
|
||||
adapter->eiac_mask |= rx_ring->ims_val;
|
||||
|
@ -2045,8 +2031,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
|
|||
if (adapter->flags & FLAG_HAS_MSIX) {
|
||||
adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
|
||||
adapter->msix_entries = kcalloc(adapter->num_vectors,
|
||||
sizeof(struct msix_entry),
|
||||
GFP_KERNEL);
|
||||
sizeof(struct
|
||||
msix_entry),
|
||||
GFP_KERNEL);
|
||||
if (adapter->msix_entries) {
|
||||
for (i = 0; i < adapter->num_vectors; i++)
|
||||
adapter->msix_entries[i].entry = i;
|
||||
|
@ -2490,7 +2477,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
|
|||
switch (itr_setting) {
|
||||
case lowest_latency:
|
||||
/* handle TSO and jumbo frames */
|
||||
if (bytes/packets > 8000)
|
||||
if (bytes / packets > 8000)
|
||||
retval = bulk_latency;
|
||||
else if ((packets < 5) && (bytes > 512))
|
||||
retval = low_latency;
|
||||
|
@ -2498,13 +2485,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
|
|||
case low_latency: /* 50 usec aka 20000 ints/s */
|
||||
if (bytes > 10000) {
|
||||
/* this if handles the TSO accounting */
|
||||
if (bytes/packets > 8000)
|
||||
if (bytes / packets > 8000)
|
||||
retval = bulk_latency;
|
||||
else if ((packets < 10) || ((bytes/packets) > 1200))
|
||||
else if ((packets < 10) || ((bytes / packets) > 1200))
|
||||
retval = bulk_latency;
|
||||
else if ((packets > 35))
|
||||
retval = lowest_latency;
|
||||
} else if (bytes/packets > 2000) {
|
||||
} else if (bytes / packets > 2000) {
|
||||
retval = bulk_latency;
|
||||
} else if (packets <= 2 && bytes < 512) {
|
||||
retval = lowest_latency;
|
||||
|
@ -2556,8 +2543,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
|
|||
|
||||
current_itr = max(adapter->rx_itr, adapter->tx_itr);
|
||||
|
||||
switch (current_itr) {
|
||||
/* counts and packets in update_itr are dependent on these numbers */
|
||||
switch (current_itr) {
|
||||
case lowest_latency:
|
||||
new_itr = 70000;
|
||||
break;
|
||||
|
@ -2578,8 +2565,7 @@ set_itr_now:
|
|||
* increasing
|
||||
*/
|
||||
new_itr = new_itr > adapter->itr ?
|
||||
min(adapter->itr + (new_itr >> 2), new_itr) :
|
||||
new_itr;
|
||||
min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
|
||||
adapter->itr = new_itr;
|
||||
adapter->rx_ring->itr_val = new_itr;
|
||||
if (adapter->msix_entries)
|
||||
|
@ -2810,8 +2796,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
|
|||
u16 vid = adapter->hw.mng_cookie.vlan_id;
|
||||
u16 old_vid = adapter->mng_vlan_id;
|
||||
|
||||
if (adapter->hw.mng_cookie.status &
|
||||
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
|
||||
if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
|
||||
e1000_vlan_rx_add_vid(netdev, vid);
|
||||
adapter->mng_vlan_id = vid;
|
||||
}
|
||||
|
@ -2827,7 +2812,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
|
|||
e1000_vlan_rx_add_vid(adapter->netdev, 0);
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
}
|
||||
|
||||
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
|
||||
|
@ -3002,8 +2987,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
|
|||
rctl = er32(RCTL);
|
||||
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
|
||||
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
|
||||
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
|
||||
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
|
||||
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
|
||||
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
|
||||
|
||||
/* Do not Store bad packets */
|
||||
rctl &= ~E1000_RCTL_SBP;
|
||||
|
@ -3089,19 +3074,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
|
|||
/* Enable Packet split descriptors */
|
||||
rctl |= E1000_RCTL_DTYP_PS;
|
||||
|
||||
psrctl |= adapter->rx_ps_bsize0 >>
|
||||
E1000_PSRCTL_BSIZE0_SHIFT;
|
||||
psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
|
||||
|
||||
switch (adapter->rx_ps_pages) {
|
||||
case 3:
|
||||
psrctl |= PAGE_SIZE <<
|
||||
E1000_PSRCTL_BSIZE3_SHIFT;
|
||||
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
|
||||
/* fall-through */
|
||||
case 2:
|
||||
psrctl |= PAGE_SIZE <<
|
||||
E1000_PSRCTL_BSIZE2_SHIFT;
|
||||
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
|
||||
/* fall-through */
|
||||
case 1:
|
||||
psrctl |= PAGE_SIZE >>
|
||||
E1000_PSRCTL_BSIZE1_SHIFT;
|
||||
psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -3275,7 +3258,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
|
|||
/* update_mc_addr_list expects a packed array of only addresses. */
|
||||
i = 0;
|
||||
netdev_for_each_mc_addr(ha, netdev)
|
||||
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
|
||||
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
|
||||
|
||||
hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
|
||||
kfree(mta_list);
|
||||
|
@ -3752,8 +3735,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
|
|||
* but don't include ethernet FCS because hardware appends it
|
||||
*/
|
||||
min_tx_space = (adapter->max_frame_size +
|
||||
sizeof(struct e1000_tx_desc) -
|
||||
ETH_FCS_LEN) * 2;
|
||||
sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
|
||||
min_tx_space = ALIGN(min_tx_space, 1024);
|
||||
min_tx_space >>= 10;
|
||||
/* software strips receive CRC, so leave room for it */
|
||||
|
@ -3856,13 +3838,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
|
|||
if ((adapter->max_frame_size * 2) > (pba << 10)) {
|
||||
if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Interrupt Throttle Rate turned off\n");
|
||||
"Interrupt Throttle Rate off\n");
|
||||
adapter->flags2 |= FLAG2_DISABLE_AIM;
|
||||
e1000e_write_itr(adapter, 0);
|
||||
}
|
||||
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Interrupt Throttle Rate turned on\n");
|
||||
"Interrupt Throttle Rate on\n");
|
||||
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
|
||||
adapter->itr = 20000;
|
||||
e1000e_write_itr(adapter, adapter->itr);
|
||||
|
@ -4261,8 +4243,7 @@ static int e1000_open(struct net_device *netdev)
|
|||
e1000e_power_up_phy(adapter);
|
||||
|
||||
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
|
||||
if ((adapter->hw.mng_cookie.status &
|
||||
E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
|
||||
if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
|
||||
e1000_update_mng_vlan(adapter);
|
||||
|
||||
/* DMA latency requirement to workaround jumbo issue */
|
||||
|
@ -4364,8 +4345,7 @@ static int e1000_close(struct net_device *netdev)
|
|||
/* kill manageability vlan ID if supported, but not if a vlan with
|
||||
* the same ID is registered on the host OS (let 8021q kill it)
|
||||
*/
|
||||
if (adapter->hw.mng_cookie.status &
|
||||
E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
|
||||
if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
|
||||
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
|
||||
|
||||
/* If AMT is enabled, let the firmware know that the network
|
||||
|
@ -4381,6 +4361,7 @@ static int e1000_close(struct net_device *netdev)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_set_mac - Change the Ethernet Address of the NIC
|
||||
* @netdev: network interface device structure
|
||||
|
@ -4431,7 +4412,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
|
|||
static void e1000e_update_phy_task(struct work_struct *work)
|
||||
{
|
||||
struct e1000_adapter *adapter = container_of(work,
|
||||
struct e1000_adapter, update_phy_task);
|
||||
struct e1000_adapter,
|
||||
update_phy_task);
|
||||
|
||||
if (test_bit(__E1000_DOWN, &adapter->state))
|
||||
return;
|
||||
|
@ -4448,7 +4430,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
|
|||
**/
|
||||
static void e1000_update_phy_info(unsigned long data)
|
||||
{
|
||||
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
|
||||
struct e1000_adapter *adapter = (struct e1000_adapter *)data;
|
||||
|
||||
if (test_bit(__E1000_DOWN, &adapter->state))
|
||||
return;
|
||||
|
@ -4615,18 +4597,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
|
|||
* our own version based on RUC and ROC
|
||||
*/
|
||||
netdev->stats.rx_errors = adapter->stats.rxerrc +
|
||||
adapter->stats.crcerrs + adapter->stats.algnerrc +
|
||||
adapter->stats.ruc + adapter->stats.roc +
|
||||
adapter->stats.cexterr;
|
||||
adapter->stats.crcerrs + adapter->stats.algnerrc +
|
||||
adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
|
||||
netdev->stats.rx_length_errors = adapter->stats.ruc +
|
||||
adapter->stats.roc;
|
||||
adapter->stats.roc;
|
||||
netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
|
||||
netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
|
||||
netdev->stats.rx_missed_errors = adapter->stats.mpc;
|
||||
|
||||
/* Tx Errors */
|
||||
netdev->stats.tx_errors = adapter->stats.ecol +
|
||||
adapter->stats.latecol;
|
||||
netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
|
||||
netdev->stats.tx_aborted_errors = adapter->stats.ecol;
|
||||
netdev->stats.tx_window_errors = adapter->stats.latecol;
|
||||
netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
|
||||
|
@ -4782,7 +4762,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
|
|||
**/
|
||||
static void e1000_watchdog(unsigned long data)
|
||||
{
|
||||
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
|
||||
struct e1000_adapter *adapter = (struct e1000_adapter *)data;
|
||||
|
||||
/* Do the rest outside of interrupt context */
|
||||
schedule_work(&adapter->watchdog_task);
|
||||
|
@ -4793,7 +4773,8 @@ static void e1000_watchdog(unsigned long data)
|
|||
static void e1000_watchdog_task(struct work_struct *work)
|
||||
{
|
||||
struct e1000_adapter *adapter = container_of(work,
|
||||
struct e1000_adapter, watchdog_task);
|
||||
struct e1000_adapter,
|
||||
watchdog_task);
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
struct e1000_mac_info *mac = &adapter->hw.mac;
|
||||
struct e1000_phy_info *phy = &adapter->hw.phy;
|
||||
|
@ -4827,8 +4808,8 @@ static void e1000_watchdog_task(struct work_struct *work)
|
|||
/* update snapshot of PHY registers on LSC */
|
||||
e1000_phy_read_status(adapter);
|
||||
mac->ops.get_link_up_info(&adapter->hw,
|
||||
&adapter->link_speed,
|
||||
&adapter->link_duplex);
|
||||
&adapter->link_speed,
|
||||
&adapter->link_duplex);
|
||||
e1000_print_link_info(adapter);
|
||||
|
||||
/* check if SmartSpeed worked */
|
||||
|
@ -4941,7 +4922,7 @@ static void e1000_watchdog_task(struct work_struct *work)
|
|||
adapter->flags |= FLAG_RESTART_NOW;
|
||||
else
|
||||
pm_schedule_suspend(netdev->dev.parent,
|
||||
LINK_TIMEOUT);
|
||||
LINK_TIMEOUT);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4976,8 +4957,8 @@ link_up:
|
|||
*/
|
||||
u32 goc = (adapter->gotc + adapter->gorc) / 10000;
|
||||
u32 dif = (adapter->gotc > adapter->gorc ?
|
||||
adapter->gotc - adapter->gorc :
|
||||
adapter->gorc - adapter->gotc) / 10000;
|
||||
adapter->gotc - adapter->gorc :
|
||||
adapter->gorc - adapter->gotc) / 10000;
|
||||
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
|
||||
|
||||
e1000e_write_itr(adapter, itr);
|
||||
|
@ -5056,14 +5037,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
|
|||
iph->tot_len = 0;
|
||||
iph->check = 0;
|
||||
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
|
||||
0, IPPROTO_TCP, 0);
|
||||
0, IPPROTO_TCP, 0);
|
||||
cmd_length = E1000_TXD_CMD_IP;
|
||||
ipcse = skb_transport_offset(skb) - 1;
|
||||
} else if (skb_is_gso_v6(skb)) {
|
||||
ipv6_hdr(skb)->payload_len = 0;
|
||||
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
|
||||
&ipv6_hdr(skb)->daddr,
|
||||
0, IPPROTO_TCP, 0);
|
||||
&ipv6_hdr(skb)->daddr,
|
||||
0, IPPROTO_TCP, 0);
|
||||
ipcse = 0;
|
||||
}
|
||||
ipcss = skb_network_offset(skb);
|
||||
|
@ -5072,7 +5053,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
|
|||
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
|
||||
|
||||
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
|
||||
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
|
||||
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
|
||||
|
||||
i = tx_ring->next_to_use;
|
||||
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
|
||||
|
@ -5142,8 +5123,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
|
|||
|
||||
context_desc->lower_setup.ip_config = 0;
|
||||
context_desc->upper_setup.tcp_fields.tucss = css;
|
||||
context_desc->upper_setup.tcp_fields.tucso =
|
||||
css + skb->csum_offset;
|
||||
context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
|
||||
context_desc->upper_setup.tcp_fields.tucse = 0;
|
||||
context_desc->tcp_seg_setup.data = 0;
|
||||
context_desc->cmd_and_length = cpu_to_le32(cmd_len);
|
||||
|
@ -5216,7 +5196,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
|
|||
buffer_info->time_stamp = jiffies;
|
||||
buffer_info->next_to_watch = i;
|
||||
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
|
||||
offset, size, DMA_TO_DEVICE);
|
||||
offset, size,
|
||||
DMA_TO_DEVICE);
|
||||
buffer_info->mapped_as_page = true;
|
||||
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
|
||||
goto dma_error;
|
||||
|
@ -5265,7 +5246,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
|
|||
|
||||
if (tx_flags & E1000_TX_FLAGS_TSO) {
|
||||
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
|
||||
E1000_TXD_CMD_TSE;
|
||||
E1000_TXD_CMD_TSE;
|
||||
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
|
||||
|
||||
if (tx_flags & E1000_TX_FLAGS_IPV4)
|
||||
|
@ -5296,8 +5277,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
|
|||
buffer_info = &tx_ring->buffer_info[i];
|
||||
tx_desc = E1000_TX_DESC(*tx_ring, i);
|
||||
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
|
||||
tx_desc->lower.data =
|
||||
cpu_to_le32(txd_lower | buffer_info->length);
|
||||
tx_desc->lower.data = cpu_to_le32(txd_lower |
|
||||
buffer_info->length);
|
||||
tx_desc->upper.data = cpu_to_le32(txd_upper);
|
||||
|
||||
i++;
|
||||
|
@ -5347,11 +5328,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
|
|||
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
|
||||
return 0;
|
||||
|
||||
if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
|
||||
if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
|
||||
return 0;
|
||||
|
||||
{
|
||||
const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
|
||||
const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
|
||||
struct udphdr *udp;
|
||||
|
||||
if (ip->protocol != IPPROTO_UDP)
|
||||
|
@ -5576,7 +5557,7 @@ static void e1000_reset_task(struct work_struct *work)
|
|||
* Returns the address of the device statistics structure.
|
||||
**/
|
||||
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
|
||||
struct rtnl_link_stats64 *stats)
|
||||
struct rtnl_link_stats64 *stats)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
|
@ -5597,18 +5578,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
|
|||
* our own version based on RUC and ROC
|
||||
*/
|
||||
stats->rx_errors = adapter->stats.rxerrc +
|
||||
adapter->stats.crcerrs + adapter->stats.algnerrc +
|
||||
adapter->stats.ruc + adapter->stats.roc +
|
||||
adapter->stats.cexterr;
|
||||
stats->rx_length_errors = adapter->stats.ruc +
|
||||
adapter->stats.roc;
|
||||
adapter->stats.crcerrs + adapter->stats.algnerrc +
|
||||
adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
|
||||
stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
|
||||
stats->rx_crc_errors = adapter->stats.crcerrs;
|
||||
stats->rx_frame_errors = adapter->stats.algnerrc;
|
||||
stats->rx_missed_errors = adapter->stats.mpc;
|
||||
|
||||
/* Tx Errors */
|
||||
stats->tx_errors = adapter->stats.ecol +
|
||||
adapter->stats.latecol;
|
||||
stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
|
||||
stats->tx_aborted_errors = adapter->stats.ecol;
|
||||
stats->tx_window_errors = adapter->stats.latecol;
|
||||
stats->tx_carrier_errors = adapter->stats.tncrs;
|
||||
|
@ -5677,9 +5655,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
|||
|
||||
/* adjust allocation if LPE protects us, and we aren't using SBP */
|
||||
if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
|
||||
(max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
|
||||
(max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
|
||||
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
|
||||
+ ETH_FCS_LEN;
|
||||
+ ETH_FCS_LEN;
|
||||
|
||||
if (netif_running(netdev))
|
||||
e1000e_up(adapter);
|
||||
|
@ -5858,7 +5836,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
|
|||
phy_reg &= ~(BM_RCTL_MO_MASK);
|
||||
if (mac_reg & E1000_RCTL_MO_3)
|
||||
phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
|
||||
<< BM_RCTL_MO_SHIFT);
|
||||
<< BM_RCTL_MO_SHIFT);
|
||||
if (mac_reg & E1000_RCTL_BAM)
|
||||
phy_reg |= BM_RCTL_BAM;
|
||||
if (mac_reg & E1000_RCTL_PMCF)
|
||||
|
@ -5932,10 +5910,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
|
|||
}
|
||||
|
||||
ctrl = er32(CTRL);
|
||||
/* advertise wake from D3Cold */
|
||||
#define E1000_CTRL_ADVD3WUC 0x00100000
|
||||
/* phy power management enable */
|
||||
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
|
||||
ctrl |= E1000_CTRL_ADVD3WUC;
|
||||
if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
|
||||
ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
|
||||
|
@ -6002,8 +5976,7 @@ static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
|
|||
pci_set_power_state(pdev, PCI_D3hot);
|
||||
}
|
||||
|
||||
static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
|
||||
bool wake)
|
||||
static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, bool wake)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
|
@ -6107,24 +6080,24 @@ static int __e1000_resume(struct pci_dev *pdev)
|
|||
e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
|
||||
if (phy_data) {
|
||||
e_info("PHY Wakeup cause - %s\n",
|
||||
phy_data & E1000_WUS_EX ? "Unicast Packet" :
|
||||
phy_data & E1000_WUS_MC ? "Multicast Packet" :
|
||||
phy_data & E1000_WUS_BC ? "Broadcast Packet" :
|
||||
phy_data & E1000_WUS_MAG ? "Magic Packet" :
|
||||
phy_data & E1000_WUS_LNKC ?
|
||||
"Link Status Change" : "other");
|
||||
phy_data & E1000_WUS_EX ? "Unicast Packet" :
|
||||
phy_data & E1000_WUS_MC ? "Multicast Packet" :
|
||||
phy_data & E1000_WUS_BC ? "Broadcast Packet" :
|
||||
phy_data & E1000_WUS_MAG ? "Magic Packet" :
|
||||
phy_data & E1000_WUS_LNKC ?
|
||||
"Link Status Change" : "other");
|
||||
}
|
||||
e1e_wphy(&adapter->hw, BM_WUS, ~0);
|
||||
} else {
|
||||
u32 wus = er32(WUS);
|
||||
if (wus) {
|
||||
e_info("MAC Wakeup cause - %s\n",
|
||||
wus & E1000_WUS_EX ? "Unicast Packet" :
|
||||
wus & E1000_WUS_MC ? "Multicast Packet" :
|
||||
wus & E1000_WUS_BC ? "Broadcast Packet" :
|
||||
wus & E1000_WUS_MAG ? "Magic Packet" :
|
||||
wus & E1000_WUS_LNKC ? "Link Status Change" :
|
||||
"other");
|
||||
wus & E1000_WUS_EX ? "Unicast Packet" :
|
||||
wus & E1000_WUS_MC ? "Multicast Packet" :
|
||||
wus & E1000_WUS_BC ? "Broadcast Packet" :
|
||||
wus & E1000_WUS_MAG ? "Magic Packet" :
|
||||
wus & E1000_WUS_LNKC ? "Link Status Change" :
|
||||
"other");
|
||||
}
|
||||
ew32(WUS, ~0);
|
||||
}
|
||||
|
@ -6413,7 +6386,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
|
|||
e_info("(PCI Express:2.5GT/s:%s) %pM\n",
|
||||
/* bus width */
|
||||
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
|
||||
"Width x1"),
|
||||
"Width x1"),
|
||||
/* MAC address */
|
||||
netdev->dev_addr);
|
||||
e_info("Intel(R) PRO/%s Network Connection\n",
|
||||
|
@ -6523,7 +6496,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
resource_size_t flash_start, flash_len;
|
||||
static int cards_found;
|
||||
u16 aspm_disable_flag = 0;
|
||||
int i, err, pci_using_dac;
|
||||
int bars, i, err, pci_using_dac;
|
||||
u16 eeprom_data = 0;
|
||||
u16 eeprom_apme_mask = E1000_EEPROM_APME;
|
||||
|
||||
|
@ -6550,15 +6523,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
err = dma_set_coherent_mask(&pdev->dev,
|
||||
DMA_BIT_MASK(32));
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
|
||||
dev_err(&pdev->dev,
|
||||
"No usable DMA configuration, aborting\n");
|
||||
goto err_dma;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = pci_request_selected_regions_exclusive(pdev,
|
||||
pci_select_bars(pdev, IORESOURCE_MEM),
|
||||
e1000e_driver_name);
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
err = pci_request_selected_regions_exclusive(pdev, bars,
|
||||
e1000e_driver_name);
|
||||
if (err)
|
||||
goto err_pci_reg;
|
||||
|
||||
|
@ -6727,11 +6701,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
init_timer(&adapter->watchdog_timer);
|
||||
adapter->watchdog_timer.function = e1000_watchdog;
|
||||
adapter->watchdog_timer.data = (unsigned long) adapter;
|
||||
adapter->watchdog_timer.data = (unsigned long)adapter;
|
||||
|
||||
init_timer(&adapter->phy_info_timer);
|
||||
adapter->phy_info_timer.function = e1000_update_phy_info;
|
||||
adapter->phy_info_timer.data = (unsigned long) adapter;
|
||||
adapter->phy_info_timer.data = (unsigned long)adapter;
|
||||
|
||||
INIT_WORK(&adapter->reset_task, e1000_reset_task);
|
||||
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
|
||||
|
@ -6835,7 +6809,7 @@ err_ioremap:
|
|||
free_netdev(netdev);
|
||||
err_alloc_etherdev:
|
||||
pci_release_selected_regions(pdev,
|
||||
pci_select_bars(pdev, IORESOURCE_MEM));
|
||||
pci_select_bars(pdev, IORESOURCE_MEM));
|
||||
err_pci_reg:
|
||||
err_dma:
|
||||
pci_disable_device(pdev);
|
||||
|
@ -6905,7 +6879,7 @@ static void e1000_remove(struct pci_dev *pdev)
|
|||
if (adapter->hw.flash_address)
|
||||
iounmap(adapter->hw.flash_address);
|
||||
pci_release_selected_regions(pdev,
|
||||
pci_select_bars(pdev, IORESOURCE_MEM));
|
||||
pci_select_bars(pdev, IORESOURCE_MEM));
|
||||
|
||||
free_netdev(netdev);
|
||||
|
||||
|
@ -6926,7 +6900,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
|
|||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
|
||||
board_82571 },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
|
||||
|
@ -7002,8 +6977,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
|
|||
#ifdef CONFIG_PM
|
||||
static const struct dev_pm_ops e1000_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
|
||||
SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
|
||||
e1000_runtime_resume, e1000_idle)
|
||||
SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
|
||||
e1000_idle)
|
||||
};
|
||||
#endif
|
||||
|
||||
|
|
|
@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw)
|
|||
{
|
||||
u32 ctrl_ext;
|
||||
|
||||
udelay(10);
|
||||
usleep_range(10, 20);
|
||||
ctrl_ext = er32(CTRL_EXT);
|
||||
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
|
||||
ew32(CTRL_EXT, ctrl_ext);
|
||||
|
|
|
@ -45,7 +45,7 @@
|
|||
unsigned int copybreak = COPYBREAK_DEFAULT;
|
||||
module_param(copybreak, uint, 0644);
|
||||
MODULE_PARM_DESC(copybreak,
|
||||
"Maximum size of packet that is copied to a new buffer on receive");
|
||||
"Maximum size of packet that is copied to a new buffer on receive");
|
||||
|
||||
/* All parameters are treated the same, as an integer array of values.
|
||||
* This macro just reduces the need to repeat the same declaration code
|
||||
|
@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
|
|||
*
|
||||
* Default Value: 1 (enabled)
|
||||
*/
|
||||
E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
|
||||
E1000_PARAM(WriteProtectNVM,
|
||||
"Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
|
||||
|
||||
/* Enable CRC Stripping
|
||||
*
|
||||
|
@ -160,13 +161,18 @@ struct e1000_option {
|
|||
const char *err;
|
||||
int def;
|
||||
union {
|
||||
struct { /* range_option info */
|
||||
/* range_option info */
|
||||
struct {
|
||||
int min;
|
||||
int max;
|
||||
} r;
|
||||
struct { /* list_option info */
|
||||
/* list_option info */
|
||||
struct {
|
||||
int nr;
|
||||
struct e1000_opt_list { int i; char *str; } *p;
|
||||
struct e1000_opt_list {
|
||||
int i;
|
||||
char *str;
|
||||
} *p;
|
||||
} l;
|
||||
} arg;
|
||||
};
|
||||
|
@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
"Using defaults for all values\n");
|
||||
}
|
||||
|
||||
{ /* Transmit Interrupt Delay */
|
||||
/* Transmit Interrupt Delay */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = range_option,
|
||||
.name = "Transmit Interrupt Delay",
|
||||
|
@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
adapter->tx_int_delay = opt.def;
|
||||
}
|
||||
}
|
||||
{ /* Transmit Absolute Interrupt Delay */
|
||||
/* Transmit Absolute Interrupt Delay */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = range_option,
|
||||
.name = "Transmit Absolute Interrupt Delay",
|
||||
|
@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
adapter->tx_abs_int_delay = opt.def;
|
||||
}
|
||||
}
|
||||
{ /* Receive Interrupt Delay */
|
||||
/* Receive Interrupt Delay */
|
||||
{
|
||||
static struct e1000_option opt = {
|
||||
.type = range_option,
|
||||
.name = "Receive Interrupt Delay",
|
||||
|
@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
adapter->rx_int_delay = opt.def;
|
||||
}
|
||||
}
|
||||
{ /* Receive Absolute Interrupt Delay */
|
||||
/* Receive Absolute Interrupt Delay */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = range_option,
|
||||
.name = "Receive Absolute Interrupt Delay",
|
||||
|
@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
adapter->rx_abs_int_delay = opt.def;
|
||||
}
|
||||
}
|
||||
{ /* Interrupt Throttling Rate */
|
||||
/* Interrupt Throttling Rate */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = range_option,
|
||||
.name = "Interrupt Throttling Rate (ints/sec)",
|
||||
|
@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
break;
|
||||
}
|
||||
}
|
||||
{ /* Interrupt Mode */
|
||||
/* Interrupt Mode */
|
||||
{
|
||||
static struct e1000_option opt = {
|
||||
.type = range_option,
|
||||
.name = "Interrupt Mode",
|
||||
|
@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
kfree(opt.err);
|
||||
#endif
|
||||
}
|
||||
{ /* Smart Power Down */
|
||||
/* Smart Power Down */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = enable_option,
|
||||
.name = "PHY Smart Power Down",
|
||||
|
@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
adapter->flags |= FLAG_SMART_POWER_DOWN;
|
||||
}
|
||||
}
|
||||
{ /* CRC Stripping */
|
||||
/* CRC Stripping */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = enable_option,
|
||||
.name = "CRC Stripping",
|
||||
|
@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
|
||||
}
|
||||
}
|
||||
{ /* Kumeran Lock Loss Workaround */
|
||||
/* Kumeran Lock Loss Workaround */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = enable_option,
|
||||
.name = "Kumeran Lock Loss Workaround",
|
||||
.err = "defaulting to Enabled",
|
||||
.def = OPTION_ENABLED
|
||||
};
|
||||
bool enabled = opt.def;
|
||||
|
||||
if (num_KumeranLockLoss > bd) {
|
||||
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
|
||||
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
|
||||
if (hw->mac.type == e1000_ich8lan)
|
||||
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
|
||||
kmrn_lock_loss);
|
||||
} else {
|
||||
if (hw->mac.type == e1000_ich8lan)
|
||||
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
|
||||
opt.def);
|
||||
enabled = kmrn_lock_loss;
|
||||
}
|
||||
|
||||
if (hw->mac.type == e1000_ich8lan)
|
||||
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
|
||||
enabled);
|
||||
}
|
||||
{ /* Write-protect NVM */
|
||||
/* Write-protect NVM */
|
||||
{
|
||||
static const struct e1000_option opt = {
|
||||
.type = enable_option,
|
||||
.name = "Write-protect NVM",
|
||||
|
@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
|
|||
|
||||
if (adapter->flags & FLAG_IS_ICH) {
|
||||
if (num_WriteProtectNVM > bd) {
|
||||
unsigned int write_protect_nvm = WriteProtectNVM[bd];
|
||||
unsigned int write_protect_nvm =
|
||||
WriteProtectNVM[bd];
|
||||
e1000_validate_option(&write_protect_nvm, &opt,
|
||||
adapter);
|
||||
if (write_protect_nvm)
|
||||
|
|
|
@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
|
|||
|
||||
/* Cable length tables */
|
||||
static const u16 e1000_m88_cable_length_table[] = {
|
||||
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
|
||||
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
|
||||
};
|
||||
|
||||
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
|
||||
ARRAY_SIZE(e1000_m88_cable_length_table)
|
||||
|
||||
|
@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
|
|||
66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
|
||||
87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
|
||||
100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
|
||||
124};
|
||||
124
|
||||
};
|
||||
|
||||
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
|
||||
ARRAY_SIZE(e1000_igp_2_cable_length_table)
|
||||
|
||||
|
@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
|
|||
|
||||
manc = er32(MANC);
|
||||
|
||||
return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
|
||||
E1000_BLK_PHY_RESET : 0;
|
||||
return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
|
|||
return ret_val;
|
||||
|
||||
phy->id = (u32)(phy_id << 16);
|
||||
udelay(20);
|
||||
usleep_range(20, 40);
|
||||
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
@ -162,7 +165,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
* the lower time out
|
||||
*/
|
||||
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
|
||||
udelay(50);
|
||||
usleep_range(50, 100);
|
||||
mdic = er32(MDIC);
|
||||
if (mdic & E1000_MDIC_READY)
|
||||
break;
|
||||
|
@ -175,13 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
e_dbg("MDI Error\n");
|
||||
return -E1000_ERR_PHY;
|
||||
}
|
||||
*data = (u16) mdic;
|
||||
*data = (u16)mdic;
|
||||
|
||||
/* Allow some time after each MDIC transaction to avoid
|
||||
* reading duplicate data in the next MDIC transaction.
|
||||
*/
|
||||
if (hw->mac.type == e1000_pch2lan)
|
||||
udelay(100);
|
||||
usleep_range(100, 200);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -220,7 +223,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
* the lower time out
|
||||
*/
|
||||
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
|
||||
udelay(50);
|
||||
usleep_range(50, 100);
|
||||
mdic = er32(MDIC);
|
||||
if (mdic & E1000_MDIC_READY)
|
||||
break;
|
||||
|
@ -238,7 +241,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
* reading duplicate data in the next MDIC transaction.
|
||||
*/
|
||||
if (hw->mac.type == e1000_pch2lan)
|
||||
udelay(100);
|
||||
usleep_range(100, 200);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -324,7 +327,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
|
|||
* semaphores before exiting.
|
||||
**/
|
||||
static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
|
||||
bool locked)
|
||||
bool locked)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
|
||||
|
@ -391,7 +394,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
* at the offset. Release any acquired semaphores before exiting.
|
||||
**/
|
||||
static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
|
||||
bool locked)
|
||||
bool locked)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
|
||||
|
@ -410,8 +413,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
|
|||
(u16)offset);
|
||||
if (!ret_val)
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
|
||||
offset,
|
||||
data);
|
||||
offset, data);
|
||||
if (!locked)
|
||||
hw->phy.ops.release(hw);
|
||||
|
||||
|
@ -458,7 +460,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
* Release any acquired semaphores before exiting.
|
||||
**/
|
||||
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
|
||||
bool locked)
|
||||
bool locked)
|
||||
{
|
||||
u32 kmrnctrlsta;
|
||||
|
||||
|
@ -531,7 +533,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
* before exiting.
|
||||
**/
|
||||
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
|
||||
bool locked)
|
||||
bool locked)
|
||||
{
|
||||
u32 kmrnctrlsta;
|
||||
|
||||
|
@ -772,8 +774,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
|
|||
|
||||
phy_data |= M88E1000_EPSCR_TX_CLK_25;
|
||||
|
||||
if ((phy->revision == 2) &&
|
||||
(phy->id == M88E1111_I_PHY_ID)) {
|
||||
if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
|
||||
/* 82573L PHY - set the downshift counter to 5x. */
|
||||
phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
|
||||
phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
|
||||
|
@ -1296,7 +1297,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
|
|||
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
|
||||
|
||||
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
|
||||
100000, &link);
|
||||
100000, &link);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -1319,7 +1320,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
|
|||
|
||||
/* Try once more */
|
||||
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
|
||||
100000, &link);
|
||||
100000, &link);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
@ -1609,9 +1610,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
|
|||
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
|
||||
|
||||
if (!ret_val)
|
||||
phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal;
|
||||
phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
@ -1653,9 +1654,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
|
|||
ret_val = e1e_rphy(hw, offset, &data);
|
||||
|
||||
if (!ret_val)
|
||||
phy->cable_polarity = (data & mask)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal;
|
||||
phy->cable_polarity = ((data & mask)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
@ -1685,9 +1686,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
|
|||
ret_val = e1e_rphy(hw, offset, &phy_data);
|
||||
|
||||
if (!ret_val)
|
||||
phy->cable_polarity = (phy_data & mask)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal;
|
||||
phy->cable_polarity = ((phy_data & mask)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
@ -1733,7 +1734,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
|
|||
* Polls the PHY status register for link, 'iterations' number of times.
|
||||
**/
|
||||
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
|
||||
u32 usec_interval, bool *success)
|
||||
u32 usec_interval, bool *success)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
u16 i, phy_status;
|
||||
|
@ -1756,7 +1757,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
|
|||
if (phy_status & BMSR_LSTATUS)
|
||||
break;
|
||||
if (usec_interval >= 1000)
|
||||
mdelay(usec_interval/1000);
|
||||
mdelay(usec_interval / 1000);
|
||||
else
|
||||
udelay(usec_interval);
|
||||
}
|
||||
|
@ -1791,8 +1792,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
|
||||
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
|
||||
index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
|
||||
M88E1000_PSSR_CABLE_LENGTH_SHIFT);
|
||||
|
||||
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
|
||||
return -E1000_ERR_PHY;
|
||||
|
@ -1824,10 +1825,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
|
|||
u16 cur_agc_index, max_agc_index = 0;
|
||||
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
|
||||
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
|
||||
IGP02E1000_PHY_AGC_A,
|
||||
IGP02E1000_PHY_AGC_B,
|
||||
IGP02E1000_PHY_AGC_C,
|
||||
IGP02E1000_PHY_AGC_D
|
||||
IGP02E1000_PHY_AGC_A,
|
||||
IGP02E1000_PHY_AGC_B,
|
||||
IGP02E1000_PHY_AGC_C,
|
||||
IGP02E1000_PHY_AGC_D
|
||||
};
|
||||
|
||||
/* Read the AGC registers for all channels */
|
||||
|
@ -1841,8 +1842,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
|
|||
* that can be put into the lookup table to obtain the
|
||||
* approximate cable length.
|
||||
*/
|
||||
cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
|
||||
IGP02E1000_AGC_LENGTH_MASK;
|
||||
cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
|
||||
IGP02E1000_AGC_LENGTH_MASK);
|
||||
|
||||
/* Array index bound check. */
|
||||
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
|
||||
|
@ -1865,8 +1866,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
|
|||
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
|
||||
|
||||
/* Calculate cable length with the error range of +/- 10 meters. */
|
||||
phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
|
||||
(agc_value - IGP02E1000_AGC_RANGE) : 0;
|
||||
phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
|
||||
(agc_value - IGP02E1000_AGC_RANGE) : 0);
|
||||
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
|
||||
|
||||
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
|
||||
|
@ -2040,9 +2041,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
|
|||
return ret_val;
|
||||
} else {
|
||||
/* Polarity is forced */
|
||||
phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal;
|
||||
phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal);
|
||||
}
|
||||
|
||||
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
|
||||
|
@ -2119,7 +2120,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
|
|||
ew32(CTRL, ctrl);
|
||||
e1e_flush();
|
||||
|
||||
udelay(150);
|
||||
usleep_range(150, 300);
|
||||
|
||||
phy->ops.release(hw);
|
||||
|
||||
|
@ -2375,13 +2376,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
|
||||
(page << page_shift));
|
||||
(page << page_shift));
|
||||
if (ret_val)
|
||||
goto release;
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
data);
|
||||
|
||||
release:
|
||||
hw->phy.ops.release(hw);
|
||||
|
@ -2433,13 +2434,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
|
||||
(page << page_shift));
|
||||
(page << page_shift));
|
||||
if (ret_val)
|
||||
goto release;
|
||||
}
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
data);
|
||||
release:
|
||||
hw->phy.ops.release(hw);
|
||||
return ret_val;
|
||||
|
@ -2674,7 +2675,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
if (read) {
/* Read the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
data);
data);
} else {
/* Write the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,

@ -2763,7 +2764,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,

if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
data, true);
data, true);
goto out;
}

@ -2786,8 +2787,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
page << IGP_PAGE_SHIFT, reg);

ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
out:
if (!locked)
hw->phy.ops.release(hw);

@ -2871,7 +2871,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,

if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
&data, false);
&data, false);
goto out;
}

@ -2910,7 +2910,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
page << IGP_PAGE_SHIFT, reg);

ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
data);

out:
if (!locked)

@ -2988,15 +2988,15 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* These accesses done with PHY address 2 and without using pages.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
u16 *data, bool read)
{
s32 ret_val;
u32 addr_reg;
u32 data_reg;

/* This takes care of the difference with desktop vs mobile phy */
addr_reg = (hw->phy.type == e1000_phy_82578) ?
I82578_ADDR_REG : I82577_ADDR_REG;
addr_reg = ((hw->phy.type == e1000_phy_82578) ?
I82578_ADDR_REG : I82577_ADDR_REG);
data_reg = addr_reg + 1;

/* All operations in this function are phy address 2 */

@ -3050,8 +3050,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
if (ret_val)
return ret_val;

data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_MASK;
data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_MASK);

if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_1000))

@ -3086,9 +3086,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);

if (!ret_val)
phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal;
phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal);

return ret_val;
}

@ -3215,8 +3215,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;

length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
I82577_DSTATUS_CABLE_LENGTH_SHIFT);

if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;

@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
union ixgbe_adv_tx_desc *next_to_watch;
u16 length;
u16 next_to_watch;
u16 mapped_as_page;
};

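The header change above swaps a u16 ring index for a pointer to the end-of-packet descriptor, with NULL meaning "nothing pending"; the cleanup path later in this diff keys off that pointer instead of re-reading an index that could have gone stale. A sketch of the resulting layout, assuming the driver's own types from ixgbevf.h and ixgbe_vf.h; only the struct name here is illustrative.

/* Mirrors the per-buffer bookkeeping after the change above */
struct example_tx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned long time_stamp;
	union ixgbe_adv_tx_desc *next_to_watch;	/* NULL => no work pending */
	u16 length;
	u16 mapped_as_page;
};
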
@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
static struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
board_82599_vf},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
board_X540_vf},

static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
/* required last entry */
{0, }
};

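The hunk above turns the ID table into a const one via the DEFINE_PCI_DEVICE_TABLE macro and puts one entry per line. A minimal sketch of the same pattern in isolation; the device ID macro is a placeholder, and the MODULE_DEVICE_TABLE() line is shown as the usual companion for module autoloading, not as something this hunk adds.

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_DEV_ID_VF	0x10ed	/* placeholder device ID */

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, EXAMPLE_DEV_ID_VF), 0 },
	{ 0, }	/* required last entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
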
@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int i, eop, count = 0;
unsigned int i, count = 0;
unsigned int total_bytes = 0, total_packets = 0;

if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;

i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
eop_desc = tx_buffer_info->next_to_watch;

while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
do {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
/* eop could change between read and DD-check */
if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
goto cont_loop;

/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;

/* prevent any other reads prior to eop_desc */
read_barrier_depends();

/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;

/* clear next_to_watch to prevent false hangs */
tx_buffer_info->next_to_watch = NULL;

for ( ; !cleaned; count++) {
struct sk_buff *skb;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
cleaned = (tx_desc == eop_desc);
skb = tx_buffer_info->skb;

if (cleaned && skb) {

@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
i++;
if (i == tx_ring->count)
i = 0;

tx_buffer_info = &tx_ring->tx_buffer_info[i];
}

cont_loop:
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
}
eop_desc = tx_buffer_info->next_to_watch;
} while (count < tx_ring->count);

tx_ring->next_to_clean = i;

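The rewritten loop above replaces the index-based eop/cont_loop pattern with a do/while keyed off the next_to_watch pointer: bail out when it is NULL, order the reads, check DD, then clear the pointer before unmapping. The race-sensitive part condenses to the check below. This is a simplified sketch (ring walking, unmapping and byte/packet accounting omitted), reusing the driver's types and the IXGBE_TXD_STAT_DD flag from the hunk; the helper name is illustrative.

/* Returns true once the packet ending at next_to_watch has been written
 * back by hardware and has been claimed for cleanup.
 */
static bool example_tx_pkt_done(struct ixgbevf_tx_buffer *tx_buffer_info)
{
	union ixgbe_adv_tx_desc *eop_desc = tx_buffer_info->next_to_watch;

	/* NULL means no work is pending for this buffer */
	if (!eop_desc)
		return false;

	/* don't let the status read be speculated ahead of the pointer load */
	read_barrier_depends();

	/* DD clear: hardware has not finished the packet yet */
	if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
		return false;

	/* claim it, so neither a later pass nor the hang detector acts on an
	 * already-completed descriptor
	 */
	tx_buffer_info->next_to_watch = NULL;
	return true;
}
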
@ -2806,8 +2812,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}

static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
struct sk_buff *skb, u32 tx_flags)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;

@ -2832,7 +2837,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
size, DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->next_to_watch = i;

len -= size;
total -= size;

@ -2862,7 +2866,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->mapped_as_page = true;
tx_buffer_info->next_to_watch = i;

len -= size;
total -= size;

@ -2881,8 +2884,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
else
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
tx_ring->tx_buffer_info[first].time_stamp = jiffies;

return count;

@ -2891,7 +2892,6 @@ dma_error:

/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
tx_buffer_info->next_to_watch = 0;
count--;

/* clear timestamp and dma mappings for remaining portion of packet */

@ -2908,7 +2908,8 @@ dma_error:
}

static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
int count, u32 paylen, u8 hdr_len)
int count, unsigned int first, u32 paylen,
u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbevf_tx_buffer *tx_buffer_info;

@ -2959,6 +2960,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,

tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

tx_ring->tx_buffer_info[first].time_stamp = jiffies;

/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();

tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
}

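The block added above closes the race on the transmit side: next_to_watch is published only after every descriptor write has been made visible by wmb(), so the cleanup path can never see a pointer to descriptors that are still being written. A condensed sketch of that ordering, with an illustrative helper name and the descriptor setup elided.

/* Publish a newly built packet to the cleanup path and the hardware. */
static void example_tx_publish(struct ixgbevf_ring *tx_ring, unsigned int first,
			       union ixgbe_adv_tx_desc *eop_desc, unsigned int i)
{
	/* 1. all descriptor writes for the packet happen before this point */
	wmb();

	/* 2. only now may the cleanup path treat the packet as in flight */
	tx_ring->tx_buffer_info[first].next_to_watch = eop_desc;

	/* 3. advance the software tail; the caller then writes the hardware
	 *    tail register (see ixgbevf_xmit_frame below)
	 */
	tx_ring->next_to_use = i;
}
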
@ -3050,15 +3061,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_flags |= IXGBE_TX_FLAGS_CSUM;

ixgbevf_tx_queue(tx_ring, tx_flags,
ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
ixgbevf_tx_map(tx_ring, skb, tx_flags),
first, skb->len, hdr_len);

writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);