e1000e: cleanup CODE_INDENT checkpatch errors

ERROR:CODE_INDENT: code indent should use tabs where possible

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Author:    Bruce Allan, 2013-02-20 04:05:39 +00:00
Committer: Jeff Kirsher
Parent:    39ba22b413
Commit:    f0ff439872

7 changed files with 178 additions and 187 deletions
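For reference, the CODE_INDENT error quoted in the commit message comes from the kernel's scripts/checkpatch.pl tool. A minimal sketch of how to reproduce it against one of the files touched here, run from the top of the kernel source tree (only the -f/--file flag is assumed; nothing else is taken from this commit):

    $ ./scripts/checkpatch.pl -f drivers/net/ethernet/intel/e1000e/80003es2lan.c

After this change, checkpatch should no longer report CODE_INDENT errors for the seven files below.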

drivers/net/ethernet/intel/e1000e/80003es2lan.c

@ -116,7 +116,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@ -406,14 +406,14 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
udelay(200);
ret_val = e1000e_read_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
MAX_PHY_REG_ADDRESS & offset,
data);
udelay(200);
} else {
ret_val = e1000e_read_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
MAX_PHY_REG_ADDRESS & offset,
data);
}
e1000_release_phy_80003es2lan(hw);
@ -475,14 +475,14 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
udelay(200);
ret_val = e1000e_write_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
MAX_PHY_REG_ADDRESS &
offset, data);
udelay(200);
} else {
ret_val = e1000e_write_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
MAX_PHY_REG_ADDRESS &
offset, data);
}
e1000_release_phy_80003es2lan(hw);
@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
/* Enable retransmit on late collisions */
@ -818,10 +818,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* default to true to enable the MDIC W/A */
hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET >>
E1000_KMRNCTRLSTA_OFFSET_SHIFT,
&i);
ret_val =
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
if (!ret_val) {
if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
@ -1049,27 +1048,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
0xFFFF);
0xFFFF);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
&reg_data);
&reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
reg_data);
reg_data);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
&reg_data);
ret_val =
e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
&reg_data);
if (ret_val)
return ret_val;
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
reg_data);
ret_val =
e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
reg_data);
if (ret_val)
return ret_val;
@ -1096,7 +1097,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
&duplex);
&duplex);
if (ret_val)
return ret_val;
@ -1125,9 +1126,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
u16 reg_data, reg_data2;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
ret_val =
e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
if (ret_val)
return ret_val;
@ -1171,9 +1173,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
u32 i = 0;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
ret_val =
e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
if (ret_val)
return ret_val;
@ -1220,7 +1223,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@ -1255,7 +1258,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();

drivers/net/ethernet/intel/e1000e/82571.c

@ -846,9 +846,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
}
for (i = 0; i < words; i++) {
eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
E1000_NVM_RW_REG_START;
eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
E1000_NVM_RW_REG_START);
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
@ -1122,9 +1122,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
@ -1140,9 +1140,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
break;
default:
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
break;
}

drivers/net/ethernet/intel/e1000e/ethtool.c

@ -196,8 +196,7 @@ static int e1000_get_settings(struct net_device *netdev,
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
ETH_TP_MDI;
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@ -297,12 +296,10 @@ static int e1000_set_settings(struct net_device *netdev,
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
ADVERTISED_FIBRE | ADVERTISED_Autoneg;
else
hw->phy.autoneg_advertised = ecmd->advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
ADVERTISED_TP | ADVERTISED_Autoneg;
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
@ -345,7 +342,7 @@ static void e1000_get_pauseparam(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
pause->autoneg =
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
if (hw->fc.current_mode == e1000_fc_rx_pause) {
pause->rx_pause = 1;
@ -434,7 +431,7 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
adapter->pdev->device;
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@ -821,7 +818,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_80003es2lan:
toggle = 0x7FFFF3FF;
break;
default:
default:
toggle = 0x7FFFF033;
break;
}
@ -1178,8 +1175,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
dma_map_single(&pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev,
tx_ring->buffer_info[i].dma)) {
ret_val = 4;
@ -1225,10 +1222,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
ew32(RDH(0), 0);
ew32(RDT(0), 0);
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
E1000_RCTL_SBP | E1000_RCTL_SECRC |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
E1000_RCTL_SBP | E1000_RCTL_SECRC |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
ew32(RCTL, rctl);
for (i = 0; i < rx_ring->count; i++) {
@ -1243,8 +1240,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data, 2048,
DMA_FROM_DEVICE);
dma_map_single(&pdev->dev, skb->data, 2048,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev,
rx_ring->buffer_info[i].dma)) {
ret_val = 8;
@ -1980,11 +1977,11 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
p = (char *) &net_stats +
e1000_gstrings_stats[i].stat_offset;
e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
p = (char *) adapter +
e1000_gstrings_stats[i].stat_offset;
e1000_gstrings_stats[i].stat_offset;
break;
default:
data[i] = 0;
@ -1992,7 +1989,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}

drivers/net/ethernet/intel/e1000e/hw.h

@ -545,7 +545,7 @@ struct e1000_mac_info {
u16 mta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;

drivers/net/ethernet/intel/e1000e/ich8lan.c

@ -548,8 +548,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
<< FLASH_SECTOR_ADDR_SHIFT;
nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
<< FLASH_SECTOR_ADDR_SHIFT);
nvm->flash_bank_size /= 2;
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
@ -1073,9 +1073,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
((fwsm & E1000_FWSM_MODE_MASK) ==
(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
((fwsm & E1000_FWSM_MODE_MASK) ==
(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
}
/**
@ -1092,7 +1092,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
fwsm = er32(FWSM);
return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
(fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
(fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@ -1440,13 +1440,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
status_reg &= BM_CS_STATUS_LINK_UP |
BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_MASK;
status_reg &= (BM_CS_STATUS_LINK_UP |
BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_MASK);
if (status_reg == (BM_CS_STATUS_LINK_UP |
BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_1000))
BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_1000))
k1_enable = false;
}
@ -1455,13 +1455,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
status_reg &= HV_M_STATUS_LINK_UP |
HV_M_STATUS_AUTONEG_COMPLETE |
HV_M_STATUS_SPEED_MASK;
status_reg &= (HV_M_STATUS_LINK_UP |
HV_M_STATUS_AUTONEG_COMPLETE |
HV_M_STATUS_SPEED_MASK);
if (status_reg == (HV_M_STATUS_LINK_UP |
HV_M_STATUS_AUTONEG_COMPLETE |
HV_M_STATUS_SPEED_1000))
HV_M_STATUS_AUTONEG_COMPLETE |
HV_M_STATUS_SPEED_1000))
k1_enable = false;
}
@ -2384,7 +2384,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 0 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
&sig_byte);
&sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@ -2395,8 +2395,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 1 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
bank1_offset,
&sig_byte);
bank1_offset,
&sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@ -2635,8 +2635,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr;
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
do {
udelay(1);
@ -2783,8 +2783,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
data = dev_spec->shadow_ram[i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw, i +
old_bank_offset,
&data);
old_bank_offset,
&data);
if (ret_val)
break;
}
@ -2812,8 +2812,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
udelay(100);
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset + 1,
(u8)(data >> 8));
act_offset + 1,
(u8)(data >> 8));
if (ret_val)
break;
}
@ -2989,8 +2989,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr;
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
do {
udelay(1);
@ -3480,16 +3480,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy for both queues */
txdctl = er32(TXDCTL(0));
txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB;
txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB);
txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(0), txdctl);
txdctl = er32(TXDCTL(1));
txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB;
txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB);
txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(1), txdctl);
/* ICH8 has opposite polarity of no_snoop bits.
@ -3676,12 +3676,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&reg_data);
&reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
reg_data);
reg_data);
if (ret_val)
return ret_val;

drivers/net/ethernet/intel/e1000e/netdev.c

@ -850,8 +850,8 @@ check_page:
if (!buffer_info->dma)
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
buffer_info->page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@ -1068,8 +1068,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
static void e1000_print_hw_hang(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
print_hang_task);
struct e1000_adapter,
print_hang_task);
struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
@ -1549,7 +1549,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
0, length);
0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
@ -1590,10 +1590,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
buffer_info->page, 0,
length);
buffer_info->page, 0,
length);
e1000_consume_page(buffer_info, skb,
length);
length);
}
}
}
@ -1666,8 +1666,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE,
DMA_FROM_DEVICE);
PAGE_SIZE, DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
@ -2578,8 +2577,7 @@ set_itr_now:
* increasing
*/
new_itr = new_itr > adapter->itr ?
min(adapter->itr + (new_itr >> 2), new_itr) :
new_itr;
min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
adapter->itr = new_itr;
adapter->rx_ring->itr_val = new_itr;
if (adapter->msix_entries)
@ -2827,7 +2825,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
e1000_vlan_rx_add_vid(adapter->netdev, 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
e1000_vlan_rx_add_vid(adapter->netdev, vid);
e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
@ -3002,8 +3000,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Do not Store bad packets */
rctl &= ~E1000_RCTL_SBP;
@ -3275,7 +3273,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
/* update_mc_addr_list expects a packed array of only addresses. */
i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@ -4615,18 +4613,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
* our own version based on RUC and ROC
*/
netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
adapter->stats.cexterr;
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
netdev->stats.rx_length_errors = adapter->stats.ruc +
adapter->stats.roc;
adapter->stats.roc;
netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
netdev->stats.rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
netdev->stats.tx_errors = adapter->stats.ecol +
adapter->stats.latecol;
netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
netdev->stats.tx_aborted_errors = adapter->stats.ecol;
netdev->stats.tx_window_errors = adapter->stats.latecol;
netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
@ -5056,14 +5052,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@ -5072,7 +5068,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
i = tx_ring->next_to_use;
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
@ -5142,8 +5138,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
context_desc->lower_setup.ip_config = 0;
context_desc->upper_setup.tcp_fields.tucss = css;
context_desc->upper_setup.tcp_fields.tucso =
css + skb->csum_offset;
context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(cmd_len);
@ -5265,7 +5260,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (tx_flags & E1000_TX_FLAGS_TSO) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
E1000_TXD_CMD_TSE;
E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (tx_flags & E1000_TX_FLAGS_IPV4)
@ -5296,8 +5291,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
tx_desc->lower.data =
cpu_to_le32(txd_lower | buffer_info->length);
tx_desc->lower.data = cpu_to_le32(txd_lower |
buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
i++;
@ -5597,18 +5592,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
* our own version based on RUC and ROC
*/
stats->rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
adapter->stats.cexterr;
stats->rx_length_errors = adapter->stats.ruc +
adapter->stats.roc;
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
stats->rx_crc_errors = adapter->stats.crcerrs;
stats->rx_frame_errors = adapter->stats.algnerrc;
stats->rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
stats->tx_errors = adapter->stats.ecol +
adapter->stats.latecol;
stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
stats->tx_aborted_errors = adapter->stats.ecol;
stats->tx_window_errors = adapter->stats.latecol;
stats->tx_carrier_errors = adapter->stats.tncrs;
@ -6002,8 +5994,7 @@ static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
pci_set_power_state(pdev, PCI_D3hot);
}
static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
bool wake)
static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, bool wake)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -6413,7 +6404,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
e_info("(PCI Express:2.5GT/s:%s) %pM\n",
/* bus width */
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
"Width x1"),
"Width x1"),
/* MAC address */
netdev->dev_addr);
e_info("Intel(R) PRO/%s Network Connection\n",
@ -6550,7 +6541,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
dev_err(&pdev->dev,
"No usable DMA configuration, aborting\n");
goto err_dma;
}
}
@ -6835,7 +6827,7 @@ err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@ -6905,7 +6897,7 @@ static void e1000_remove(struct pci_dev *pdev)
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
pci_select_bars(pdev, IORESOURCE_MEM));
free_netdev(netdev);

drivers/net/ethernet/intel/e1000e/phy.c

@ -1609,9 +1609,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
if (!ret_val)
phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal;
phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal);
return ret_val;
}
@ -1653,9 +1653,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &data);
if (!ret_val)
phy->cable_polarity = (data & mask)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal;
phy->cable_polarity = ((data & mask)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal);
return ret_val;
}
@ -1685,9 +1685,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &phy_data);
if (!ret_val)
phy->cable_polarity = (phy_data & mask)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal;
phy->cable_polarity = ((phy_data & mask)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal);
return ret_val;
}
@ -1791,8 +1791,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
@ -1824,10 +1824,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
IGP02E1000_PHY_AGC_C,
IGP02E1000_PHY_AGC_D
IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
IGP02E1000_PHY_AGC_C,
IGP02E1000_PHY_AGC_D
};
/* Read the AGC registers for all channels */
@ -1841,8 +1841,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
* that can be put into the lookup table to obtain the
* approximate cable length.
*/
cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
IGP02E1000_AGC_LENGTH_MASK;
cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
IGP02E1000_AGC_LENGTH_MASK);
/* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
@ -1865,8 +1865,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
(agc_value - IGP02E1000_AGC_RANGE) : 0;
phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
(agc_value - IGP02E1000_AGC_RANGE) : 0);
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@ -2040,9 +2040,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
return ret_val;
} else {
/* Polarity is forced */
phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal;
phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal);
}
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
@ -2375,13 +2375,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
(page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
data);
release:
hw->phy.ops.release(hw);
@ -2433,13 +2433,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
(page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
data);
release:
hw->phy.ops.release(hw);
return ret_val;
@ -2674,7 +2674,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
if (read) {
/* Read the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
data);
data);
} else {
/* Write the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
@ -2763,7 +2763,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
data, true);
data, true);
goto out;
}
@ -2786,8 +2786,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
page << IGP_PAGE_SHIFT, reg);
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
out:
if (!locked)
hw->phy.ops.release(hw);
@ -2871,7 +2870,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
&data, false);
&data, false);
goto out;
}
@ -2910,7 +2909,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
page << IGP_PAGE_SHIFT, reg);
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
data);
out:
if (!locked)
@ -2995,8 +2994,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u32 data_reg;
/* This takes care of the difference with desktop vs mobile phy */
addr_reg = (hw->phy.type == e1000_phy_82578) ?
I82578_ADDR_REG : I82577_ADDR_REG;
addr_reg = ((hw->phy.type == e1000_phy_82578) ?
I82578_ADDR_REG : I82577_ADDR_REG);
data_reg = addr_reg + 1;
/* All operations in this function are phy address 2 */
@ -3050,8 +3049,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
if (ret_val)
return ret_val;
data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_MASK;
data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_MASK);
if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_1000))
@ -3086,9 +3085,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal;
phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
? e1000_rev_polarity_reversed
: e1000_rev_polarity_normal);
return ret_val;
}
@ -3215,8 +3214,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
I82577_DSTATUS_CABLE_LENGTH_SHIFT);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;