Staging: et131x: prune all the debug code

We don't need it; we have a perfectly good set of debug tools. For this pass,
keep a few debug printks around for the "should not happen" cases.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Alan Cox 2009-08-27 11:03:09 +01:00 committed by Greg Kroah-Hartman
parent bc7f9c597f
commit 15700039b1
13 changed files with 117 additions and 1721 deletions
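To make the conversion pattern concrete, here is a condensed before/after fragment assembled from the hunks below (not a complete, standalone function; the surrounding declarations live in the driver headers). Driver-private DBG_ENTER()/DBG_LEAVE() tracing is dropped outright, DBG_ERROR()/DBG_WARNING() messages for "should not happen" conditions become dev_err()/dev_warn() keyed to the PCI device, and DBG_ASSERT() becomes WARN_ON() with the asserted condition inverted.

```c
/* Before: driver-private debug facility, only built with CONFIG_ET131X_DEBUG */
DBG_ENTER(et131x_dbginfo);
if (csr.bits.halt_status != 1)
	DBG_ERROR(et131x_dbginfo,
		  "RX Dma failed to enter halt state. CSR 0x%08x\n",
		  csr.value);
DBG_ASSERT(rx_local->nReadyRecv <= rx_local->NumRfd);
DBG_LEAVE(et131x_dbginfo);

/* After: standard kernel logging on the PCI device, WARN_ON() for asserts */
if (csr.bits.halt_status != 1)
	dev_err(&etdev->pdev->dev,
		"RX Dma failed to enter halt state. CSR 0x%08x\n",
		csr.value);
/* the assert becomes a warning, so the condition is inverted */
WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
```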


@ -10,7 +10,6 @@ et131x-objs := et1310_eeprom.o \
et1310_pm.o \
et1310_rx.o \
et1310_tx.o \
et131x_debug.o \
et131x_initpci.o \
et131x_isr.o \
et131x_netdev.o


@ -56,7 +56,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/pci.h>


@ -56,7 +56,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/init.h>
@ -75,6 +74,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <asm/system.h>
#include <linux/netdevice.h>
@ -92,11 +92,6 @@
#include "et131x_adapter.h"
#include "et131x_initpci.h"
/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */
/**
* ConfigMacRegs1 - Initialize the first part of MAC regs
* @pAdpater: pointer to our adapter structure
@ -110,8 +105,6 @@ void ConfigMACRegs1(struct et131x_adapter *etdev)
MAC_HFDP_t hfdp;
MII_MGMT_CFG_t mii_mgmt_cfg;
DBG_ENTER(et131x_dbginfo);
/* First we need to reset everything. Write to MAC configuration
* register 1 to perform reset.
*/
@ -171,8 +164,6 @@ void ConfigMACRegs1(struct et131x_adapter *etdev)
/* clear out MAC config reset */
writel(0, &pMac->cfg1.value);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -188,8 +179,6 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
MAC_IF_CTRL_t ifctrl;
TXMAC_CTL_t ctl;
DBG_ENTER(et131x_dbginfo);
ctl.value = readl(&etdev->regs->txmac.ctl.value);
cfg1.value = readl(&pMac->cfg1.value);
cfg2.value = readl(&pMac->cfg2.value);
@ -255,17 +244,11 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
delay < 100);
if (delay == 100) {
DBG_ERROR(et131x_dbginfo,
dev_warn(&etdev->pdev->dev,
"Syncd bits did not respond correctly cfg1 word 0x%08x\n",
cfg1.value);
}
DBG_TRACE(et131x_dbginfo,
"Speed %d, Dup %d, CFG1 0x%08x, CFG2 0x%08x, if_ctrl 0x%08x\n",
etdev->linkspeed, etdev->duplex_mode,
readl(&pMac->cfg1.value), readl(&pMac->cfg2.value),
readl(&pMac->if_ctrl.value));
/* Enable TXMAC */
ctl.bits.txmac_en = 0x1;
ctl.bits.fc_disable = 0x1;
@ -275,12 +258,7 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
if (etdev->Flags & fMP_ADAPTER_LOWER_POWER) {
et131x_rx_dma_enable(etdev);
et131x_tx_dma_enable(etdev);
} else {
DBG_WARNING(et131x_dbginfo,
"Didn't enable Rx/Tx due to low-power mode\n");
}
DBG_LEAVE(et131x_dbginfo);
}
void ConfigRxMacRegs(struct et131x_adapter *etdev)
@ -290,8 +268,6 @@ void ConfigRxMacRegs(struct et131x_adapter *etdev)
RXMAC_WOL_SA_HI_t sa_hi;
RXMAC_PF_CTRL_t pf_ctrl = { 0 };
DBG_ENTER(et131x_dbginfo);
/* Disable the MAC while it is being configured (also disable WOL) */
writel(0x8, &pRxMac->ctrl.value);
@ -421,8 +397,6 @@ void ConfigRxMacRegs(struct et131x_adapter *etdev)
*/
writel(pf_ctrl.value, &pRxMac->pf_ctrl.value);
writel(0x9, &pRxMac->ctrl.value);
DBG_LEAVE(et131x_dbginfo);
}
void ConfigTxMacRegs(struct et131x_adapter *etdev)
@ -430,8 +404,6 @@ void ConfigTxMacRegs(struct et131x_adapter *etdev)
struct _TXMAC_t __iomem *pTxMac = &etdev->regs->txmac;
TXMAC_CF_PARAM_t Local;
DBG_ENTER(et131x_dbginfo);
/* We need to update the Control Frame Parameters
* cfpt - control frame pause timer set to 64 (0x40)
* cfep - control frame extended pause timer set to 0x0
@ -443,8 +415,6 @@ void ConfigTxMacRegs(struct et131x_adapter *etdev)
Local.bits.cfep = 0x0;
writel(Local.value, &pTxMac->cf_param.value);
}
DBG_LEAVE(et131x_dbginfo);
}
void ConfigMacStatRegs(struct et131x_adapter *etdev)
@ -452,8 +422,6 @@ void ConfigMacStatRegs(struct et131x_adapter *etdev)
struct _MAC_STAT_t __iomem *pDevMacStat =
&etdev->regs->macStat;
DBG_ENTER(et131x_dbginfo);
/* Next we need to initialize all the MAC_STAT registers to zero on
* the device.
*/
@ -534,8 +502,6 @@ void ConfigMacStatRegs(struct et131x_adapter *etdev)
writel(Carry2M.value, &pDevMacStat->Carry2M.value);
}
DBG_LEAVE(et131x_dbginfo);
}
void ConfigFlowControl(struct et131x_adapter *etdev)
@ -614,8 +580,6 @@ void HandleMacStatInterrupt(struct et131x_adapter *etdev)
MAC_STAT_REG_1_t Carry1;
MAC_STAT_REG_2_t Carry2;
DBG_ENTER(et131x_dbginfo);
/* Read the interrupt bits from the register(s). These are Clear On
* Write.
*/
@ -659,8 +623,6 @@ void HandleMacStatInterrupt(struct et131x_adapter *etdev)
etdev->Stats.late_collisions += COUNTER_WRAP_12_BIT;
if (Carry2.bits.tncl)
etdev->Stats.collisions += COUNTER_WRAP_12_BIT;
DBG_LEAVE(et131x_dbginfo);
}
void SetupDeviceForMulticast(struct et131x_adapter *etdev)
@ -674,30 +636,14 @@ void SetupDeviceForMulticast(struct et131x_adapter *etdev)
uint32_t hash4 = 0;
u32 pm_csr;
DBG_ENTER(et131x_dbginfo);
/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
* the multi-cast LIST. If it is NOT specified, (and "ALL" is not
* specified) then we should pass NO multi-cast addresses to the
* driver.
*/
if (etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST) {
DBG_VERBOSE(et131x_dbginfo,
"MULTICAST flag is set, MCCount: %d\n",
etdev->MCAddressCount);
/* Loop through our multicast array and set up the device */
for (nIndex = 0; nIndex < etdev->MCAddressCount; nIndex++) {
DBG_VERBOSE(et131x_dbginfo,
"MCList[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
nIndex,
etdev->MCList[nIndex][0],
etdev->MCList[nIndex][1],
etdev->MCList[nIndex][2],
etdev->MCList[nIndex][3],
etdev->MCList[nIndex][4],
etdev->MCList[nIndex][5]);
result = ether_crc(6, etdev->MCList[nIndex]);
result = (result & 0x3F800000) >> 23;
@ -725,8 +671,6 @@ void SetupDeviceForMulticast(struct et131x_adapter *etdev)
writel(hash3, &rxmac->multi_hash3);
writel(hash4, &rxmac->multi_hash4);
}
DBG_LEAVE(et131x_dbginfo);
}
void SetupDeviceForUnicast(struct et131x_adapter *etdev)
@ -737,8 +681,6 @@ void SetupDeviceForUnicast(struct et131x_adapter *etdev)
RXMAC_UNI_PF_ADDR3_t uni_pf3;
u32 pm_csr;
DBG_ENTER(et131x_dbginfo);
/* Set up unicast packet filter reg 3 to be the first two octets of
* the MAC address for both address
*
@ -769,6 +711,4 @@ void SetupDeviceForUnicast(struct et131x_adapter *etdev)
writel(uni_pf2.value, &rxmac->uni_pf_addr2.value);
writel(uni_pf3.value, &rxmac->uni_pf_addr3.value);
}
DBG_LEAVE(et131x_dbginfo);
}


@ -56,7 +56,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/pci.h>
@ -98,11 +97,6 @@
#include "et1310_rx.h"
#include "et1310_mac.h"
/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */
/* Prototypes for functions with local scope */
static int et131x_xcvr_init(struct et131x_adapter *adapter);
@ -157,9 +151,9 @@ int PhyMiRead(struct et131x_adapter *adapter, uint8_t xcvrAddr,
/* If we hit the max delay, we could not read the register */
if (delay >= 50) {
DBG_WARNING(et131x_dbginfo,
dev_warn(&adapter->pdev->dev,
"xcvrReg 0x%08x could not be read\n", xcvrReg);
DBG_WARNING(et131x_dbginfo, "status is 0x%08x\n",
dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
miiIndicator.value);
status = -EIO;
@ -179,10 +173,6 @@ int PhyMiRead(struct et131x_adapter *adapter, uint8_t xcvrAddr,
/* Stop the read operation */
writel(0, &mac->mii_mgmt_cmd.value);
DBG_VERBOSE(et131x_dbginfo, " xcvr_addr = 0x%02x, "
"xcvr_reg = 0x%02x, "
"value = 0x%04x.\n", xcvrAddr, xcvrReg, *value);
/* set the registers we touched back to the state at which we entered
* this function
*/
@ -242,11 +232,11 @@ int MiWrite(struct et131x_adapter *adapter, uint8_t xcvrReg, uint16_t value)
if (delay == 100) {
uint16_t TempValue;
DBG_WARNING(et131x_dbginfo,
"xcvrReg 0x%08x could not be written", xcvrReg);
DBG_WARNING(et131x_dbginfo, "status is 0x%08x\n",
dev_warn(&adapter->pdev->dev,
"xcvrReg 0x%08x could not be written", xcvrReg);
dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
miiIndicator.value);
DBG_WARNING(et131x_dbginfo, "command is 0x%08x\n",
dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
readl(&mac->mii_mgmt_cmd.value));
MiRead(adapter, xcvrReg, &TempValue);
@ -263,10 +253,6 @@ int MiWrite(struct et131x_adapter *adapter, uint8_t xcvrReg, uint16_t value)
writel(miiAddr.value, &mac->mii_mgmt_addr.value);
writel(miiCmd.value, &mac->mii_mgmt_cmd.value);
DBG_VERBOSE(et131x_dbginfo, " xcvr_addr = 0x%02x, "
"xcvr_reg = 0x%02x, "
"value = 0x%04x.\n", xcvrAddr, xcvrReg, value);
return status;
}
@ -284,8 +270,6 @@ int et131x_xcvr_find(struct et131x_adapter *adapter)
MI_IDR2_t idr2;
uint32_t xcvr_id;
DBG_ENTER(et131x_dbginfo);
/* We need to get xcvr id and address we just get the first one */
for (xcvr_addr = 0; xcvr_addr < 32; xcvr_addr++) {
/* Read the ID from the PHY */
@ -299,10 +283,6 @@ int et131x_xcvr_find(struct et131x_adapter *adapter)
xcvr_id = (uint32_t) ((idr1.value << 16) | idr2.value);
if ((idr1.value != 0) && (idr1.value != 0xffff)) {
DBG_TRACE(et131x_dbginfo,
"Xcvr addr: 0x%02x\tXcvr_id: 0x%08x\n",
xcvr_addr, xcvr_id);
adapter->Stats.xcvr_id = xcvr_id;
adapter->Stats.xcvr_addr = xcvr_addr;
@ -310,8 +290,6 @@ int et131x_xcvr_find(struct et131x_adapter *adapter)
break;
}
}
DBG_LEAVE(et131x_dbginfo);
return status;
}
@ -327,13 +305,9 @@ int et131x_setphy_normal(struct et131x_adapter *adapter)
{
int status;
DBG_ENTER(et131x_dbginfo);
/* Make sure the PHY is powered up */
ET1310_PhyPowerDown(adapter, 0);
status = et131x_xcvr_init(adapter);
DBG_LEAVE(et131x_dbginfo);
return status;
}
@ -350,8 +324,6 @@ static int et131x_xcvr_init(struct et131x_adapter *adapter)
MI_ISR_t isr;
MI_LCR2_t lcr2;
DBG_ENTER(et131x_dbginfo);
/* Zero out the adapter structure variable representing BMSR */
adapter->Bmsr.value = 0;
@ -412,8 +384,6 @@ static int et131x_xcvr_init(struct et131x_adapter *adapter)
/* NOTE - Do we need this? */
ET1310_PhyAccessMiBit(adapter, TRUEPHY_BIT_SET, 0, 9, NULL);
DBG_LEAVE(et131x_dbginfo);
return status;
} else {
ET1310_PhyAutoNeg(adapter, false);
@ -469,7 +439,6 @@ static int et131x_xcvr_init(struct et131x_adapter *adapter)
break;
}
DBG_LEAVE(et131x_dbginfo);
return status;
}
}
@ -486,8 +455,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
uint32_t polarity;
unsigned long flags;
DBG_ENTER(et131x_dbginfo);
if (bmsr_ints.bits.link_status) {
if (bmsr.bits.link_status) {
etdev->PoMgmt.TransPhyComaModeOnBoot = 20;
@ -506,8 +473,8 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
if (etdev->RegistryPhyLoopbk == false)
netif_carrier_on(etdev->netdev);
} else {
DBG_WARNING(et131x_dbginfo,
"Link down cable problem\n");
dev_warn(&etdev->pdev->dev,
"Link down - cable problem ?\n");
if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
/* NOTE - Is there a way to query this without
@ -586,11 +553,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
etdev->linkspeed = speed;
etdev->duplex_mode = duplex;
DBG_TRACE(et131x_dbginfo,
"etdev->linkspeed 0x%04x, etdev->duplex_mode 0x%08x\n",
etdev->linkspeed,
etdev->duplex_mode);
etdev->PoMgmt.TransPhyComaModeOnBoot = 20;
if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
@ -619,8 +581,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
ConfigMACRegs2(etdev);
}
}
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -631,8 +591,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
*/
void TPAL_SetPhy10HalfDuplex(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -646,8 +604,6 @@ void TPAL_SetPhy10HalfDuplex(struct et131x_adapter *etdev)
/* Power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -658,8 +614,6 @@ void TPAL_SetPhy10HalfDuplex(struct et131x_adapter *etdev)
*/
void TPAL_SetPhy10FullDuplex(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -673,8 +627,6 @@ void TPAL_SetPhy10FullDuplex(struct et131x_adapter *etdev)
/* Power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -683,8 +635,6 @@ void TPAL_SetPhy10FullDuplex(struct et131x_adapter *etdev)
*/
void TPAL_SetPhy10Force(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -704,8 +654,6 @@ void TPAL_SetPhy10Force(struct et131x_adapter *etdev)
/* Power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -716,8 +664,6 @@ void TPAL_SetPhy10Force(struct et131x_adapter *etdev)
*/
void TPAL_SetPhy100HalfDuplex(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -734,8 +680,6 @@ void TPAL_SetPhy100HalfDuplex(struct et131x_adapter *etdev)
/* Power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -746,8 +690,6 @@ void TPAL_SetPhy100HalfDuplex(struct et131x_adapter *etdev)
*/
void TPAL_SetPhy100FullDuplex(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -761,8 +703,6 @@ void TPAL_SetPhy100FullDuplex(struct et131x_adapter *etdev)
/* Power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -771,8 +711,6 @@ void TPAL_SetPhy100FullDuplex(struct et131x_adapter *etdev)
*/
void TPAL_SetPhy100Force(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -792,8 +730,6 @@ void TPAL_SetPhy100Force(struct et131x_adapter *etdev)
/* Power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -804,8 +740,6 @@ void TPAL_SetPhy100Force(struct et131x_adapter *etdev)
*/
void TPAL_SetPhy1000FullDuplex(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -819,8 +753,6 @@ void TPAL_SetPhy1000FullDuplex(struct et131x_adapter *etdev)
/* power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -829,8 +761,6 @@ void TPAL_SetPhy1000FullDuplex(struct et131x_adapter *etdev)
*/
void TPAL_SetPhyAutoNeg(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Power down PHY */
ET1310_PhyPowerDown(etdev, 1);
@ -849,8 +779,6 @@ void TPAL_SetPhyAutoNeg(struct et131x_adapter *etdev)
/* Power up PHY */
ET1310_PhyPowerDown(etdev, 0);
DBG_LEAVE(et131x_dbginfo);
}


@ -56,7 +56,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/init.h>
@ -92,11 +91,6 @@
#include "et131x_adapter.h"
#include "et131x_initpci.h"
/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */
/**
* EnablePhyComa - called when network cable is unplugged
* @etdev: pointer to our adapter structure
@ -122,8 +116,6 @@ void EnablePhyComa(struct et131x_adapter *etdev)
unsigned long flags;
u32 GlobalPmCSR;
DBG_ENTER(et131x_dbginfo);
GlobalPmCSR = readl(&etdev->regs->global.pm_csr);
/* Save the GbE PHY speed and duplex modes. Need to restore this
@ -146,8 +138,6 @@ void EnablePhyComa(struct et131x_adapter *etdev)
/* Program gigE PHY in to Coma mode */
GlobalPmCSR |= ET_PM_PHY_SW_COMA;
writel(GlobalPmCSR, &etdev->regs->global.pm_csr);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -158,8 +148,6 @@ void DisablePhyComa(struct et131x_adapter *etdev)
{
u32 GlobalPmCSR;
DBG_ENTER(et131x_dbginfo);
GlobalPmCSR = readl(&etdev->regs->global.pm_csr);
/* Disable phy_sw_coma register and re-enable JAGCore clocks */
@ -193,7 +181,5 @@ void DisablePhyComa(struct et131x_adapter *etdev)
/* Need to re-enable Rx. */
et131x_rx_dma_enable(etdev);
DBG_LEAVE(et131x_dbginfo);
}


@ -56,7 +56,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/pci.h>
@ -93,11 +92,6 @@
#include "et1310_rx.h"
/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
@ -117,8 +111,6 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
uint32_t pktStatRingSize, FBRChunkSize;
RX_RING_t *rx_ring;
DBG_ENTER(et131x_dbginfo);
/* Setup some convenience pointers */
rx_ring = (RX_RING_t *) &adapter->RxRing;
@ -183,9 +175,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
bufsize,
&rx_ring->pFbr1RingPa);
if (!rx_ring->pFbr1RingVa) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Free Buffer Ring 1\n");
DBG_LEAVE(et131x_dbginfo);
return -ENOMEM;
}
@ -213,9 +204,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
bufsize,
&rx_ring->pFbr0RingPa);
if (!rx_ring->pFbr0RingVa) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Free Buffer Ring 0\n");
DBG_LEAVE(et131x_dbginfo);
return -ENOMEM;
}
@ -262,8 +252,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
&rx_ring->Fbr1MemPa[OuterLoop]);
if (!rx_ring->Fbr1MemVa[OuterLoop]) {
DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&adapter->pdev->dev,
"Could not alloc memory\n");
return -ENOMEM;
}
@ -313,8 +303,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
&rx_ring->Fbr0MemPa[OuterLoop]);
if (!rx_ring->Fbr0MemVa[OuterLoop]) {
DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&adapter->pdev->dev,
"Could not alloc memory\n");
return -ENOMEM;
}
@ -356,9 +346,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
&rx_ring->pPSRingPa);
if (!rx_ring->pPSRingVa) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Packet Status Ring\n");
DBG_LEAVE(et131x_dbginfo);
return -ENOMEM;
}
@ -384,9 +373,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
sizeof(RX_STATUS_BLOCK_t) +
0x7, &rx_ring->pRxStatusPa);
if (!rx_ring->pRxStatusVa) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Status Block\n");
DBG_LEAVE(et131x_dbginfo);
return -ENOMEM;
}
@ -422,8 +410,6 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
*/
INIT_LIST_HEAD(&rx_ring->RecvList);
INIT_LIST_HEAD(&rx_ring->RecvPendingList);
DBG_LEAVE(et131x_dbginfo);
return 0;
}
@ -439,13 +425,11 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
PMP_RFD pMpRfd;
RX_RING_t *rx_ring;
DBG_ENTER(et131x_dbginfo);
/* Setup some convenience pointers */
rx_ring = (RX_RING_t *) &adapter->RxRing;
/* Free RFDs and associated packet descriptors */
DBG_ASSERT(rx_ring->nReadyRecv == rx_ring->NumRfd);
WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);
while (!list_empty(&rx_ring->RecvList)) {
pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
@ -583,8 +567,6 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
/* Reset Counters */
rx_ring->nReadyRecv = 0;
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -601,8 +583,6 @@ int et131x_init_recv(struct et131x_adapter *adapter)
uint32_t TotalNumRfd = 0;
RX_RING_t *rx_ring = NULL;
DBG_ENTER(et131x_dbginfo);
/* Setup some convenience pointers */
rx_ring = (RX_RING_t *) &adapter->RxRing;
@ -612,7 +592,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
GFP_ATOMIC | GFP_DMA);
if (!pMpRfd) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Couldn't alloc RFD out of kmem_cache\n");
status = -ENOMEM;
continue;
@ -620,7 +600,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
status = et131x_rfd_resources_alloc(adapter, pMpRfd);
if (status != 0) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Couldn't alloc packet for RFD\n");
kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
continue;
@ -641,11 +621,9 @@ int et131x_init_recv(struct et131x_adapter *adapter)
if (status != 0) {
kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Allocation problems in et131x_init_recv\n");
}
DBG_LEAVE(et131x_dbginfo);
return status;
}
@ -687,8 +665,6 @@ void ConfigRxDmaRegs(struct et131x_adapter *etdev)
RXDMA_PSR_NUM_DES_t psr_num_des;
unsigned long flags;
DBG_ENTER(et131x_dbginfo);
/* Halt RXDMA to perform the reconfigure. */
et131x_rx_dma_disable(etdev);
@ -786,8 +762,6 @@ void ConfigRxDmaRegs(struct et131x_adapter *etdev)
writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time.value);
spin_unlock_irqrestore(&etdev->RcvLock, flags);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -814,8 +788,6 @@ void et131x_rx_dma_disable(struct et131x_adapter *etdev)
{
RXDMA_CSR_t csr;
DBG_ENTER(et131x_dbginfo);
/* Setup the receive dma configuration register */
writel(0x00002001, &etdev->regs->rxdma.csr.value);
csr.value = readl(&etdev->regs->rxdma.csr.value);
@ -823,12 +795,10 @@ void et131x_rx_dma_disable(struct et131x_adapter *etdev)
udelay(5);
csr.value = readl(&etdev->regs->rxdma.csr.value);
if (csr.bits.halt_status != 1)
DBG_ERROR(et131x_dbginfo,
dev_err(&etdev->pdev->dev,
"RX Dma failed to enter halt state. CSR 0x%08x\n",
csr.value);
}
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -837,8 +807,6 @@ void et131x_rx_dma_disable(struct et131x_adapter *etdev)
*/
void et131x_rx_dma_enable(struct et131x_adapter *etdev)
{
DBG_RX_ENTER(et131x_dbginfo);
if (etdev->RegistryPhyLoopbk)
/* RxDMA is disabled for loopback operation. */
writel(0x1, &etdev->regs->rxdma.csr.value);
@ -869,14 +837,12 @@ void et131x_rx_dma_enable(struct et131x_adapter *etdev)
udelay(5);
csr.value = readl(&etdev->regs->rxdma.csr.value);
if (csr.bits.halt_status != 0) {
DBG_ERROR(et131x_dbginfo,
dev_err(&etdev->pdev->dev,
"RX Dma failed to exit halt state. CSR 0x%08x\n",
csr.value);
}
}
}
DBG_RX_LEAVE(et131x_dbginfo);
}
/**
@ -905,9 +871,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
uint32_t localLen;
PKT_STAT_DESC_WORD0_t Word0;
DBG_RX_ENTER(et131x_dbginfo);
/* RX Status block is written by the DMA engine prior to every
* interrupt. It contains the next to be used entry in the Packet
* Status Ring, and also the two Free Buffer rings.
@ -919,8 +882,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
pRxStatusBlock->Word1.bits.PSRwrap ==
pRxLocal->local_psr_full.bits.psr_full_wrap) {
/* Looks like this ring is not updated yet */
DBG_RX(et131x_dbginfo, "(0)\n");
DBG_RX_LEAVE(et131x_dbginfo);
return NULL;
}
@ -937,23 +898,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
bufferIndex = (uint16_t) pPSREntry->word1.bits.bi;
Word0 = pPSREntry->word0;
DBG_RX(et131x_dbginfo, "RX PACKET STATUS\n");
DBG_RX(et131x_dbginfo, "\tlength : %d\n", localLen);
DBG_RX(et131x_dbginfo, "\tringIndex : %d\n", ringIndex);
DBG_RX(et131x_dbginfo, "\tbufferIndex : %d\n", bufferIndex);
DBG_RX(et131x_dbginfo, "\tword0 : 0x%08x\n", Word0.value);
#if 0
/* Check the Status Word that the MAC has appended to the PSR
* entry in case the MAC has detected errors.
*/
if (Word0.value & ALCATEL_BAD_STATUS) {
DBG_ERROR(et131x_dbginfo,
"NICRxPkts >> Alcatel Status Word error."
"Value 0x%08x\n", pPSREntry->word0.value);
}
#endif
/* Indicate that we have used this PSR entry. */
if (++pRxLocal->local_psr_full.bits.psr_full >
pRxLocal->PsrNumEntries - 1) {
@ -966,11 +910,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
#ifndef USE_FBR0
if (ringIndex != 1) {
DBG_ERROR(et131x_dbginfo,
"NICRxPkts PSR Entry %d indicates "
"Buffer Ring 0 in use\n",
pRxLocal->local_psr_full.bits.psr_full);
DBG_RX_LEAVE(et131x_dbginfo);
return NULL;
}
#endif
@ -987,12 +926,11 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
#endif
{
/* Illegal buffer or ring index cannot be used by S/W*/
DBG_ERROR(et131x_dbginfo,
dev_err(&etdev->pdev->dev,
"NICRxPkts PSR Entry %d indicates "
"length of %d and/or bad bi(%d)\n",
pRxLocal->local_psr_full.bits.psr_full,
localLen, bufferIndex);
DBG_RX_LEAVE(et131x_dbginfo);
return NULL;
}
@ -1004,9 +942,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
if (pMpRfd == NULL) {
DBG_RX(et131x_dbginfo,
"NULL RFD returned from RecvList via list_entry()\n");
DBG_RX_LEAVE(et131x_dbginfo);
spin_unlock_irqrestore(&etdev->RcvLock, flags);
return NULL;
}
@ -1040,19 +975,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
etdev->ReplicaPhyLoopbkPF = 1;
}
}
DBG_WARNING(et131x_dbginfo,
"pBufVa:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
pBufVa[6], pBufVa[7], pBufVa[8],
pBufVa[9], pBufVa[10], pBufVa[11]);
DBG_WARNING(et131x_dbginfo,
"CurrentAddr:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
etdev->CurrentAddress[0],
etdev->CurrentAddress[1],
etdev->CurrentAddress[2],
etdev->CurrentAddress[3],
etdev->CurrentAddress[4],
etdev->CurrentAddress[5]);
}
/* Determine if this is a multicast packet coming in */
@ -1127,9 +1049,8 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
skb = dev_alloc_skb(pMpRfd->PacketSize + 2);
if (!skb) {
DBG_ERROR(et131x_dbginfo,
dev_err(&etdev->pdev->dev,
"Couldn't alloc an SKB for Rx\n");
DBG_RX_LEAVE(et131x_dbginfo);
return NULL;
}
@ -1149,9 +1070,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
}
nic_return_rfd(etdev, pMpRfd);
DBG_RX(et131x_dbginfo, "(1)\n");
DBG_RX_LEAVE(et131x_dbginfo);
return pMpRfd;
}
@ -1166,9 +1084,7 @@ void et131x_reset_recv(struct et131x_adapter *etdev)
PMP_RFD pMpRfd;
struct list_head *element;
DBG_ENTER(et131x_dbginfo);
DBG_ASSERT(!list_empty(&etdev->RxRing.RecvList));
WARN_ON(list_empty(&etdev->RxRing.RecvList));
/* Take all the RFD's from the pending list, and stick them on the
* RecvList.
@ -1180,8 +1096,6 @@ void et131x_reset_recv(struct et131x_adapter *etdev)
list_move_tail(&pMpRfd->list_node, &etdev->RxRing.RecvList);
}
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -1200,15 +1114,12 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
uint32_t PacketFreeCount = 0;
bool TempUnfinishedRec = false;
DBG_RX_ENTER(et131x_dbginfo);
PacketsToHandle = NUM_PACKETS_HANDLED;
/* Process up to available RFD's */
while (PacketArrayCount < PacketsToHandle) {
if (list_empty(&etdev->RxRing.RecvList)) {
DBG_ASSERT(etdev->RxRing.nReadyRecv == 0);
DBG_ERROR(et131x_dbginfo, "NO RFD's !!!!!!!!!!!!!\n");
WARN_ON(etdev->RxRing.nReadyRecv != 0);
TempUnfinishedRec = true;
break;
}
@ -1246,8 +1157,8 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
RFDFreeArray[PacketFreeCount] = pMpRfd;
PacketFreeCount++;
DBG_WARNING(et131x_dbginfo,
"RFD's are running out !!!!!!!!!!!!!\n");
dev_warn(&etdev->pdev->dev,
"RFD's are running out\n");
}
PacketArray[PacketArrayCount] = pMpRfd->Packet;
@ -1262,8 +1173,6 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
/* Watchdog timer will disable itself if appropriate. */
etdev->RxRing.UnfinishedReceives = false;
}
DBG_RX_LEAVE(et131x_dbginfo);
}
static inline u32 bump_fbr(u32 *fbr, u32 limit)
@ -1289,8 +1198,6 @@ void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
uint8_t ri = pMpRfd->ringindex;
unsigned long flags;
DBG_RX_ENTER(et131x_dbginfo);
/* We don't use any of the OOB data besides status. Otherwise, we
* need to clean up OOB data
*/
@ -1339,7 +1246,7 @@ void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
#endif
spin_unlock_irqrestore(&etdev->FbrLock, flags);
} else {
DBG_ERROR(et131x_dbginfo,
dev_err(&etdev->pdev->dev,
"NICReturnRFD illegal Buffer Index returned\n");
}
@ -1351,6 +1258,5 @@ void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
rx_local->nReadyRecv++;
spin_unlock_irqrestore(&etdev->RcvLock, flags);
DBG_ASSERT(rx_local->nReadyRecv <= rx_local->NumRfd);
DBG_RX_LEAVE(et131x_dbginfo);
WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}


@ -56,7 +56,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/pci.h>
@ -95,11 +94,6 @@
#include "et1310_tx.h"
/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */
static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
@ -125,14 +119,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
int desc_size = 0;
TX_RING_t *tx_ring = &adapter->TxRing;
DBG_ENTER(et131x_dbginfo);
/* Allocate memory for the TCB's (Transmit Control Block) */
adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
GFP_ATOMIC | GFP_DMA);
if (!adapter->TxRing.MpTcbMem) {
DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
return -ENOMEM;
}
@ -144,8 +135,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
(PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
&tx_ring->pTxDescRingPa);
if (!adapter->TxRing.pTxDescRingVa) {
DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
return -ENOMEM;
}
@ -170,9 +160,8 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
sizeof(TX_STATUS_BLOCK_t),
&tx_ring->pTxStatusPa);
if (!adapter->TxRing.pTxStatusPa) {
DBG_ERROR(et131x_dbginfo,
"Cannot alloc memory for Tx status block\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Tx status block\n");
return -ENOMEM;
}
@ -181,13 +170,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
NIC_MIN_PACKET_SIZE,
&tx_ring->pTxDummyBlkPa);
if (!adapter->TxRing.pTxDummyBlkPa) {
DBG_ERROR(et131x_dbginfo,
"Cannot alloc memory for Tx dummy buffer\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Tx dummy buffer\n");
return -ENOMEM;
}
DBG_LEAVE(et131x_dbginfo);
return 0;
}
@ -201,8 +188,6 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
int desc_size = 0;
DBG_ENTER(et131x_dbginfo);
if (adapter->TxRing.pTxDescRingVa) {
/* Free memory relating to Tx rings here */
adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
@ -240,8 +225,6 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
/* Free the memory for MP_TCB structures */
kfree(adapter->TxRing.MpTcbMem);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -252,8 +235,6 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;
DBG_ENTER(et131x_dbginfo);
/* Load the hardware with the start of the transmit descriptor ring. */
writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
&txdma->pr_base_hi);
@ -277,8 +258,6 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
writel(0, &txdma->service_request);
etdev->TxRing.txDmaReadyToSend = 0;
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -287,12 +266,8 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
*/
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
/* Setup the tramsmit dma configuration register */
writel(0x101, &etdev->regs->txdma.csr.value);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -303,8 +278,6 @@ void et131x_tx_dma_disable(struct et131x_adapter *etdev)
*/
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
DBG_ENTER(et131x_dbginfo);
if (etdev->RegistryPhyLoopbk) {
/* TxDMA is disabled for loopback operation. */
writel(0x101, &etdev->regs->txdma.csr.value);
@ -319,8 +292,6 @@ void et131x_tx_dma_enable(struct et131x_adapter *etdev)
csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF;
writel(csr.value, &etdev->regs->txdma.csr.value);
}
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -333,8 +304,6 @@ void et131x_init_send(struct et131x_adapter *adapter)
uint32_t TcbCount;
TX_RING_t *tx_ring;
DBG_ENTER(et131x_dbginfo);
/* Setup some convenience pointers */
tx_ring = &adapter->TxRing;
pMpTcb = adapter->TxRing.MpTcbMem;
@ -364,8 +333,6 @@ void et131x_init_send(struct et131x_adapter *adapter)
tx_ring->CurrSendTail = (PMP_TCB) NULL;
INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -380,8 +347,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
int status = 0;
struct et131x_adapter *etdev = NULL;
DBG_TX_ENTER(et131x_dbginfo);
etdev = netdev_priv(netdev);
/* Send these packets
@ -397,7 +362,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
* packet under Linux; if we just send an error up to the
* netif layer, it will resend the skb to us.
*/
DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
status = -ENOMEM;
} else {
/* We need to see if the link is up; if it's not, make the
@ -409,9 +373,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
*/
if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
|| !netif_carrier_ok(netdev)) {
DBG_VERBOSE(et131x_dbginfo,
"Can't Tx, Link is DOWN; drop the packet\n");
dev_kfree_skb_any(skb);
skb = NULL;
@ -426,24 +387,16 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
* send an error up to the netif layer, it
* will resend the skb to us.
*/
DBG_WARNING(et131x_dbginfo,
"Resources problem, Queue tx packet\n");
} else if (status != 0) {
/* On any other error, make netif think we're
* OK and drop the packet
*/
DBG_WARNING(et131x_dbginfo,
"General error, drop packet\n");
dev_kfree_skb_any(skb);
skb = NULL;
etdev->net_stats.tx_dropped++;
}
}
}
DBG_TX_LEAVE(et131x_dbginfo);
return status;
}
@ -464,21 +417,8 @@ static int et131x_send_packet(struct sk_buff *skb,
uint16_t *shbufva;
unsigned long flags;
DBG_TX_ENTER(et131x_dbginfo);
/* Is our buffer scattered, or continuous? */
if (skb_shinfo(skb)->nr_frags == 0) {
DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
} else {
DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
skb_shinfo(skb)->nr_frags);
}
/* All packets must have at least a MAC address and a protocol type */
if (skb->len < ETH_HLEN) {
DBG_ERROR(et131x_dbginfo,
"Packet size < ETH_HLEN (14 bytes)\n");
DBG_LEAVE(et131x_dbginfo);
return -EIO;
}
@ -489,9 +429,6 @@ static int et131x_send_packet(struct sk_buff *skb,
if (pMpTcb == NULL) {
spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
DBG_TX_LEAVE(et131x_dbginfo);
return -ENOMEM;
}
@ -533,16 +470,10 @@ static int et131x_send_packet(struct sk_buff *skb,
}
etdev->TxRing.TCBReadyQueueTail = pMpTcb;
spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
DBG_TX_LEAVE(et131x_dbginfo);
return status;
}
DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);
DBG_TX_LEAVE(et131x_dbginfo);
WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
return 0;
}
@ -564,8 +495,6 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
unsigned long flags;
DBG_TX_ENTER(et131x_dbginfo);
/* Part of the optimizations of this send routine restrict us to
* sending 24 fragments at a pass. In practice we should never see
* more than 5 fragments.
@ -575,7 +504,6 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
* although it is less efficient.
*/
if (FragListCount > 23) {
DBG_TX_LEAVE(et131x_dbginfo);
return -EIO;
}
@ -596,15 +524,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
* doesn't seem to like large fragments.
*/
if ((pPacket->len - pPacket->data_len) <= 1514) {
DBG_TX(et131x_dbginfo,
"Got packet of length %d, "
"filling desc entry %d, "
"TCB: 0x%p\n",
(pPacket->len - pPacket->data_len),
etdev->TxRing.txDmaReadyToSend, pMpTcb);
CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
CurDesc[FragmentNumber].word2.bits.
length_in_bytes =
pPacket->len - pPacket->data_len;
@ -624,15 +544,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
pPacket->data_len,
PCI_DMA_TODEVICE);
} else {
DBG_TX(et131x_dbginfo,
"Got packet of length %d, "
"filling desc entry %d, "
"TCB: 0x%p\n",
(pPacket->len - pPacket->data_len),
etdev->TxRing.txDmaReadyToSend, pMpTcb);
CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
CurDesc[FragmentNumber].word2.bits.
length_in_bytes =
((pPacket->len - pPacket->data_len) / 2);
@ -675,16 +587,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
PCI_DMA_TODEVICE);
}
} else {
DBG_TX(et131x_dbginfo,
"Got packet of length %d,"
"filling desc entry %d\n"
"TCB: 0x%p\n",
pFragList[loopIndex].size,
etdev->TxRing.txDmaReadyToSend,
pMpTcb);
CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
CurDesc[FragmentNumber].word2.bits.length_in_bytes =
pFragList[loopIndex - 1].size;
@ -703,10 +606,8 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
}
}
if (FragmentNumber == 0) {
DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
if (FragmentNumber == 0)
return -EIO;
}
if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
@ -774,7 +675,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
etdev->TxRing.CurrSendTail = pMpTcb;
DBG_ASSERT(pMpTcb->Next == NULL);
WARN_ON(pMpTcb->Next != NULL);
etdev->TxRing.nBusySend++;
@ -791,432 +692,11 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
&etdev->regs->global.watchdog_timer);
}
spin_unlock_irqrestore(&etdev->SendHWLock, flags);
DBG_TX_LEAVE(et131x_dbginfo);
return 0;
}
/*
* NOTE: For now, keep this older version of NICSendPacket around for
* reference, even though it's not used
*/
#if 0
/**
* NICSendPacket - NIC specific send handler.
* @etdev: pointer to our adapter
* @pMpTcb: pointer to MP_TCB
*
* Returns 0 on succes, errno on failure.
*
* This version of the send routine is designed for version A silicon.
* Assumption - Send spinlock has been acquired.
*/
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
uint32_t loopIndex, fragIndex, loopEnd;
uint32_t splitfirstelem = 0;
uint32_t SegmentSize = 0;
TX_DESC_ENTRY_t CurDesc;
TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
uint32_t SlotsAvailable;
DMA10W_t ServiceComplete;
unsigned int flags;
struct sk_buff *pPacket = pMpTcb->Packet;
uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
DBG_TX_ENTER(et131x_dbginfo);
ServiceComplete.value =
readl(&etdev->regs->txdma.NewServiceComplete.value);
/*
* Attempt to fix TWO hardware bugs:
* 1) NEVER write an odd number of descriptors.
* 2) If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
* packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
* descriptor IN HALF DUPLEX MODE ONLY
* NOTE that (2) interacts with (1). If the packet is less than
* NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
* Therefore if it is even now, it will eventually end up odd, and
* so will need adjusting.
*
* VLAN tags get involved since VLAN tags add another one or two
* segments.
*/
DBG_TX(et131x_dbginfo,
"pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);
if ((etdev->duplex_mode == 0)
&& (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
DBG_TX(et131x_dbginfo,
"HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
if ((FragListCount & 0x1) == 0) {
DBG_TX(et131x_dbginfo,
"Even number of descs, split 1st elem\n");
splitfirstelem = 1;
/* SegmentSize = pFragList[0].size / 2; */
SegmentSize = (pPacket->len - pPacket->data_len) / 2;
}
} else if (FragListCount & 0x1) {
DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");
splitfirstelem = 1;
/* SegmentSize = pFragList[0].size / 2; */
SegmentSize = (pPacket->len - pPacket->data_len) / 2;
}
spin_lock_irqsave(&etdev->SendHWLock, flags);
if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
ServiceComplete.bits.serv_cpl_wrap) {
/* The ring hasn't wrapped. Slots available should be
* (RING_SIZE) - the difference between the two pointers.
*/
SlotsAvailable = NUM_DESC_PER_RING_TX -
(etdev->TxRing.txDmaReadyToSend.bits.serv_req -
ServiceComplete.bits.serv_cpl);
} else {
/* The ring has wrapped. Slots available should be the
* difference between the two pointers.
*/
SlotsAvailable = ServiceComplete.bits.serv_cpl -
etdev->TxRing.txDmaReadyToSend.bits.serv_req;
}
if ((FragListCount + splitfirstelem) > SlotsAvailable) {
DBG_WARNING(et131x_dbginfo,
"Not Enough Space in Tx Desc Ring\n");
spin_unlock_irqrestore(&etdev->SendHWLock, flags);
return -ENOMEM;
}
loopEnd = (FragListCount) + splitfirstelem;
fragIndex = 0;
DBG_TX(et131x_dbginfo,
"TCB : 0x%p\n"
"Packet (SKB) : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
"FragListCount : %d\t splitfirstelem: %d\t loopEnd:%d\n",
pMpTcb,
pPacket, pPacket->len, pPacket->data_len,
FragListCount, splitfirstelem, loopEnd);
for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
if (loopIndex > splitfirstelem)
fragIndex++;
DBG_TX(et131x_dbginfo,
"In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
fragIndex);
/* If there is something in this element, let's get a
* descriptor from the ring and get the necessary data
*/
DBG_TX(et131x_dbginfo,
"Packet Length %d,"
"filling desc entry %d\n",
pPacket->len,
etdev->TxRing.txDmaReadyToSend.bits.serv_req);
/*
* NOTE - Should we do a paranoia check here to make sure the fragment
* actually has a length? It's HIGHLY unlikely the fragment would
* contain no data...
*/
if (1) {
/* NOTE - Currently always getting 32-bit addrs, and
* dma_addr_t is only 32-bit, so leave "high" ptr
* value out for now
* CurDesc.DataBufferPtrHigh = 0;
*/
CurDesc.word2.value = 0;
CurDesc.word3.value = 0;
if (fragIndex == 0) {
if (splitfirstelem) {
DBG_TX(et131x_dbginfo,
"Split first element: YES\n");
if (loopIndex == 0) {
DBG_TX(et131x_dbginfo,
"Got fragment of length %d, fragIndex: %d\n",
pPacket->len -
pPacket->data_len,
fragIndex);
DBG_TX(et131x_dbginfo,
"SegmentSize: %d\n",
SegmentSize);
CurDesc.word2.bits.
length_in_bytes =
SegmentSize;
CurDesc.DataBufferPtrLow =
pci_map_single(etdev->
pdev,
pPacket->
data,
SegmentSize,
PCI_DMA_TODEVICE);
DBG_TX(et131x_dbginfo,
"pci_map_single() returns: 0x%08x\n",
CurDesc.
DataBufferPtrLow);
} else {
DBG_TX(et131x_dbginfo,
"Got fragment of length %d, fragIndex: %d\n",
pPacket->len -
pPacket->data_len,
fragIndex);
DBG_TX(et131x_dbginfo,
"Leftover Size: %d\n",
(pPacket->len -
pPacket->data_len -
SegmentSize));
CurDesc.word2.bits.
length_in_bytes =
((pPacket->len -
pPacket->data_len) -
SegmentSize);
CurDesc.DataBufferPtrLow =
pci_map_single(etdev->
pdev,
(pPacket->
data +
SegmentSize),
(pPacket->
len -
pPacket->
data_len -
SegmentSize),
PCI_DMA_TODEVICE);
DBG_TX(et131x_dbginfo,
"pci_map_single() returns: 0x%08x\n",
CurDesc.
DataBufferPtrLow);
}
} else {
DBG_TX(et131x_dbginfo,
"Split first element: NO\n");
CurDesc.word2.bits.length_in_bytes =
pPacket->len - pPacket->data_len;
CurDesc.DataBufferPtrLow =
pci_map_single(etdev->pdev,
pPacket->data,
(pPacket->len -
pPacket->data_len),
PCI_DMA_TODEVICE);
DBG_TX(et131x_dbginfo,
"pci_map_single() returns: 0x%08x\n",
CurDesc.DataBufferPtrLow);
}
} else {
CurDesc.word2.bits.length_in_bytes =
pFragList[fragIndex - 1].size;
CurDesc.DataBufferPtrLow =
pci_map_page(etdev->pdev,
pFragList[fragIndex - 1].page,
pFragList[fragIndex -
1].page_offset,
pFragList[fragIndex - 1].size,
PCI_DMA_TODEVICE);
DBG_TX(et131x_dbginfo,
"pci_map_page() returns: 0x%08x\n",
CurDesc.DataBufferPtrLow);
}
if (loopIndex == 0) {
/* This is the first descriptor of the packet
*
* Set the "f" bit to indicate this is the
* first descriptor in the packet.
*/
DBG_TX(et131x_dbginfo,
"This is our FIRST descriptor\n");
CurDesc.word3.bits.f = 1;
pMpTcb->WrIndexStart =
etdev->TxRing.txDmaReadyToSend;
}
if ((loopIndex == (loopEnd - 1)) &&
(etdev->duplex_mode ||
(pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
/* This is the Last descriptor of the packet */
DBG_TX(et131x_dbginfo,
"THIS is our LAST descriptor\n");
if (etdev->linkspeed ==
TRUEPHY_SPEED_1000MBPS) {
if (++etdev->TxRing.
TxPacketsSinceLastinterrupt >=
PARM_TX_NUM_BUFS_DEF) {
CurDesc.word3.value = 0x5;
etdev->TxRing.
TxPacketsSinceLastinterrupt
= 0;
} else {
CurDesc.word3.value = 0x1;
}
} else {
CurDesc.word3.value = 0x5;
}
/* Following index will be used during freeing
* of packet
*/
pMpTcb->WrIndex =
etdev->TxRing.txDmaReadyToSend;
pMpTcb->PacketStaleCount = 0;
}
/* Copy the descriptor (filled above) into the
* descriptor ring at the next free entry. Advance
* the "next free entry" variable
*/
memcpy(etdev->TxRing.pTxDescRingVa +
etdev->TxRing.txDmaReadyToSend.bits.serv_req,
&CurDesc, sizeof(TX_DESC_ENTRY_t));
CurDescPostCopy =
etdev->TxRing.pTxDescRingVa +
etdev->TxRing.txDmaReadyToSend.bits.serv_req;
DBG_TX(et131x_dbginfo,
"CURRENT DESCRIPTOR\n"
"\tAddress : 0x%p\n"
"\tDataBufferPtrHigh : 0x%08x\n"
"\tDataBufferPtrLow : 0x%08x\n"
"\tword2 : 0x%08x\n"
"\tword3 : 0x%08x\n",
CurDescPostCopy,
CurDescPostCopy->DataBufferPtrHigh,
CurDescPostCopy->DataBufferPtrLow,
CurDescPostCopy->word2.value,
CurDescPostCopy->word3.value);
if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
NUM_DESC_PER_RING_TX) {
if (etdev->TxRing.txDmaReadyToSend.bits.
serv_req_wrap) {
etdev->TxRing.txDmaReadyToSend.
value = 0;
} else {
etdev->TxRing.txDmaReadyToSend.
value = 0x400;
}
}
}
}
if (etdev->duplex_mode == 0 &&
pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
/* NOTE - Same 32/64-bit issue as above... */
CurDesc.DataBufferPtrHigh = 0x0;
CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa;
CurDesc.word2.value = 0;
if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
PARM_TX_NUM_BUFS_DEF) {
CurDesc.word3.value = 0x5;
etdev->TxRing.TxPacketsSinceLastinterrupt =
0;
} else {
CurDesc.word3.value = 0x1;
}
} else {
CurDesc.word3.value = 0x5;
}
CurDesc.word2.bits.length_in_bytes =
NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;
pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;
memcpy(etdev->TxRing.pTxDescRingVa +
etdev->TxRing.txDmaReadyToSend.bits.serv_req,
&CurDesc, sizeof(TX_DESC_ENTRY_t));
CurDescPostCopy =
etdev->TxRing.pTxDescRingVa +
etdev->TxRing.txDmaReadyToSend.bits.serv_req;
DBG_TX(et131x_dbginfo,
"CURRENT DESCRIPTOR\n"
"\tAddress : 0x%p\n"
"\tDataBufferPtrHigh : 0x%08x\n"
"\tDataBufferPtrLow : 0x%08x\n"
"\tword2 : 0x%08x\n"
"\tword3 : 0x%08x\n",
CurDescPostCopy,
CurDescPostCopy->DataBufferPtrHigh,
CurDescPostCopy->DataBufferPtrLow,
CurDescPostCopy->word2.value,
CurDescPostCopy->word3.value);
if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
NUM_DESC_PER_RING_TX) {
if (etdev->TxRing.txDmaReadyToSend.bits.
serv_req_wrap) {
etdev->TxRing.txDmaReadyToSend.value = 0;
} else {
etdev->TxRing.txDmaReadyToSend.value = 0x400;
}
}
DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
/* etdev->TxRing.txDmaReadyToSend.value, */
etdev->TxRing.txDmaReadyToSend.bits.serv_req,
NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
}
spin_lock(&etdev->TCBSendQLock);
if (etdev->TxRing.CurrSendTail)
etdev->TxRing.CurrSendTail->Next = pMpTcb;
else
etdev->TxRing.CurrSendHead = pMpTcb;
etdev->TxRing.CurrSendTail = pMpTcb;
DBG_ASSERT(pMpTcb->Next == NULL);
etdev->TxRing.nBusySend++;
spin_unlock(&etdev->TCBSendQLock);
/* Write the new write pointer back to the device. */
writel(etdev->TxRing.txDmaReadyToSend.value,
&etdev->regs->txdma.service_request.value);
#ifdef CONFIG_ET131X_DEBUG
DumpDeviceBlock(DBG_TX_ON, etdev, 1);
#endif
/* For Gig only, we use Tx Interrupt coalescing. Enable the software
* timer to wake us up if this packet isn't followed by N more.
*/
if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
&etdev->regs->global.watchdog_timer);
}
spin_unlock_irqrestore(&etdev->SendHWLock, flags);
DBG_TX_LEAVE(et131x_dbginfo);
return 0;
}
#endif
/**
* et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
@ -1246,37 +726,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
* corresponding to this packet and umap the fragments
* they point to
*/
DBG_TX(et131x_dbginfo,
"Unmap descriptors Here\n"
"TCB : 0x%p\n"
"TCB Next : 0x%p\n"
"TCB PacketLength : %d\n"
"TCB WrIndexS.value : 0x%08x\n"
"TCB WrIndex.value : 0x%08x\n",
pMpTcb,
pMpTcb->Next,
pMpTcb->PacketLength,
pMpTcb->WrIndexStart,
pMpTcb->WrIndex);
do {
desc =
(TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
INDEX10(pMpTcb->WrIndexStart));
DBG_TX(et131x_dbginfo,
"CURRENT DESCRIPTOR\n"
"\tAddress : 0x%p\n"
"\tDataBufferPtrHigh : 0x%08x\n"
"\tDataBufferPtrLow : 0x%08x\n"
"\tword2 : 0x%08x\n"
"\tword3 : 0x%08x\n",
desc,
desc->DataBufferPtrHigh,
desc->DataBufferPtrLow,
desc->word2.value,
desc->word3.value);
pci_unmap_single(etdev->pdev,
desc->DataBufferPtrLow,
desc->word2.value, PCI_DMA_TODEVICE);
@ -1290,9 +744,6 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
} while (desc != (etdev->TxRing.pTxDescRingVa +
INDEX10(pMpTcb->WrIndex)));
DBG_TX(et131x_dbginfo,
"Free Packet (SKB) : 0x%p\n", pMpTcb->Packet);
dev_kfree_skb_any(pMpTcb->Packet);
}
@ -1313,8 +764,7 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
etdev->TxRing.TCBReadyQueueTail = pMpTcb;
spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
WARN_ON(etdev->TxRing.nBusySend < 0);
}
/**
@ -1330,8 +780,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
unsigned long flags;
uint32_t FreeCounter = 0;
DBG_ENTER(et131x_dbginfo);
while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
spin_lock_irqsave(&etdev->SendWaitLock, flags);
@ -1360,8 +808,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);
FreeCounter++;
et131x_free_send_packet(etdev, pMpTcb);
@ -1370,17 +816,11 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
pMpTcb = etdev->TxRing.CurrSendHead;
}
if (FreeCounter == NUM_TCB) {
DBG_ERROR(et131x_dbginfo,
"MpFreeBusySendPackets exited loop for a bad reason\n");
BUG();
}
WARN_ON(FreeCounter == NUM_TCB);
spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
etdev->TxRing.nBusySend = 0;
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -1394,8 +834,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
*/
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
DBG_TX_ENTER(et131x_dbginfo);
/* Mark as completed any packets which have been sent by the device. */
et131x_update_tcb_list(etdev);
@ -1403,8 +841,6 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
* dequeue and send those packets now, as long as we have free TCBs.
*/
et131x_check_send_wait_list(etdev);
DBG_TX_LEAVE(et131x_dbginfo);
}
/**
@ -1487,15 +923,9 @@ static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
MP_TCB_RESOURCES_AVAILABLE(etdev)) {
struct list_head *entry;
DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");
entry = etdev->TxRing.SendWaitQueue.next;
etdev->TxRing.nWaitSend--;
DBG_WARNING(et131x_dbginfo,
"MpHandleSendInterrupt - sent a queued pkt. Waiting %d\n",
etdev->TxRing.nWaitSend);
}
spin_unlock_irqrestore(&etdev->SendWaitLock, flags);


@ -176,13 +176,6 @@ typedef struct _ce_stats_t {
u32 code_violations;
u32 other_errors;
#ifdef CONFIG_ET131X_DEBUG
u32 UnhandledInterruptsPerSec;
u32 RxDmaInterruptsPerSec;
u32 TxDmaInterruptsPerSec;
u32 WatchDogInterruptsPerSec;
#endif /* CONFIG_ET131X_DEBUG */
u32 SynchrounousIterations;
u32 InterruptStatus;
} CE_STATS_t, *PCE_STATS_t;


@ -1,208 +0,0 @@
/*
* Agere Systems Inc.
* 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
*
*------------------------------------------------------------------------------
*
* et131x_debug.c - Routines used for debugging.
*
*------------------------------------------------------------------------------
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#ifdef CONFIG_ET131X_DEBUG
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/random.h>
#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"
#include "et131x_adapter.h"
#include "et131x_netdev.h"
#include "et131x_config.h"
#include "et131x_isr.h"
#include "et1310_address_map.h"
#include "et1310_tx.h"
#include "et1310_rx.h"
#include "et1310_mac.h"
/* Data for debugging facilities */
extern dbg_info_t *et131x_dbginfo;
/**
* DumpTxQueueContents - Dump out the tx queue and the shadow pointers
* @etdev: pointer to our adapter structure
*/
void DumpTxQueueContents(int debug, struct et131x_adapter *etdev)
{
MMC_t __iomem *mmc = &etdev->regs->mmc;
u32 txq_addr;
if (DBG_FLAGS(et131x_dbginfo) & debug) {
for (txq_addr = 0x200; txq_addr < 0x3ff; txq_addr++) {
u32 sram_access = readl(&mmc->sram_access);
sram_access &= 0xFFFF;
sram_access |= (txq_addr << 16) | ET_SRAM_REQ_ACCESS;
writel(sram_access, &mmc->sram_access);
DBG_PRINT("Addr 0x%x, Access 0x%08x\t"
"Value 1 0x%08x, Value 2 0x%08x, "
"Value 3 0x%08x, Value 4 0x%08x, \n",
txq_addr,
readl(&mmc->sram_access),
readl(&mmc->sram_word1),
readl(&mmc->sram_word2),
readl(&mmc->sram_word3),
readl(&mmc->sram_word4));
}
DBG_PRINT("Shadow Pointers 0x%08x\n",
readl(&etdev->regs->txmac.shadow_ptr.value));
}
}
#define NUM_BLOCKS 8
static const char *BlockNames[NUM_BLOCKS] = {
"Global", "Tx DMA", "Rx DMA", "Tx MAC",
"Rx MAC", "MAC", "MAC Stat", "MMC"
};
/**
* DumpDeviceBlock
* @etdev: pointer to our adapter
*
* Dumps the first 64 regs of each block of the et-1310 (each block is
* mapped to a new page, each page is 4096 bytes).
*/
void DumpDeviceBlock(int debug, struct et131x_adapter *etdev,
u32 block)
{
u32 addr1, addr2;
u32 __iomem *regs = (u32 __iomem *) etdev->regs;
/* Output the debug counters to the debug terminal */
if (DBG_FLAGS(et131x_dbginfo) & debug) {
DBG_PRINT("%s block\n", BlockNames[block]);
regs += block * 1024;
for (addr1 = 0; addr1 < 8; addr1++) {
for (addr2 = 0; addr2 < 8; addr2++) {
if (block == 0 &&
(addr1 * 8 + addr2) == 6)
DBG_PRINT(" ISR , ");
else
DBG_PRINT("0x%08x, ", readl(regs++));
}
DBG_PRINT("\n");
}
DBG_PRINT("\n");
}
}
/**
* DumpDeviceReg
* @etdev: pointer to our adapter
*
* Dumps the first 64 regs of each block of the et-1310 (each block is
* mapped to a new page, each page is 4096 bytes).
*/
void DumpDeviceReg(int debug, struct et131x_adapter *etdev)
{
u32 addr1, addr2;
u32 block;
u32 __iomem *regs = (u32 __iomem *)etdev->regs;
u32 __iomem *p;
/* Output the debug counters to the debug terminal */
if (DBG_FLAGS(et131x_dbginfo) & debug) {
for (block = 0; block < NUM_BLOCKS; block++) {
DBG_PRINT("%s block\n", BlockNames[block]);
p = regs + block * 1024;
for (addr1 = 0; addr1 < 8; addr1++) {
for (addr2 = 0; addr2 < 8; addr2++)
DBG_PRINT("0x%08x, ", readl(p++));
DBG_PRINT("\n");
}
DBG_PRINT("\n");
}
}
}
#endif /* CONFIG_ET131X_DEBUG */

@ -1,255 +0,0 @@
/*
* Agere Systems Inc.
* 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
*
*------------------------------------------------------------------------------
*
* et131x_debug.h - Defines, structs, enums, prototypes, etc. used for
* outputting debug messages to the system logging facility
* (ksyslogd)
*
*------------------------------------------------------------------------------
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#ifndef __ET131X_DBG_H__
#define __ET131X_DBG_H__
/* Define Masks for debugging types/levels */
#define DBG_ERROR_ON 0x00000001L
#define DBG_WARNING_ON 0x00000002L
#define DBG_NOTICE_ON 0x00000004L
#define DBG_TRACE_ON 0x00000008L
#define DBG_VERBOSE_ON 0x00000010L
#define DBG_PARAM_ON 0x00000020L
#define DBG_BREAK_ON 0x00000040L
#define DBG_RX_ON 0x00000100L
#define DBG_TX_ON 0x00000200L
#ifdef CONFIG_ET131X_DEBUG
/*
* Set the level of debugging if not done with a preprocessor define. See
* et131x_main.c, function et131x_init_module() for how the debug level
* translates into the types of messages displayed.
*/
#ifndef DBG_LVL
#define DBG_LVL 3
#endif /* DBG_LVL */
#define DBG_DEFAULTS (DBG_ERROR_ON | DBG_WARNING_ON | DBG_BREAK_ON)
#define DBG_FLAGS(A) ((A)->dbgFlags)
#define DBG_NAME(A) ((A)->dbgName)
#define DBG_LEVEL(A) ((A)->dbgLevel)
#ifndef DBG_PRINT
#define DBG_PRINT(S...) printk(KERN_DEBUG S)
#endif /* DBG_PRINT */
#ifndef DBG_PRINTC
#define DBG_PRINTC(S...) printk(S)
#endif /* DBG_PRINTC */
#ifndef DBG_TRAP
#define DBG_TRAP do {} while (0) /* BUG() */
#endif /* DBG_TRAP */
#define _ENTER_STR ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
#define _LEAVE_STR "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
#define _DBG_ENTER(A) printk(KERN_DEBUG "%s:%.*s:%s\n", DBG_NAME(A), \
++DBG_LEVEL(A), _ENTER_STR, __func__)
#define _DBG_LEAVE(A) printk(KERN_DEBUG "%s:%.*s:%s\n", DBG_NAME(A), \
DBG_LEVEL(A)--, _LEAVE_STR, __func__)
#define DBG_ENTER(A) \
do { \
if (DBG_FLAGS(A) & DBG_TRACE_ON) \
_DBG_ENTER(A); \
} while (0)
#define DBG_LEAVE(A) \
do { \
if (DBG_FLAGS(A) & DBG_TRACE_ON) \
_DBG_LEAVE(A); \
} while (0)
#define DBG_PARAM(A, N, F, S...) \
do { \
if (DBG_FLAGS(A) & DBG_PARAM_ON) \
DBG_PRINT(" %s -- "F" ", N, S); \
} while (0)
#define DBG_ERROR(A, S...) \
do { \
if (DBG_FLAGS(A) & DBG_ERROR_ON) { \
DBG_PRINT("%s:ERROR:%s ", DBG_NAME(A), __func__);\
DBG_PRINTC(S); \
DBG_TRAP; \
} \
} while (0)
#define DBG_WARNING(A, S...) \
do { \
if (DBG_FLAGS(A) & DBG_WARNING_ON) { \
DBG_PRINT("%s:WARNING:%s ", DBG_NAME(A), __func__); \
DBG_PRINTC(S); \
} \
} while (0)
#define DBG_NOTICE(A, S...) \
do { \
if (DBG_FLAGS(A) & DBG_NOTICE_ON) { \
DBG_PRINT("%s:NOTICE:%s ", DBG_NAME(A), __func__); \
DBG_PRINTC(S); \
} \
} while (0)
#define DBG_TRACE(A, S...) \
do { \
if (DBG_FLAGS(A) & DBG_TRACE_ON) { \
DBG_PRINT("%s:TRACE:%s ", DBG_NAME(A), __func__); \
DBG_PRINTC(S); \
} \
} while (0)
#define DBG_VERBOSE(A, S...) \
do { \
if (DBG_FLAGS(A) & DBG_VERBOSE_ON) { \
DBG_PRINT("%s:VERBOSE:%s ", DBG_NAME(A), __func__); \
DBG_PRINTC(S); \
} \
} while (0)
#define DBG_RX(A, S...) \
do { \
if (DBG_FLAGS(A) & DBG_RX_ON) \
DBG_PRINT(S); \
} while (0)
#define DBG_RX_ENTER(A) \
do { \
if (DBG_FLAGS(A) & DBG_RX_ON) \
_DBG_ENTER(A); \
} while (0)
#define DBG_RX_LEAVE(A) \
do { \
if (DBG_FLAGS(A) & DBG_RX_ON) \
_DBG_LEAVE(A); \
} while (0)
#define DBG_TX(A, S...) \
do { \
if (DBG_FLAGS(A) & DBG_TX_ON) \
DBG_PRINT(S); \
} while (0)
#define DBG_TX_ENTER(A) \
do { \
if (DBG_FLAGS(A) & DBG_TX_ON) \
_DBG_ENTER(A); \
} while (0)
#define DBG_TX_LEAVE(A) \
do { \
if (DBG_FLAGS(A) & DBG_TX_ON) \
_DBG_LEAVE(A); \
} while (0)
#define DBG_ASSERT(C) \
do { \
if (!(C)) { \
DBG_PRINT("ASSERT(%s) -- %s#%d (%s) ", \
#C, __FILE__, __LINE__, __func__); \
DBG_TRAP; \
} \
} while (0)
#define STATIC
typedef struct {
char *dbgName;
int dbgLevel;
unsigned long dbgFlags;
} dbg_info_t;
#else /* CONFIG_ET131X_DEBUG */
#define DBG_DEFN
#define DBG_TRAP
#define DBG_PRINT(S...)
#define DBG_ENTER(A)
#define DBG_LEAVE(A)
#define DBG_PARAM(A, N, F, S...)
#define DBG_ERROR(A, S...)
#define DBG_WARNING(A, S...)
#define DBG_NOTICE(A, S...)
#define DBG_TRACE(A, S...)
#define DBG_VERBOSE(A, S...)
#define DBG_RX(A, S...)
#define DBG_RX_ENTER(A)
#define DBG_RX_LEAVE(A)
#define DBG_TX(A, S...)
#define DBG_TX_ENTER(A)
#define DBG_TX_LEAVE(A)
#define DBG_ASSERT(C)
#define STATIC static
#endif /* CONFIG_ET131X_DEBUG */
/* Forward declaration of the private adapter structure */
struct et131x_adapter;
void DumpTxQueueContents(int dbgLvl, struct et131x_adapter *adapter);
void DumpDeviceBlock(int dbgLvl, struct et131x_adapter *adapter,
unsigned int Block);
void DumpDeviceReg(int dbgLvl, struct et131x_adapter *adapter);
#endif /* __ET131X_DBG_H__ */
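The header above implemented a family of flag-gated debug macros that this commit removes in favour of the kernel's dev_err()/dev_warn() helpers. As a rough illustration of the gating pattern only (not the driver's exact definitions; the flag values and names below are made up), here is a minimal self-contained userspace sketch:

/* Minimal userspace sketch of the flag-gated debug macro pattern removed
 * above. Names and flag values are illustrative, not the driver's.
 * Builds with gcc (uses the ##__VA_ARGS__ extension).
 */
#include <stdio.h>

#define DBG_ERROR_ON   0x01UL
#define DBG_WARNING_ON 0x02UL
#define DBG_TRACE_ON   0x08UL

struct dbg_info {
	const char *name;
	unsigned long flags;
};

#define DBG_WARNING(di, fmt, ...)					\
	do {								\
		if ((di)->flags & DBG_WARNING_ON)			\
			printf("%s:WARNING:%s " fmt, (di)->name,	\
			       __func__, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	struct dbg_info di = { "et131x", DBG_ERROR_ON | DBG_WARNING_ON };

	DBG_WARNING(&di, "link is down, speed %d\n", 0);	/* printed */
	di.flags &= ~DBG_WARNING_ON;
	DBG_WARNING(&di, "this one is filtered out\n");		/* suppressed */
	return 0;
}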

@ -58,7 +58,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/pci.h>
@ -113,33 +112,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_LICENSE(DRIVER_LICENSE);
/* Module Parameters and related data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
static u32 et131x_debug_level = DBG_LVL;
static u32 et131x_debug_flags = DBG_DEFAULTS;
/*
et131x_debug_level :
Level of debugging desired (0-7)
7 : DBG_RX_ON | DBG_TX_ON
6 : DBG_PARAM_ON
5 : DBG_VERBOSE_ON
4 : DBG_TRACE_ON
3 : DBG_NOTICE_ON
2 : no debug info
1 : no debug info
0 : no debug info
*/
module_param(et131x_debug_level, uint, 0);
module_param(et131x_debug_flags, uint, 0);
MODULE_PARM_DESC(et131x_debug_level, "Level of debugging desired (0-7)");
static dbg_info_t et131x_info = { DRIVER_NAME_EXT, 0, 0 };
dbg_info_t *et131x_dbginfo = &et131x_info;
#endif /* CONFIG_ET131X_DEBUG */
/* Defines for Parameter Default/Min/Max values */
#define PARM_SPEED_DUPLEX_MIN 0
#define PARM_SPEED_DUPLEX_MAX 5
@ -196,71 +168,29 @@ static struct pci_driver et131x_driver = {
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
int et131x_init_module(void)
static int et131x_init_module(void)
{
int result;
#ifdef CONFIG_ET131X_DEBUG
/* Set the level of debug messages displayed using the module
* parameter
*/
et131x_dbginfo->dbgFlags = et131x_debug_flags;
switch (et131x_debug_level) {
case 7:
et131x_dbginfo->dbgFlags |= (DBG_RX_ON | DBG_TX_ON);
case 6:
et131x_dbginfo->dbgFlags |= DBG_PARAM_ON;
case 5:
et131x_dbginfo->dbgFlags |= DBG_VERBOSE_ON;
case 4:
et131x_dbginfo->dbgFlags |= DBG_TRACE_ON;
case 3:
et131x_dbginfo->dbgFlags |= DBG_NOTICE_ON;
case 2:
case 1:
case 0:
default:
break;
}
#endif /* CONFIG_ET131X_DEBUG */
DBG_ENTER(et131x_dbginfo);
DBG_PRINT("%s\n", DRIVER_INFO);
if (et131x_speed_set < PARM_SPEED_DUPLEX_MIN ||
et131x_speed_set > PARM_SPEED_DUPLEX_MAX) {
printk(KERN_WARNING "et131x: invalid speed setting ignored.\n");
et131x_speed_set = 0;
}
result = pci_register_driver(&et131x_driver);
DBG_LEAVE(et131x_dbginfo);
return result;
return pci_register_driver(&et131x_driver);
}
/**
* et131x_cleanup_module - The entry point called on driver cleanup
*/
void et131x_cleanup_module(void)
static void et131x_cleanup_module(void)
{
DBG_ENTER(et131x_dbginfo);
pci_unregister_driver(&et131x_driver);
DBG_LEAVE(et131x_dbginfo);
}
/*
* These macros map the driver-specific init_module() and cleanup_module()
* routines so they can be called by the kernel.
*/
module_init(et131x_init_module);
module_exit(et131x_cleanup_module);
@ -279,8 +209,6 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
uint8_t read_size_reg;
u8 rev;
DBG_ENTER(et131x_dbginfo);
/* Allow disabling of Non-Maskable Interrupts in I/O space, to
* support validation.
*/
@ -311,9 +239,8 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
result = pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
&eepromStat);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo, "Could not read PCI config space for "
dev_err(&pdev->dev, "Could not read PCI config space for "
"EEPROM Status\n");
DBG_LEAVE(et131x_dbginfo);
return -EIO;
}
@ -323,10 +250,9 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
if (eepromStat & 0x4C) {
result = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo,
dev_err(&pdev->dev,
"Could not read PCI config space for "
"Revision ID\n");
DBG_LEAVE(et131x_dbginfo);
return -EIO;
} else if (rev == 0x01) {
int32_t nLoop;
@ -341,8 +267,7 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
}
}
DBG_ERROR(et131x_dbginfo,
"Fatal EEPROM Status Error - 0x%04x\n", eepromStat);
dev_err(&pdev->dev, "Fatal EEPROM Status Error - 0x%04x\n", eepromStat);
/* This error could mean that there was an error reading the
* eeprom or that the eeprom doesn't exist. We will treat
@ -351,14 +276,9 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
* MAC Address
*/
adapter->has_eeprom = 0;
DBG_LEAVE(et131x_dbginfo);
return -EIO;
} else {
DBG_TRACE(et131x_dbginfo, "EEPROM Status Code - 0x%04x\n",
eepromStat);
} else
adapter->has_eeprom = 1;
}
/* Read the EEPROM for information regarding LED behavior. Refer to
* ET1310_phy.c, et131x_xcvr_init(), for its use.
@ -375,9 +295,8 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
*/
result = pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &maxPayload);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo, "Could not read PCI config space for "
"Max Payload Size\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&pdev->dev,
"Could not read PCI config space for Max Payload Size\n");
return -EIO;
}
@ -391,20 +310,16 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
result = pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
AckNak[maxPayload]);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo,
"Could not write PCI config space "
"for ACK/NAK\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&pdev->dev,
"Could not write PCI config space for ACK/NAK\n");
return -EIO;
}
result = pci_write_config_word(pdev, ET1310_PCI_REPLAY,
Replay[maxPayload]);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo,
"Could not write PCI config space "
"for Replay Timer\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&pdev->dev,
"Could not write PCI config space for Replay Timer\n");
return -EIO;
}
}
@ -414,19 +329,16 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
*/
result = pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo,
"Could not write PCI config space for "
"Latency Timers\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&pdev->dev,
"Could not write PCI config space for Latency Timers\n");
return -EIO;
}
/* Change the max read size to 2k */
result = pci_read_config_byte(pdev, 0x51, &read_size_reg);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo,
dev_err(&pdev->dev,
"Could not read PCI config space for Max read size\n");
DBG_LEAVE(et131x_dbginfo);
return -EIO;
}
@ -435,9 +347,8 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
result = pci_write_config_byte(pdev, 0x51, read_size_reg);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo,
dev_err(&pdev->dev,
"Could not write PCI config space for Max read size\n");
DBG_LEAVE(et131x_dbginfo);
return -EIO;
}
@ -452,15 +363,11 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev)
pdev, ET1310_PCI_MAC_ADDRESS + i,
adapter->PermanentAddress + i);
if (result != PCIBIOS_SUCCESSFUL) {
DBG_ERROR(et131x_dbginfo,
"Could not read PCI config space for MAC address\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&pdev->dev, ";Could not read PCI config space for MAC address\n");
return -EIO;
}
}
}
DBG_LEAVE(et131x_dbginfo);
return 0;
}
@ -481,9 +388,8 @@ void et131x_error_timer_handler(unsigned long data)
if ((pm_csr & ET_PM_PHY_SW_COMA) == 0)
UpdateMacStatHostCounters(etdev);
else
DBG_VERBOSE(et131x_dbginfo,
"No interrupts, in PHY coma, pm_csr = 0x%x\n",
pm_csr);
dev_err(&etdev->pdev->dev,
"No interrupts, in PHY coma, pm_csr = 0x%x\n", pm_csr);
if (!etdev->Bmsr.bits.link_status &&
etdev->RegistryPhyComa &&
@ -541,8 +447,6 @@ void ConfigGlobalRegs(struct et131x_adapter *etdev)
{
struct _GLOBAL_t __iomem *regs = &etdev->regs->global;
DBG_ENTER(et131x_dbginfo);
if (etdev->RegistryPhyLoopbk == false) {
if (etdev->RegistryJumboPacket < 2048) {
/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
@ -596,8 +500,6 @@ void ConfigGlobalRegs(struct et131x_adapter *etdev)
* a packet is queued.
*/
writel(0, &regs->watchdog_timer);
DBG_LEAVE(et131x_dbginfo);
}
@ -611,8 +513,6 @@ int et131x_adapter_setup(struct et131x_adapter *etdev)
{
int status = 0;
DBG_ENTER(et131x_dbginfo);
/* Configure the JAGCore */
ConfigGlobalRegs(etdev);
@ -634,7 +534,7 @@ int et131x_adapter_setup(struct et131x_adapter *etdev)
status = et131x_xcvr_find(etdev);
if (status != 0)
DBG_WARNING(et131x_dbginfo, "Could not find the xcvr\n");
dev_warn(&etdev->pdev->dev, "Could not find the xcvr\n");
/* Prepare the TRUEPHY library. */
ET1310_PhyInit(etdev);
@ -658,9 +558,7 @@ int et131x_adapter_setup(struct et131x_adapter *etdev)
ET1310_PhyPowerDown(etdev, 0);
et131x_setphy_normal(etdev);
DBG_LEAVE(et131x_dbginfo);
return status;
}
/**
@ -669,8 +567,6 @@ int et131x_adapter_setup(struct et131x_adapter *etdev)
*/
void et131x_setup_hardware_properties(struct et131x_adapter *adapter)
{
DBG_ENTER(et131x_dbginfo);
/* If we have our default mac from the registry and no mac address from
* EEPROM then we need to generate the last octet and set it on the
* device
@ -702,8 +598,6 @@ void et131x_setup_hardware_properties(struct et131x_adapter *adapter)
memcpy(adapter->CurrentAddress,
adapter->PermanentAddress, ETH_ALEN);
}
DBG_LEAVE(et131x_dbginfo);
}
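The comment at the top of et131x_setup_hardware_properties() above mentions generating the last octet of the default MAC when no EEPROM address is available; the generation code itself lies outside this hunk. As a hedged sketch of the general idea only (randomize the final octet, keep the vendor portion; the default address below is hypothetical), in standalone C:

/* Standalone sketch: derive a usable station address from a default
 * vendor prefix by randomizing only the last octet. Illustrative only;
 * the driver's actual generation code is not shown in this hunk.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

static void generate_last_octet(unsigned char mac[ETH_ALEN])
{
	/* Keep the first five octets (vendor part), replace the last one */
	mac[ETH_ALEN - 1] = (unsigned char)(rand() & 0xff);
}

int main(void)
{
	/* Hypothetical default address; a real driver would use its own */
	unsigned char mac[ETH_ALEN] = { 0x00, 0x0a, 0xc9, 0x00, 0x00, 0x00 };
	int i;

	srand((unsigned int)time(NULL));
	generate_last_octet(mac);

	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", mac[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}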
/**
@ -712,8 +606,6 @@ void et131x_setup_hardware_properties(struct et131x_adapter *adapter)
*/
void et131x_soft_reset(struct et131x_adapter *adapter)
{
DBG_ENTER(et131x_dbginfo);
/* Disable MAC Core */
writel(0xc00f0000, &adapter->regs->mac.cfg1.value);
@ -721,8 +613,6 @@ void et131x_soft_reset(struct et131x_adapter *adapter)
writel(0x7F, &adapter->regs->global.sw_reset);
writel(0x000f0000, &adapter->regs->mac.cfg1.value);
writel(0x00000000, &adapter->regs->mac.cfg1.value);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -738,8 +628,6 @@ void et131x_align_allocated_memory(struct et131x_adapter *adapter,
{
uint64_t new_addr;
DBG_ENTER(et131x_dbginfo);
*offset = 0;
new_addr = *phys_addr & ~mask;
@ -752,8 +640,6 @@ void et131x_align_allocated_memory(struct et131x_adapter *adapter,
/* Return new physical address */
*phys_addr = new_addr;
}
DBG_LEAVE(et131x_dbginfo);
}
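Most of the arithmetic in et131x_align_allocated_memory() falls between the two hunks above; only the initial round-down and the final store are visible. As an illustration of mask-based alignment in general (not a claim about the driver's exact code), a standalone sketch that rounds an address up to the next boundary and reports the offset applied:

/* Standalone sketch of mask-based address alignment: round an address up
 * to the next (mask + 1) boundary and report how far it moved. Treat this
 * as illustrative of the technique only.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t align_up(uint64_t phys, uint64_t mask, uint64_t *offset)
{
	uint64_t aligned = phys & ~mask;	/* round down to boundary */

	if (aligned != phys) {
		aligned += mask + 1;		/* move up to next boundary */
		*offset = aligned - phys;	/* caller skips this many bytes */
	} else {
		*offset = 0;
	}
	return aligned;
}

int main(void)
{
	uint64_t off;
	uint64_t a = align_up(0x10000123ULL, 0xfffULL, &off);	/* 4 KiB align */

	printf("aligned=0x%llx offset=%llu\n",
	       (unsigned long long)a, (unsigned long long)off);
	return 0;
}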
/**
@ -768,13 +654,11 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
int status = 0;
DBG_ENTER(et131x_dbginfo);
do {
/* Allocate memory for the Tx Ring */
status = et131x_tx_dma_memory_alloc(adapter);
if (status != 0) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"et131x_tx_dma_memory_alloc FAILED\n");
break;
}
@ -782,7 +666,7 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
/* Receive buffer memory allocation */
status = et131x_rx_dma_memory_alloc(adapter);
if (status != 0) {
DBG_ERROR(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"et131x_rx_dma_memory_alloc FAILED\n");
et131x_tx_dma_memory_free(adapter);
break;
@ -791,14 +675,13 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
/* Init receive data structures */
status = et131x_init_recv(adapter);
if (status != 0) {
DBG_ERROR(et131x_dbginfo, "et131x_init_recv FAILED\n");
dev_err(&adapter->pdev->dev,
"et131x_init_recv FAILED\n");
et131x_tx_dma_memory_free(adapter);
et131x_rx_dma_memory_free(adapter);
break;
}
} while (0);
DBG_LEAVE(et131x_dbginfo);
return status;
}
@ -808,13 +691,9 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
*/
void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
DBG_ENTER(et131x_dbginfo);
/* Free DMA memory */
et131x_tx_dma_memory_free(adapter);
et131x_rx_dma_memory_free(adapter);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -830,8 +709,6 @@ void __devexit et131x_pci_remove(struct pci_dev *pdev)
struct net_device *netdev;
struct et131x_adapter *adapter;
DBG_ENTER(et131x_dbginfo);
/* Retrieve the net_device pointer from the pci_dev struct, as well
* as the private adapter struct
*/
@ -846,8 +723,6 @@ void __devexit et131x_pci_remove(struct pci_dev *pdev)
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -866,11 +741,9 @@ void et131x_config_parse(struct et131x_adapter *etdev)
static const u8 duplex[] = { 0, 1, 2, 1, 2, 2 };
static const u16 speed[] = { 0, 10, 10, 100, 100, 1000 };
DBG_ENTER(et131x_dbginfo);
if (et131x_speed_set)
DBG_VERBOSE(et131x_dbginfo, "Speed set manually to : %d \n",
et131x_speed_set);
dev_info(&etdev->pdev->dev,
"Speed set manually to : %d \n", et131x_speed_set);
etdev->SpeedDuplex = et131x_speed_set;
etdev->RegistryJumboPacket = 1514; /* 1514-9216 */
@ -894,8 +767,6 @@ void et131x_config_parse(struct et131x_adapter *etdev)
etdev->AiForceSpeed = speed[etdev->SpeedDuplex];
etdev->AiForceDpx = duplex[etdev->SpeedDuplex]; /* Auto FDX */
DBG_LEAVE(et131x_dbginfo);
}
@ -920,18 +791,17 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
struct net_device *netdev = NULL;
struct et131x_adapter *adapter = NULL;
DBG_ENTER(et131x_dbginfo);
/* Enable the device via the PCI subsystem */
result = pci_enable_device(pdev);
if (result != 0) {
DBG_ERROR(et131x_dbginfo, "pci_enable_device() failed\n");
dev_err(&pdev->dev,
"pci_enable_device() failed\n");
goto out;
}
/* Perform some basic PCI checks */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
DBG_ERROR(et131x_dbginfo,
dev_err(&pdev->dev,
"Can't find PCI device's base address\n");
result = -ENODEV;
goto out;
@ -939,12 +809,12 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
result = pci_request_regions(pdev, DRIVER_NAME);
if (result != 0) {
DBG_ERROR(et131x_dbginfo, "Can't get PCI resources\n");
dev_err(&pdev->dev,
"Can't get PCI resources\n");
goto err_disable;
}
/* Enable PCI bus mastering */
DBG_TRACE(et131x_dbginfo, "Setting PCI Bus Mastering...\n");
pci_set_master(pdev);
/* Query PCI for Power Mgmt Capabilities
@ -954,7 +824,7 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
*/
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
DBG_ERROR(et131x_dbginfo,
dev_err(&pdev->dev,
"Cannot find Power Management capabilities\n");
result = -EIO;
goto err_release_res;
@ -962,40 +832,34 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
/* Check the DMA addressing support of this device */
if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
DBG_TRACE(et131x_dbginfo, "64-bit DMA addressing supported\n");
pci_using_dac = true;
result =
pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
if (result != 0) {
DBG_ERROR(et131x_dbginfo,
dev_err(&pdev->dev,
"Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_release_res;
}
} else if (!pci_set_dma_mask(pdev, 0xffffffffULL)) {
DBG_TRACE(et131x_dbginfo,
"64-bit DMA addressing NOT supported\n");
DBG_TRACE(et131x_dbginfo,
"32-bit DMA addressing will be used\n");
pci_using_dac = false;
} else {
DBG_ERROR(et131x_dbginfo, "No usable DMA addressing method\n");
dev_err(&pdev->dev,
"No usable DMA addressing method\n");
result = -EIO;
goto err_release_res;
}
/* Allocate netdev and private adapter structs */
DBG_TRACE(et131x_dbginfo,
"Allocate netdev and private adapter structs...\n");
netdev = et131x_device_alloc();
if (netdev == NULL) {
DBG_ERROR(et131x_dbginfo, "Couldn't alloc netdev struct\n");
dev_err(&pdev->dev,
"Couldn't alloc netdev struct\n");
result = -ENOMEM;
goto err_release_res;
}
/* Setup the fundamental net_device and private adapter structure elements */
DBG_TRACE(et131x_dbginfo, "Setting fundamental net_device info...\n");
SET_NETDEV_DEV(netdev, &pdev->dev);
/*
if (pci_using_dac) {
@ -1036,8 +900,6 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
netdev->base_addr = pdev->resource[0].start;
/* Initialize spinlocks here */
DBG_TRACE(et131x_dbginfo, "Initialize spinlocks...\n");
spin_lock_init(&adapter->Lock);
spin_lock_init(&adapter->TCBSendQLock);
spin_lock_init(&adapter->TCBReadyQLock);
@ -1061,13 +923,11 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
et131x_find_adapter(adapter, pdev);
/* Map the bus-relative registers to system virtual memory */
DBG_TRACE(et131x_dbginfo,
"Mapping bus-relative registers to virtual memory...\n");
adapter->regs = ioremap_nocache(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (adapter->regs == NULL) {
DBG_ERROR(et131x_dbginfo, "Cannot map device registers\n");
dev_err(&pdev->dev, "Cannot map device registers\n");
result = -ENOMEM;
goto err_free_dev;
}
@ -1078,23 +938,19 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
/* Issue a global reset to the et1310 */
DBG_TRACE(et131x_dbginfo, "Issuing soft reset...\n");
et131x_soft_reset(adapter);
/* Disable all interrupts (paranoid) */
DBG_TRACE(et131x_dbginfo, "Disable device interrupts...\n");
et131x_disable_interrupts(adapter);
/* Allocate DMA memory */
result = et131x_adapter_memory_alloc(adapter);
if (result != 0) {
DBG_ERROR(et131x_dbginfo,
"Could not alloc adapater memory (DMA)\n");
dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
goto err_iounmap;
}
/* Init send data structures */
DBG_TRACE(et131x_dbginfo, "Init send data structures...\n");
et131x_init_send(adapter);
/* Register the interrupt
@ -1109,13 +965,11 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
INIT_WORK(&adapter->task, et131x_isr_handler);
/* Determine MAC Address, and copy into the net_device struct */
DBG_TRACE(et131x_dbginfo, "Retrieve MAC address...\n");
et131x_setup_hardware_properties(adapter);
memcpy(netdev->dev_addr, adapter->CurrentAddress, ETH_ALEN);
/* Setup et1310 as per the documentation */
DBG_TRACE(et131x_dbginfo, "Setup the adapter...\n");
et131x_adapter_setup(adapter);
/* Create a timer to count errors received by the NIC */
@ -1140,10 +994,9 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
*/
/* Register the net_device struct with the Linux network layer */
DBG_TRACE(et131x_dbginfo, "Registering net_device...\n");
result = register_netdev(netdev);
if (result != 0) {
DBG_ERROR(et131x_dbginfo, "register_netdev() failed\n");
dev_err(&pdev->dev, "register_netdev() failed\n");
goto err_mem_free;
}
@ -1156,7 +1009,6 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev,
pci_save_state(adapter->pdev);
out:
DBG_LEAVE(et131x_dbginfo);
return result;
err_mem_free:

@ -57,7 +57,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/init.h>
@ -76,6 +75,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <asm/system.h>
#include <linux/netdevice.h>
@ -91,11 +91,6 @@
#include "et131x_adapter.h"
/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */
/**
* et131x_enable_interrupts - enable interrupt
* @adapter: et131x device
@ -151,9 +146,7 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
struct et131x_adapter *adapter = NULL;
u32 status;
if (netdev == NULL || !netif_device_present(netdev)) {
DBG_WARNING(et131x_dbginfo,
"No net_device struct or device not present\n");
if (!netif_device_present(netdev)) {
handled = false;
goto out;
}
@ -181,23 +174,12 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
/* Make sure this is our interrupt */
if (!status) {
#ifdef CONFIG_ET131X_DEBUG
adapter->Stats.UnhandledInterruptsPerSec++;
#endif
handled = false;
DBG_VERBOSE(et131x_dbginfo, "NOT OUR INTERRUPT\n");
et131x_enable_interrupts(adapter);
goto out;
}
/* This is our interrupt, so process accordingly */
#ifdef CONFIG_ET131X_DEBUG
if (status & ET_INTR_RXDMA_XFR_DONE)
adapter->Stats.RxDmaInterruptsPerSec++;
if (status & ET_INTR_TXDMA_ISR)
adapter->Stats.TxDmaInterruptsPerSec++;
#endif
if (status & ET_INTR_WATCHDOG) {
PMP_TCB pMpTcb = adapter->TxRing.CurrSendHead;
@ -212,9 +194,6 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
writel(0, &adapter->regs->global.watchdog_timer);
status &= ~ET_INTR_WATCHDOG;
#ifdef CONFIG_ET131X_DEBUG
adapter->Stats.WatchDogInterruptsPerSec++;
#endif
}
if (status == 0) {
@ -263,13 +242,11 @@ void et131x_isr_handler(struct work_struct *work)
*/
/* Handle all the completed Transmit interrupts */
if (status & ET_INTR_TXDMA_ISR) {
DBG_TX(et131x_dbginfo, "TXDMA_ISR interrupt\n");
et131x_handle_send_interrupt(etdev);
}
/* Handle all the completed Receives interrupts */
if (status & ET_INTR_RXDMA_XFR_DONE) {
DBG_RX(et131x_dbginfo, "RXDMA_XFR_DONE interrupt\n");
et131x_handle_recv_interrupt(etdev);
}
@ -283,7 +260,7 @@ void et131x_isr_handler(struct work_struct *work)
/* Following read also clears the register (COR) */
TxDmaErr.value = readl(&iomem->txdma.TxDmaError.value);
DBG_WARNING(et131x_dbginfo,
dev_warn(&etdev->pdev->dev,
"TXDMA_ERR interrupt, error = %d\n",
TxDmaErr.value);
}
@ -304,9 +281,6 @@ void et131x_isr_handler(struct work_struct *work)
* ET1310 for re-use. This interrupt is one method of
* returning resources.
*/
DBG_WARNING(et131x_dbginfo,
"RXDMA_FB_RING0_LOW or "
"RXDMA_FB_RING1_LOW interrupt\n");
/* If the user has flow control on, then we will
* send a pause packet, otherwise just exit
@ -332,8 +306,6 @@ void et131x_isr_handler(struct work_struct *work)
/* Handle Packet Status Ring Low Interrupt */
if (status & ET_INTR_RXDMA_STAT_LOW) {
DBG_WARNING(et131x_dbginfo,
"RXDMA_PKT_STAT_RING_LOW interrupt\n");
/*
* Same idea as with the two Free Buffer Rings.
@ -370,7 +342,7 @@ void et131x_isr_handler(struct work_struct *work)
etdev->TxMacTest.value =
readl(&iomem->txmac.tx_test.value);
DBG_WARNING(et131x_dbginfo,
dev_warn(&etdev->pdev->dev,
"RxDMA_ERR interrupt, error %x\n",
etdev->TxMacTest.value);
}
@ -384,7 +356,7 @@ void et131x_isr_handler(struct work_struct *work)
* message when we are in DBG mode, otherwise we
* will ignore it.
*/
DBG_ERROR(et131x_dbginfo, "WAKE_ON_LAN interrupt\n");
dev_err(&etdev->pdev->dev, "WAKE_ON_LAN interrupt\n");
}
/* Handle the PHY interrupt */
@ -393,8 +365,6 @@ void et131x_isr_handler(struct work_struct *work)
MI_BMSR_t BmsrInts, BmsrData;
MI_ISR_t myIsr;
DBG_VERBOSE(et131x_dbginfo, "PHY interrupt\n");
/* If we are in coma mode when we get this interrupt,
* we need to disable it.
*/
@ -405,9 +375,6 @@ void et131x_isr_handler(struct work_struct *work)
* so, disable it because we will not be able
* to read PHY values until we are out.
*/
DBG_VERBOSE(et131x_dbginfo,
"Device is in COMA mode, "
"need to wake up\n");
DisablePhyComa(etdev);
}
@ -426,11 +393,6 @@ void et131x_isr_handler(struct work_struct *work)
etdev->Bmsr.value ^ BmsrData.value;
etdev->Bmsr.value = BmsrData.value;
DBG_VERBOSE(et131x_dbginfo,
"Bmsr.value = 0x%04x,"
"Bmsr_ints.value = 0x%04x\n",
BmsrData.value, BmsrInts.value);
/* Do all the cable in / cable out stuff */
et131x_Mii_check(etdev, BmsrData, BmsrInts);
}
@ -451,7 +413,7 @@ void et131x_isr_handler(struct work_struct *work)
* a nutshell, the whole Tx path will have to be reset
* and re-configured afterwards.
*/
DBG_WARNING(et131x_dbginfo,
dev_warn(&etdev->pdev->dev,
"TXMAC interrupt, error 0x%08x\n",
etdev->TxRing.TxMacErr.value);
@ -459,7 +421,6 @@ void et131x_isr_handler(struct work_struct *work)
* otherwise we just want the device to be reset and
* continue
*/
/* DBG_TRAP(); */
}
/* Handle RXMAC Interrupt */
@ -473,11 +434,11 @@ void et131x_isr_handler(struct work_struct *work)
/* MP_SET_FLAG( etdev,
fMP_ADAPTER_HARDWARE_ERROR); */
DBG_WARNING(et131x_dbginfo,
dev_warn(&etdev->pdev->dev,
"RXMAC interrupt, error 0x%08x. Requesting reset\n",
readl(&iomem->rxmac.err_reg.value));
DBG_WARNING(et131x_dbginfo,
dev_warn(&etdev->pdev->dev,
"Enable 0x%08x, Diag 0x%08x\n",
readl(&iomem->rxmac.ctrl.value),
readl(&iomem->rxmac.rxq_diag.value));
@ -487,7 +448,6 @@ void et131x_isr_handler(struct work_struct *work)
* otherwise we just want the device to be reset and
* continue
*/
/* TRAP(); */
}
/* Handle MAC_STAT Interrupt */
@ -498,7 +458,6 @@ void et131x_isr_handler(struct work_struct *work)
* to maintain the top, software managed bits of the
* counter(s).
*/
DBG_VERBOSE(et131x_dbginfo, "MAC_STAT interrupt\n");
HandleMacStatInterrupt(etdev);
}
@ -513,7 +472,6 @@ void et131x_isr_handler(struct work_struct *work)
* addressed module is in a power-down state and
* can't respond.
*/
DBG_VERBOSE(et131x_dbginfo, "SLV_TIMEOUT interrupt\n");
}
}
et131x_enable_interrupts(etdev);

@ -56,7 +56,6 @@
*/
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"
#include <linux/init.h>
@ -75,6 +74,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <asm/system.h>
#include <linux/mii.h>
@ -94,11 +94,6 @@
#include "et131x_isr.h"
#include "et131x_initpci.h"
/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */
struct net_device_stats *et131x_stats(struct net_device *netdev);
int et131x_open(struct net_device *netdev);
int et131x_close(struct net_device *netdev);
@ -138,15 +133,11 @@ struct net_device *et131x_device_alloc(void)
{
struct net_device *netdev;
DBG_ENTER(et131x_dbginfo);
/* Alloc net_device and adapter structs */
netdev = alloc_etherdev(sizeof(struct et131x_adapter));
if (netdev == NULL) {
DBG_ERROR(et131x_dbginfo,
"Alloc of net_device struct failed\n");
DBG_LEAVE(et131x_dbginfo);
printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
return NULL;
}
@ -163,8 +154,6 @@ struct net_device *et131x_device_alloc(void)
/* Poll? */
/* netdev->poll = &et131x_poll; */
/* netdev->poll_controller = &et131x_poll_controller; */
DBG_LEAVE(et131x_dbginfo);
return netdev;
}
@ -180,8 +169,6 @@ struct net_device_stats *et131x_stats(struct net_device *netdev)
struct net_device_stats *stats = &adapter->net_stats;
CE_STATS_t *devstat = &adapter->Stats;
DBG_ENTER(et131x_dbginfo);
stats->rx_packets = devstat->ipackets;
stats->tx_packets = devstat->opackets;
stats->rx_errors = devstat->length_err + devstat->alignment_err +
@ -213,8 +200,6 @@ struct net_device_stats *et131x_stats(struct net_device *netdev)
/* stats->tx_fifo_errors = devstat->; */
/* stats->tx_heartbeat_errors = devstat->; */
/* stats->tx_window_errors = devstat->; */
DBG_LEAVE(et131x_dbginfo);
return stats;
}
@ -229,20 +214,15 @@ int et131x_open(struct net_device *netdev)
int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
DBG_ENTER(et131x_dbginfo);
/* Start the timer to track NIC errors */
add_timer(&adapter->ErrorTimer);
/* Register our ISR */
DBG_TRACE(et131x_dbginfo, "Registering ISR...\n");
result =
request_irq(netdev->irq, et131x_isr, IRQF_SHARED, netdev->name,
netdev);
/* Register our IRQ */
result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
netdev->name, netdev);
if (result) {
DBG_ERROR(et131x_dbginfo, "Could not register ISR\n");
DBG_LEAVE(et131x_dbginfo);
dev_err(&adapter->pdev->dev, "c ould not register IRQ %d\n",
netdev->irq);
return result;
}
@ -257,8 +237,6 @@ int et131x_open(struct net_device *netdev)
/* We're ready to move some data, so start the queue */
netif_start_queue(netdev);
DBG_LEAVE(et131x_dbginfo);
return result;
}
@ -272,8 +250,6 @@ int et131x_close(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
DBG_ENTER(et131x_dbginfo);
/* First thing is to stop the queue */
netif_stop_queue(netdev);
@ -286,14 +262,10 @@ int et131x_close(struct net_device *netdev)
/* Deregistering ISR */
adapter->Flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
DBG_TRACE(et131x_dbginfo, "Deregistering ISR...\n");
free_irq(netdev->irq, netdev);
/* Stop the error timer */
del_timer_sync(&adapter->ErrorTimer);
DBG_LEAVE(et131x_dbginfo);
return 0;
}
@ -311,39 +283,30 @@ int et131x_ioctl_mii(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
struct et131x_adapter *etdev = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(reqbuf);
DBG_ENTER(et131x_dbginfo);
switch (cmd) {
case SIOCGMIIPHY:
DBG_VERBOSE(et131x_dbginfo, "SIOCGMIIPHY\n");
data->phy_id = etdev->Stats.xcvr_addr;
break;
case SIOCGMIIREG:
DBG_VERBOSE(et131x_dbginfo, "SIOCGMIIREG\n");
if (!capable(CAP_NET_ADMIN)) {
if (!capable(CAP_NET_ADMIN))
status = -EPERM;
} else {
else
status = MiRead(etdev,
data->reg_num, &data->val_out);
}
break;
case SIOCSMIIREG:
DBG_VERBOSE(et131x_dbginfo, "SIOCSMIIREG\n");
if (!capable(CAP_NET_ADMIN)) {
if (!capable(CAP_NET_ADMIN))
status = -EPERM;
} else {
else
status = MiWrite(etdev, data->reg_num,
data->val_in);
}
break;
default:
status = -EOPNOTSUPP;
}
DBG_LEAVE(et131x_dbginfo);
return status;
}
@ -359,8 +322,6 @@ int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
{
int status = 0;
DBG_ENTER(et131x_dbginfo);
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
@ -369,12 +330,8 @@ int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
break;
default:
DBG_WARNING(et131x_dbginfo, "Unhandled IOCTL Code: 0x%04x\n",
cmd);
status = -EOPNOTSUPP;
}
DBG_LEAVE(et131x_dbginfo);
return status;
}
@ -391,8 +348,6 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter)
RXMAC_CTRL_t ctrl;
RXMAC_PF_CTRL_t pf_ctrl;
DBG_ENTER(et131x_dbginfo);
ctrl.value = readl(&adapter->regs->rxmac.ctrl.value);
pf_ctrl.value = readl(&adapter->regs->rxmac.pf_ctrl.value);
@ -415,12 +370,8 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter)
* multicast entries or (3) we receive none.
*/
if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) {
DBG_VERBOSE(et131x_dbginfo,
"Multicast filtering OFF (Rx ALL MULTICAST)\n");
pf_ctrl.bits.filter_multi_en = 0;
} else {
DBG_VERBOSE(et131x_dbginfo,
"Multicast filtering ON\n");
SetupDeviceForMulticast(adapter);
pf_ctrl.bits.filter_multi_en = 1;
ctrl.bits.pkt_filter_disable = 0;
@ -428,7 +379,6 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter)
/* Set us up with Unicast packet filtering */
if (filter & ET131X_PACKET_TYPE_DIRECTED) {
DBG_VERBOSE(et131x_dbginfo, "Unicast Filtering ON\n");
SetupDeviceForUnicast(adapter);
pf_ctrl.bits.filter_uni_en = 1;
ctrl.bits.pkt_filter_disable = 0;
@ -436,12 +386,9 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter)
/* Set us up with Broadcast packet filtering */
if (filter & ET131X_PACKET_TYPE_BROADCAST) {
DBG_VERBOSE(et131x_dbginfo, "Broadcast Filtering ON\n");
pf_ctrl.bits.filter_broad_en = 1;
ctrl.bits.pkt_filter_disable = 0;
} else {
DBG_VERBOSE(et131x_dbginfo,
"Broadcast Filtering OFF\n");
pf_ctrl.bits.filter_broad_en = 0;
}
@ -453,8 +400,6 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter)
&adapter->regs->rxmac.pf_ctrl.value);
writel(ctrl.value, &adapter->regs->rxmac.ctrl.value);
}
DBG_LEAVE(et131x_dbginfo);
return status;
}
@ -470,8 +415,6 @@ void et131x_multicast(struct net_device *netdev)
unsigned long flags;
struct dev_mc_list *mclist = netdev->mc_list;
DBG_ENTER(et131x_dbginfo);
spin_lock_irqsave(&adapter->Lock, flags);
/* Before we modify the platform-independent filter flags, store them
@ -490,35 +433,25 @@ void et131x_multicast(struct net_device *netdev)
/* Check the net_device flags and set the device independent flags
* accordingly
*/
DBG_VERBOSE(et131x_dbginfo,
"MULTICAST ADDR COUNT: %d\n", netdev->mc_count);
if (netdev->flags & IFF_PROMISC) {
DBG_VERBOSE(et131x_dbginfo, "Request: PROMISCUOUS MODE ON\n");
adapter->PacketFilter |= ET131X_PACKET_TYPE_PROMISCUOUS;
} else {
DBG_VERBOSE(et131x_dbginfo, "Request: PROMISCUOUS MODE OFF\n");
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
}
if (netdev->flags & IFF_ALLMULTI) {
DBG_VERBOSE(et131x_dbginfo, "Request: ACCEPT ALL MULTICAST\n");
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
if (netdev->mc_count > NIC_MAX_MCAST_LIST) {
DBG_WARNING(et131x_dbginfo,
"ACCEPT ALL MULTICAST for now, as there's more Multicast addresses than the HW supports\n");
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
if (netdev->mc_count < 1) {
DBG_VERBOSE(et131x_dbginfo, "Request: REJECT ALL MULTICAST\n");
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
} else {
DBG_VERBOSE(et131x_dbginfo,
"Request: SET MULTICAST FILTER(S)\n");
adapter->PacketFilter |= ET131X_PACKET_TYPE_MULTICAST;
}
@ -526,14 +459,8 @@ void et131x_multicast(struct net_device *netdev)
adapter->MCAddressCount = netdev->mc_count;
if (netdev->mc_count) {
if (mclist->dmi_addrlen != ETH_ALEN)
DBG_WARNING(et131x_dbginfo,
"Multicast addrs are not ETH_ALEN in size\n");
else {
count = netdev->mc_count - 1;
memcpy(adapter->MCList[count], mclist->dmi_addr,
ETH_ALEN);
}
count = netdev->mc_count - 1;
memcpy(adapter->MCList[count], mclist->dmi_addr, ETH_ALEN);
}
/* Are the new flags different from the previous ones? If not, then no
@ -544,17 +471,9 @@ void et131x_multicast(struct net_device *netdev)
*/
if (PacketFilter != adapter->PacketFilter) {
/* Call the device's filter function */
DBG_VERBOSE(et131x_dbginfo, "UPDATE REQUIRED, FLAGS changed\n");
et131x_set_packet_filter(adapter);
} else {
DBG_VERBOSE(et131x_dbginfo,
"NO UPDATE REQUIRED, FLAGS didn't change\n");
}
spin_unlock_irqrestore(&adapter->Lock, flags);
DBG_LEAVE(et131x_dbginfo);
}
/**
@ -568,8 +487,6 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
DBG_TX_ENTER(et131x_dbginfo);
/* Save the timestamp for the TX timeout watchdog */
netdev->trans_start = jiffies;
@ -579,22 +496,15 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
/* Check status and manage the netif queue if necessary */
if (status != 0) {
if (status == -ENOMEM) {
DBG_VERBOSE(et131x_dbginfo,
"OUT OF TCBs; STOP NETIF QUEUE\n");
/* Put the queue to sleep until resources are
* available
*/
netif_stop_queue(netdev);
status = NETDEV_TX_BUSY;
} else {
DBG_WARNING(et131x_dbginfo,
"Misc error; drop packet\n");
status = NETDEV_TX_OK;
}
}
DBG_TX_LEAVE(et131x_dbginfo);
return status;
}
@ -612,25 +522,19 @@ void et131x_tx_timeout(struct net_device *netdev)
PMP_TCB pMpTcb;
unsigned long flags;
DBG_WARNING(et131x_dbginfo, "TX TIMEOUT\n");
/* Just skip this part if the adapter is doing link detection */
if (etdev->Flags & fMP_ADAPTER_LINK_DETECTION) {
DBG_ERROR(et131x_dbginfo, "Still doing link detection\n");
if (etdev->Flags & fMP_ADAPTER_LINK_DETECTION)
return;
}
/* Any nonrecoverable hardware error?
* Checks adapter->flags for any failure in phy reading
*/
if (etdev->Flags & fMP_ADAPTER_NON_RECOVER_ERROR) {
DBG_WARNING(et131x_dbginfo, "Non recoverable error - remove\n");
if (etdev->Flags & fMP_ADAPTER_NON_RECOVER_ERROR)
return;
}
/* Hardware failure? */
if (etdev->Flags & fMP_ADAPTER_HARDWARE_ERROR) {
DBG_WARNING(et131x_dbginfo, "hardware error - reset\n");
dev_err(&etdev->pdev->dev, "hardware error - reset\n");
return;
}
@ -643,13 +547,6 @@ void et131x_tx_timeout(struct net_device *netdev)
pMpTcb->Count++;
if (pMpTcb->Count > NIC_SEND_HANG_THRESHOLD) {
#ifdef CONFIG_ET131X_DEBUG
TX_STATUS_BLOCK_t txDmaComplete =
*(etdev->TxRing.pTxStatusVa);
PTX_DESC_ENTRY_t pDesc =
etdev->TxRing.pTxDescRingVa +
INDEX10(pMpTcb->WrIndex);
#endif
TX_DESC_ENTRY_t StuckDescriptors[10];
if (INDEX10(pMpTcb->WrIndex) > 7) {
@ -662,26 +559,11 @@ void et131x_tx_timeout(struct net_device *netdev)
spin_unlock_irqrestore(&etdev->TCBSendQLock,
flags);
DBG_WARNING(et131x_dbginfo,
dev_warn(&etdev->pdev->dev,
"Send stuck - reset. pMpTcb->WrIndex %x, Flags 0x%08x\n",
pMpTcb->WrIndex,
pMpTcb->Flags);
DBG_WARNING(et131x_dbginfo,
"pDesc 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
pDesc->DataBufferPtrHigh,
pDesc->DataBufferPtrLow, pDesc->word2.value,
pDesc->word3.value);
DBG_WARNING(et131x_dbginfo,
"WbStatus 0x%08x\n", txDmaComplete.value);
#ifdef CONFIG_ET131X_DEBUG
DumpDeviceBlock(DBG_WARNING_ON, etdev, 0);
DumpDeviceBlock(DBG_WARNING_ON, etdev, 1);
DumpDeviceBlock(DBG_WARNING_ON, etdev, 3);
DumpDeviceBlock(DBG_WARNING_ON, etdev, 5);
#endif
et131x_close(netdev);
et131x_open(netdev);
@ -704,13 +586,9 @@ int et131x_change_mtu(struct net_device *netdev, int new_mtu)
int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
DBG_ENTER(et131x_dbginfo);
/* Make sure the requested MTU is valid */
if (new_mtu == 0 || new_mtu > 9216) {
DBG_LEAVE(et131x_dbginfo);
if (new_mtu < 64 || new_mtu > 9216)
return -EINVAL;
}
/* Stop the netif queue */
netif_stop_queue(netdev);
@ -737,7 +615,7 @@ int et131x_change_mtu(struct net_device *netdev, int new_mtu)
/* Alloc and init Rx DMA memory */
result = et131x_adapter_memory_alloc(adapter);
if (result != 0) {
DBG_WARNING(et131x_dbginfo,
dev_warn(&adapter->pdev->dev,
"Change MTU failed; couldn't re-alloc DMA memory\n");
return result;
}
@ -760,8 +638,6 @@ int et131x_change_mtu(struct net_device *netdev, int new_mtu)
/* Restart the netif queue */
netif_wake_queue(netdev);
DBG_LEAVE(et131x_dbginfo);
return result;
}
@ -780,20 +656,14 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct sockaddr *address = new_mac;
DBG_ENTER(et131x_dbginfo);
/* begin blux */
/* DBG_VERBOSE( et131x_dbginfo, "Function not implemented!!\n" ); */
if (adapter == NULL) {
DBG_LEAVE(et131x_dbginfo);
if (adapter == NULL)
return -ENODEV;
}
/* Make sure the requested MAC is valid */
if (!is_valid_ether_addr(address->sa_data)) {
DBG_LEAVE(et131x_dbginfo);
if (!is_valid_ether_addr(address->sa_data))
return -EINVAL;
}
/* Stop the netif queue */
netif_stop_queue(netdev);
@ -832,7 +702,7 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
/* Alloc and init Rx DMA memory */
result = et131x_adapter_memory_alloc(adapter);
if (result != 0) {
DBG_WARNING(et131x_dbginfo,
dev_err(&adapter->pdev->dev,
"Change MAC failed; couldn't re-alloc DMA memory\n");
return result;
}
@ -856,7 +726,5 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
/* Restart the netif queue */
netif_wake_queue(netdev);
DBG_LEAVE(et131x_dbginfo);
return result;
}