Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
commit 47ac3199ac
@@ -2104,12 +2104,10 @@ L: netdev@vger.kernel.org
S: Maintained

INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe)
P: Auke Kok
M: auke-jan.h.kok@intel.com
P: Jesse Brandeburg
M: jesse.brandeburg@intel.com
P: Jeff Kirsher
M: jeffrey.t.kirsher@intel.com
P: Jesse Brandeburg
M: jesse.brandeburg@intel.com
P: Bruce Allan
M: bruce.w.allan@intel.com
P: John Ronciak

@@ -53,11 +53,13 @@ int register_memory_notifier(struct notifier_block *nb)
{
    return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
    blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

/*
 * register_memory - Setup a sysfs device for a memory block

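For context, a minimal sketch of how a driver might consume this now-exported notifier interface (illustrative only; the names my_mem_notifier/my_mem_nb are hypothetical, but the pattern mirrors the ehea usage later in this merge):

#include <linux/memory.h>
#include <linux/notifier.h>

/* Hypothetical consumer of the now-exported memory notifier API. */
static int my_mem_notifier(struct notifier_block *nb,
                           unsigned long action, void *data)
{
    if (action == MEM_OFFLINE)
        pr_info("a memory section went offline\n");
    return NOTIFY_OK;
}

static struct notifier_block my_mem_nb = {
    .notifier_call = my_mem_notifier,
};

/* module init: register_memory_notifier(&my_mem_nb);
 * module exit: unregister_memory_notifier(&my_mem_nb); */
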
@@ -2426,7 +2426,7 @@ config CHELSIO_T3

config EHEA
    tristate "eHEA Ethernet support"
    depends on IBMEBUS && INET && SPARSEMEM
    depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG
    select INET_LRO
    ---help---
      This driver supports the IBM pSeries eHEA ethernet adapter.

@@ -1,7 +1,7 @@
/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

@@ -36,7 +36,6 @@
 * A very incomplete list of things that need to be dealt with:
 *
 * TODO:
 * Wake on LAN.
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 *   http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2

@@ -638,21 +637,18 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
}

/*
 *TODO: do something or get rid of this
 * Force the PHY into power saving mode using vendor magic.
 */
#ifdef CONFIG_PM
static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
static void atl1_phy_enter_power_saving(struct atl1_hw *hw)
{
/*    s32 ret_val;
 *    u16 phy_data;
 */
    atl1_write_phy_reg(hw, MII_DBG_ADDR, 0);
    atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E);
    atl1_write_phy_reg(hw, MII_DBG_ADDR, 2);
    atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
    atl1_write_phy_reg(hw, MII_DBG_ADDR, 3);
    atl1_write_phy_reg(hw, MII_DBG_DATA, 0);

/*
    ret_val = atl1_write_phy_reg(hw, ...);
    ret_val = atl1_write_phy_reg(hw, ...);
    ....
*/
    return 0;
}
#endif

@@ -2784,64 +2780,93 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
    struct atl1_hw *hw = &adapter->hw;
    u32 ctrl = 0;
    u32 wufc = adapter->wol;
    u32 val;
    int retval;
    u16 speed;
    u16 duplex;

    netif_device_detach(netdev);
    if (netif_running(netdev))
        atl1_down(adapter);

    retval = pci_save_state(pdev);
    if (retval)
        return retval;

    atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
    atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
    if (ctrl & BMSR_LSTATUS)
    val = ctrl & BMSR_LSTATUS;
    if (val)
        wufc &= ~ATLX_WUFC_LNKC;

    /* reduce speed to 10/100M */
    if (wufc) {
        atl1_phy_enter_power_saving(hw);
        /* if resume, let driver to re- setup link */
        hw->phy_configured = false;
        atl1_set_mac_addr(hw);
        atlx_set_multi(netdev);
    if (val && wufc) {
        val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
        if (val) {
            if (netif_msg_ifdown(adapter))
                dev_printk(KERN_DEBUG, &pdev->dev,
                    "error getting speed/duplex\n");
            goto disable_wol;
        }

        ctrl = 0;
        /* turn on magic packet wol */

        /* enable magic packet WOL */
        if (wufc & ATLX_WUFC_MAG)
            ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

        /* turn on Link change WOL */
        if (wufc & ATLX_WUFC_LNKC)
            ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
            ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
        iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
        ioread32(hw->hw_addr + REG_WOL_CTRL);

        /* turn on all-multi mode if wake on multicast is enabled */
        ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
        ctrl &= ~MAC_CTRL_DBG;
        ctrl &= ~MAC_CTRL_PROMIS_EN;
        if (wufc & ATLX_WUFC_MC)
            ctrl |= MAC_CTRL_MC_ALL_EN;
        else
            ctrl &= ~MAC_CTRL_MC_ALL_EN;

        /* turn on broadcast mode if wake on-BC is enabled */
        if (wufc & ATLX_WUFC_BC)
        /* configure the mac */
        ctrl = MAC_CTRL_RX_EN;
        ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
            MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
        if (duplex == FULL_DUPLEX)
            ctrl |= MAC_CTRL_DUPLX;
        ctrl |= (((u32)adapter->hw.preamble_len &
            MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
        if (adapter->vlgrp)
            ctrl |= MAC_CTRL_RMV_VLAN;
        if (wufc & ATLX_WUFC_MAG)
            ctrl |= MAC_CTRL_BC_EN;
        else
            ctrl &= ~MAC_CTRL_BC_EN;

        /* enable RX */
        ctrl |= MAC_CTRL_RX_EN;
        iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
        pci_enable_wake(pdev, PCI_D3hot, 1);
        pci_enable_wake(pdev, PCI_D3cold, 1);
    } else {
        iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
        ioread32(hw->hw_addr + REG_MAC_CTRL);

        /* poke the PHY */
        ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
        ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
        iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
        ioread32(hw->hw_addr + REG_PCIE_PHYMISC);

        pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
        goto exit;
    }

    pci_save_state(pdev);
    pci_disable_device(pdev);
    if (!val && wufc) {
        ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
        iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
        ioread32(hw->hw_addr + REG_WOL_CTRL);
        iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
        ioread32(hw->hw_addr + REG_MAC_CTRL);
        hw->phy_configured = false;
        pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
        goto exit;
    }

    pci_set_power_state(pdev, PCI_D3hot);
disable_wol:
    iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
    ioread32(hw->hw_addr + REG_WOL_CTRL);
    ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
    ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
    iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
    ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
    atl1_phy_enter_power_saving(hw);
    hw->phy_configured = false;
    pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
exit:
    if (netif_running(netdev))
        pci_disable_msi(adapter->pdev);
    pci_disable_device(pdev);
    pci_set_power_state(pdev, pci_choose_state(pdev, state));

    return 0;
}

@@ -2855,20 +2880,26 @@ static int atl1_resume(struct pci_dev *pdev)
    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);

    /* FIXME: check and handle */
    err = pci_enable_device(pdev);
    if (err) {
        if (netif_msg_ifup(adapter))
            dev_printk(KERN_DEBUG, &pdev->dev,
                "error enabling pci device\n");
        return err;
    }

    pci_set_master(pdev);
    iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
    pci_enable_wake(pdev, PCI_D3hot, 0);
    pci_enable_wake(pdev, PCI_D3cold, 0);

    iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
    atl1_reset(adapter);
    atl1_reset_hw(&adapter->hw);
    adapter->cmb.cmb->int_stats = 0;

    if (netif_running(netdev))
        atl1_up(adapter);
    netif_device_attach(netdev);

    atl1_via_workaround(adapter);

    return 0;
}
#else

@@ -2876,6 +2907,13 @@ static int atl1_resume(struct pci_dev *pdev)
#define atl1_resume NULL
#endif

static void atl1_shutdown(struct pci_dev *pdev)
{
#ifdef CONFIG_PM
    atl1_suspend(pdev, PMSG_SUSPEND);
#endif
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl1_poll_controller(struct net_device *netdev)
{

@@ -3122,7 +3160,8 @@ static struct pci_driver atl1_driver = {
    .probe = atl1_probe,
    .remove = __devexit_p(atl1_remove),
    .suspend = atl1_suspend,
    .resume = atl1_resume
    .resume = atl1_resume,
    .shutdown = atl1_shutdown
};

/*

@@ -1,7 +1,7 @@
/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

@@ -2,7 +2,7 @@
 *
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
 *
 * Derived from Intel e1000 driver

@@ -2,7 +2,7 @@
 *
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
 *
 * Derived from Intel e1000 driver

@@ -29,7 +29,7 @@
#include <linux/module.h>
#include <linux/types.h>

#define ATLX_DRIVER_VERSION "2.1.1"
#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
    Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");

@@ -460,6 +460,9 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */
#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */

#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E

/* PCI Command Register Bit Definitions */
#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
#define CMD_IO_SPACE 0x0001

@@ -71,6 +71,7 @@ enum { /* adapter flags */
    USING_MSIX = (1 << 2),
    QUEUES_BOUND = (1 << 3),
    TP_PARITY_INIT = (1 << 4),
    NAPI_INIT = (1 << 5),
};

struct fl_pg_chunk {

@@ -698,6 +698,7 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
                    int reset);
int t3_replay_prep_adapter(struct adapter *adapter);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);

@@ -421,6 +421,13 @@ static void init_napi(struct adapter *adap)
        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                       64);
    }

    /*
     * netif_napi_add() can be called only once per napi_struct because it
     * adds each new napi_struct to a list.  Be careful not to call it a
     * second time, e.g., during EEH recovery, by making a note of it.
     */
    adap->flags |= NAPI_INIT;
}

/*

@@ -896,7 +903,8 @@ static int cxgb_up(struct adapter *adap)
            goto out;

        setup_rss(adap);
        init_napi(adap);
        if (!(adap->flags & NAPI_INIT))
            init_napi(adap);
        adap->flags |= FULL_INIT_DONE;
    }

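The guard above is the heart of this cxgb3 fix: netif_napi_add() must run exactly once per napi_struct, so the EEH recovery path must not repeat it. A stand-alone sketch of the same init-once pattern (my own illustration; the real driver keys this off adap->flags & NAPI_INIT as shown):

/* Stand-alone sketch of the init-once guard used above. */
#include <stdbool.h>
#include <stdio.h>

struct dev { bool napi_initialized; };

static void init_napi_once(struct dev *d)
{
    if (d->napi_initialized)
        return;            /* e.g. re-entered via EEH recovery */
    /* ... the netif_napi_add() equivalent runs here, exactly once ... */
    d->napi_initialized = true;
}

int main(void)
{
    struct dev d = { false };
    init_napi_once(&d);    /* initializes */
    init_napi_once(&d);    /* safe no-op on the recovery path */
    printf("%d\n", d.napi_initialized);    /* prints 1 */
    return 0;
}
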
@@ -999,7 +1007,7 @@ static int offload_open(struct net_device *dev)
        return 0;

    if (!adap_up && (err = cxgb_up(adapter)) < 0)
        return err;
        goto out;

    t3_tp_set_offload_mode(adapter, 1);
    tdev->lldev = adapter->port[0];

@@ -1061,10 +1069,8 @@ static int cxgb_open(struct net_device *dev)
    int other_ports = adapter->open_device_map & PORT_MASK;
    int err;

    if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
        quiesce_rx(adapter);
    if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
        return err;
    }

    set_bit(pi->port_id, &adapter->open_device_map);
    if (is_offload(adapter) && !ofld_disable) {

@@ -2424,14 +2430,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
        test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
        offload_close(&adapter->tdev);

    /* Free sge resources */
    t3_free_sge_resources(adapter);

    adapter->flags &= ~FULL_INIT_DONE;

    pci_disable_device(pdev);

    /* Request a slot slot reset. */
    /* Request a slot reset. */
    return PCI_ERS_RESULT_NEED_RESET;
}

@@ -2448,13 +2451,20 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
    if (pci_enable_device(pdev)) {
        dev_err(&pdev->dev,
            "Cannot re-enable PCI device after reset.\n");
        return PCI_ERS_RESULT_DISCONNECT;
        goto err;
    }
    pci_set_master(pdev);
    pci_restore_state(pdev);

    t3_prep_adapter(adapter, adapter->params.info, 1);
    /* Free sge resources */
    t3_free_sge_resources(adapter);

    if (t3_replay_prep_adapter(adapter))
        goto err;

    return PCI_ERS_RESULT_RECOVERED;
err:
    return PCI_ERS_RESULT_DISCONNECT;
}

/**

@@ -2483,13 +2493,6 @@ static void t3_io_resume(struct pci_dev *pdev)
            netif_device_attach(netdev);
        }
    }

    if (is_offload(adapter)) {
        __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
        if (offload_open(adapter->port[0]))
            printk(KERN_WARNING
                   "Could not bring back offload capabilities\n");
    }
}

static struct pci_error_handlers t3_err_handler = {

@@ -2608,6 +2611,7 @@ static int __devinit init_one(struct pci_dev *pdev,
    }

    pci_set_master(pdev);
    pci_save_state(pdev);

    mmio_start = pci_resource_start(pdev, 0);
    mmio_len = pci_resource_len(pdev, 0);

@@ -444,6 +444,14 @@

#define A_PCIE_CFG 0x88

#define S_ENABLELINKDWNDRST 21
#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U)

#define S_ENABLELINKDOWNRST 20
#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U)

#define S_PCIE_CLIDECEN 16
#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)

@@ -538,6 +538,31 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
    return p;
}

/**
 * t3_reset_qset - reset a sge qset
 * @q: the queue set
 *
 * Reset the qset structure.
 * the NAPI structure is preserved in the event of
 * the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
    if (q->adap &&
        !(q->adap->flags & NAPI_INIT)) {
        memset(q, 0, sizeof(*q));
        return;
    }

    q->adap = NULL;
    memset(&q->rspq, 0, sizeof(q->rspq));
    memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
    memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
    q->txq_stopped = 0;
    memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
}


/**
 * free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set

@@ -594,7 +619,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
                  q->rspq.desc, q->rspq.phys_addr);
    }

    memset(q, 0, sizeof(*q));
    t3_reset_qset(q);
}

/**

@@ -1365,7 +1390,7 @@ static void restart_ctrlq(unsigned long data)
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
    int ret;
    int ret;
    local_bh_disable();
    ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
    local_bh_enable();

@@ -3264,6 +3264,7 @@ static void config_pcie(struct adapter *adap)

    t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
    t3_set_reg_field(adap, A_PCIE_CFG, 0,
                     F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
                     F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}

@@ -3655,3 +3656,30 @@ void t3_led_ready(struct adapter *adapter)
    t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                     F_GPIO0_OUT_VAL);
}

int t3_replay_prep_adapter(struct adapter *adapter)
{
    const struct adapter_info *ai = adapter->params.info;
    unsigned int i, j = 0;
    int ret;

    early_hw_init(adapter, ai);
    ret = init_parity(adapter);
    if (ret)
        return ret;

    for_each_port(adapter, i) {
        struct port_info *p = adap2pinfo(adapter, i);
        while (!adapter->params.vpd.port_type[j])
            ++j;

        p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
                               ai->mdio_ops);

        p->phy.ops->power_down(&p->phy, 1);
        ++j;
    }

    return 0;
}

@@ -117,6 +117,9 @@ typedef struct board_info {

    struct mutex addr_lock; /* phy and eeprom access lock */

    struct delayed_work phy_poll;
    struct net_device *ndev;

    spinlock_t lock;

    struct mii_if_info mii;

@@ -297,6 +300,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
    }
}

static void dm9000_schedule_poll(board_info_t *db)
{
    schedule_delayed_work(&db->phy_poll, HZ * 2);
}

/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)

@@ -465,6 +472,17 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
    .set_eeprom = dm9000_set_eeprom,
};

static void
dm9000_poll_work(struct work_struct *w)
{
    struct delayed_work *dw = container_of(w, struct delayed_work, work);
    board_info_t *db = container_of(dw, board_info_t, phy_poll);

    mii_check_media(&db->mii, netif_msg_link(db), 0);

    if (netif_running(db->ndev))
        dm9000_schedule_poll(db);
}

/* dm9000_release_board
 *

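Taken with the board_info, dm9000_open and dm9000_stop hunks around it, this implements the classic self-rescheduling poll: INIT_DELAYED_WORK() at probe, schedule_delayed_work() at open and again from the handler, cancel_delayed_work() at stop. A condensed sketch of the pattern (my own illustration using the same kernel APIs):

#include <linux/workqueue.h>

/* Illustrative self-rescheduling poll, mirroring the dm9000 pattern. */
static struct delayed_work poll_work;
static bool running;

static void poll_fn(struct work_struct *w)
{
    /* ... check link state, e.g. via mii_check_media() ... */
    if (running)
        schedule_delayed_work(&poll_work, 2 * HZ);    /* re-arm */
}

/* probe/open: INIT_DELAYED_WORK(&poll_work, poll_fn);
 *             running = true; schedule_delayed_work(&poll_work, 2 * HZ);
 * stop:       running = false; cancel_delayed_work(&poll_work); */
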
@@ -503,7 +521,7 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
/*
 * Search DM9000 board, allocate space and register it
 */
static int
static int __devinit
dm9000_probe(struct platform_device *pdev)
{
    struct dm9000_plat_data *pdata = pdev->dev.platform_data;

@@ -525,17 +543,21 @@ dm9000_probe(struct platform_device *pdev)

    SET_NETDEV_DEV(ndev, &pdev->dev);

    dev_dbg(&pdev->dev, "dm9000_probe()");
    dev_dbg(&pdev->dev, "dm9000_probe()\n");

    /* setup board info structure */
    db = (struct board_info *) ndev->priv;
    memset(db, 0, sizeof (*db));

    db->dev = &pdev->dev;
    db->ndev = ndev;

    spin_lock_init(&db->lock);
    mutex_init(&db->addr_lock);

    INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);


    if (pdev->num_resources < 2) {
        ret = -ENODEV;
        goto out;

@@ -761,6 +783,8 @@ dm9000_open(struct net_device *dev)

    mii_check_media(&db->mii, netif_msg_link(db), 1);
    netif_start_queue(dev);

    dm9000_schedule_poll(db);

    return 0;
}

@@ -879,6 +903,8 @@ dm9000_stop(struct net_device *ndev)
    if (netif_msg_ifdown(db))
        dev_dbg(db->dev, "shutting down %s\n", ndev->name);

    cancel_delayed_work(&db->phy_poll);

    netif_stop_queue(ndev);
    netif_carrier_off(ndev);

@@ -1288,6 +1314,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
    spin_unlock_irqrestore(&db->lock,flags);

    mutex_unlock(&db->addr_lock);

    dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
    return ret;
}

@@ -1301,6 +1329,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
    unsigned long flags;
    unsigned long reg_save;

    dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
    mutex_lock(&db->addr_lock);

    spin_lock_irqsave(&db->lock,flags);

@@ -1372,7 +1401,7 @@ dm9000_drv_resume(struct platform_device *dev)
    return 0;
}

static int
static int __devexit
dm9000_drv_remove(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);

@@ -1393,7 +1422,7 @@ static struct platform_driver dm9000_driver = {
        .owner   = THIS_MODULE,
    },
    .probe   = dm9000_probe,
    .remove  = dm9000_drv_remove,
    .remove  = __devexit_p(dm9000_drv_remove),
    .suspend = dm9000_drv_suspend,
    .resume  = dm9000_drv_resume,
};

@@ -40,7 +40,7 @@
#include <asm/io.h>

#define DRV_NAME "ehea"
#define DRV_VERSION "EHEA_0090"
#define DRV_VERSION "EHEA_0091"

/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1

@@ -118,6 +118,13 @@
#define EHEA_MR_ACC_CTRL 0x00800000

#define EHEA_BUSMAP_START 0x8000000000000000ULL
#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */
#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)


#define EHEA_WATCH_DOG_TIMEOUT 10*HZ

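As a rough illustration (mine, not part of the patch): with EHEA_DIR_INDEX_SHIFT = 13, a memory-section index splits into three 13-bit fields — top, dir and idx — so the busmap becomes a three-level radix tree over the section space. A stand-alone sketch of that decomposition, mirroring ehea_calc_index() further down in this diff:

/* Hypothetical stand-alone sketch of the 3-level index split. */
#include <stdio.h>

#define EHEA_DIR_INDEX_SHIFT 13
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES     (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_INDEX_MASK      (EHEA_MAP_ENTRIES - 1)

int main(void)
{
    unsigned long index = 0x12345678UL;    /* example section index */
    int top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
    int dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
    int idx = index & EHEA_INDEX_MASK;

    /* 0x12345678 -> top 0x4, dir 0x11a2, idx 0x1678 */
    printf("top=%#x dir=%#x idx=%#x\n", top, dir, idx);
    return 0;
}
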
@@ -192,10 +199,20 @@ struct h_epas {
                   set to 0 if unused */
};

struct ehea_busmap {
    unsigned int entries;        /* total number of entries */
    unsigned int valid_sections; /* number of valid sections */
    u64 *vaddr;
/*
 * Memory map data structures
 */
struct ehea_dir_bmap
{
    u64 ent[EHEA_MAP_ENTRIES];
};
struct ehea_top_bmap
{
    struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
};
struct ehea_bmap
{
    struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
};

struct ehea_qp;

@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

@@ -3503,6 +3504,24 @@ void ehea_crash_handler(void)
                       0, H_DEREG_BCMC);
}

static int ehea_mem_notifier(struct notifier_block *nb,
                             unsigned long action, void *data)
{
    switch (action) {
    case MEM_OFFLINE:
        ehea_info("memory has been removed");
        ehea_rereg_mrs(NULL);
        break;
    default:
        break;
    }
    return NOTIFY_OK;
}

static struct notifier_block ehea_mem_nb = {
    .notifier_call = ehea_mem_notifier,
};

static int ehea_reboot_notifier(struct notifier_block *nb,
                                unsigned long action, void *unused)
{

@@ -3581,6 +3600,10 @@ int __init ehea_module_init(void)
    if (ret)
        ehea_info("failed registering reboot notifier");

    ret = register_memory_notifier(&ehea_mem_nb);
    if (ret)
        ehea_info("failed registering memory remove notifier");

    ret = crash_shutdown_register(&ehea_crash_handler);
    if (ret)
        ehea_info("failed registering crash handler");

@@ -3604,6 +3627,7 @@ int __init ehea_module_init(void)
out3:
    ibmebus_unregister_driver(&ehea_driver);
out2:
    unregister_memory_notifier(&ehea_mem_nb);
    unregister_reboot_notifier(&ehea_reboot_nb);
    crash_shutdown_unregister(&ehea_crash_handler);
out:

@@ -3621,6 +3645,7 @@ static void __exit ehea_module_exit(void)
    ret = crash_shutdown_unregister(&ehea_crash_handler);
    if (ret)
        ehea_info("failed unregistering crash handler");
    unregister_memory_notifier(&ehea_mem_nb);
    kfree(ehea_fw_handles.arr);
    kfree(ehea_bcmc_regs.arr);
    ehea_destroy_busmap();

@@ -31,8 +31,8 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"

struct ehea_bmap *ehea_bmap = NULL;

struct ehea_busmap ehea_bmap = { 0, 0, NULL };


static void *hw_qpageit_get_inc(struct hw_queue *queue)

@@ -559,125 +559,253 @@ int ehea_destroy_qp(struct ehea_qp *qp)
    return 0;
}

int ehea_create_busmap(void)
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
    u64 vaddr = EHEA_BUSMAP_START;
    unsigned long high_section_index = 0;
    int i;
    return (i >> s) & EHEA_INDEX_MASK;
}

    /*
     * Sections are not in ascending order -> Loop over all sections and
     * find the highest PFN to compute the required map size.
     */
    ehea_bmap.valid_sections = 0;
static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
                                     int dir)
{
    if(!ehea_top_bmap->dir[dir]) {
        ehea_top_bmap->dir[dir] =
            kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
        if (!ehea_top_bmap->dir[dir])
            return -ENOMEM;
    }
    return 0;
}

    for (i = 0; i < NR_MEM_SECTIONS; i++)
        if (valid_section_nr(i))
            high_section_index = i;
static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
    if(!ehea_bmap->top[top]) {
        ehea_bmap->top[top] =
            kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
        if (!ehea_bmap->top[top])
            return -ENOMEM;
    }
    return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

    ehea_bmap.entries = high_section_index + 1;
    ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
static int ehea_create_busmap_callback(unsigned long pfn,
                                       unsigned long nr_pages, void *arg)
{
    unsigned long i, mr_len, start_section, end_section;
    start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
    end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
    mr_len = *(unsigned long *)arg;

    if (!ehea_bmap.vaddr)
    ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
    if (!ehea_bmap)
        return -ENOMEM;

    for (i = 0 ; i < ehea_bmap.entries; i++) {
        unsigned long pfn = section_nr_to_pfn(i);
    for (i = start_section; i < end_section; i++) {
        int ret;
        int top, dir, idx;
        u64 vaddr;

        if (pfn_valid(pfn)) {
            ehea_bmap.vaddr[i] = vaddr;
            vaddr += EHEA_SECTSIZE;
            ehea_bmap.valid_sections++;
        } else
            ehea_bmap.vaddr[i] = 0;
        top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
        dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);

        ret = ehea_init_bmap(ehea_bmap, top, dir);
        if(ret)
            return ret;

        idx = i & EHEA_INDEX_MASK;
        vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;

        ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
    }

    mr_len += nr_pages * PAGE_SIZE;
    *(unsigned long *)arg = mr_len;

    return 0;
}

static unsigned long ehea_mr_len;

static DEFINE_MUTEX(ehea_busmap_mutex);

int ehea_create_busmap(void)
{
    int ret;
    mutex_lock(&ehea_busmap_mutex);
    ehea_mr_len = 0;
    ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
                               ehea_create_busmap_callback);
    mutex_unlock(&ehea_busmap_mutex);
    return ret;
}

void ehea_destroy_busmap(void)
{
    vfree(ehea_bmap.vaddr);
    int top, dir;
    mutex_lock(&ehea_busmap_mutex);
    if (!ehea_bmap)
        goto out_destroy;

    for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
        if (!ehea_bmap->top[top])
            continue;

        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
            if (!ehea_bmap->top[top]->dir[dir])
                continue;

            kfree(ehea_bmap->top[top]->dir[dir]);
        }

        kfree(ehea_bmap->top[top]);
    }

    kfree(ehea_bmap);
    ehea_bmap = NULL;
out_destroy:
    mutex_unlock(&ehea_busmap_mutex);
}

u64 ehea_map_vaddr(void *caddr)
{
    u64 mapped_addr;
    unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
    int top, dir, idx;
    unsigned long index, offset;

    if (likely(index < ehea_bmap.entries)) {
        mapped_addr = ehea_bmap.vaddr[index];
        if (likely(mapped_addr))
            mapped_addr |= (((unsigned long)caddr)
                & (EHEA_SECTSIZE - 1));
        else
            mapped_addr = -1;
    } else
        mapped_addr = -1;
    if (!ehea_bmap)
        return EHEA_INVAL_ADDR;

    if (unlikely(mapped_addr == -1))
        if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
            schedule_work(&ehea_rereg_mr_task);
    index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
    top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
    if (!ehea_bmap->top[top])
        return EHEA_INVAL_ADDR;

    return mapped_addr;
    dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
    if (!ehea_bmap->top[top]->dir[dir])
        return EHEA_INVAL_ADDR;

    idx = index & EHEA_INDEX_MASK;
    if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
        return EHEA_INVAL_ADDR;

    offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
    return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
    unsigned long ret = idx;
    ret |= dir << EHEA_DIR_INDEX_SHIFT;
    ret |= top << EHEA_TOP_INDEX_SHIFT;
    return abs_to_virt(ret << SECTION_SIZE_BITS);
}

static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
                               struct ehea_adapter *adapter,
                               struct ehea_mr *mr)
{
    void *pg;
    u64 j, m, hret;
    unsigned long k = 0;
    u64 pt_abs = virt_to_abs(pt);

    void *sectbase = ehea_calc_sectbase(top, dir, idx);

    for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

        for (m = 0; m < EHEA_MAX_RPAGE; m++) {
            pg = sectbase + ((k++) * EHEA_PAGESIZE);
            pt[m] = virt_to_abs(pg);
        }
        hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
                                        0, pt_abs, EHEA_MAX_RPAGE);

        if ((hret != H_SUCCESS)
            && (hret != H_PAGE_REGISTERED)) {
            ehea_h_free_resource(adapter->handle, mr->handle,
                                 FORCE_FREE);
            ehea_error("register_rpage_mr failed");
            return hret;
        }
    }
    return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
                                struct ehea_adapter *adapter,
                                struct ehea_mr *mr)
{
    u64 hret = H_SUCCESS;
    int idx;

    for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
        if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
            continue;

        hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
        if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
            return hret;
    }
    return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
                                    struct ehea_adapter *adapter,
                                    struct ehea_mr *mr)
{
    u64 hret = H_SUCCESS;
    int dir;

    for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
        if (!ehea_bmap->top[top]->dir[dir])
            continue;

        hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
        if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
            return hret;
    }
    return hret;
}

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
    int ret;
    u64 *pt;
    void *pg;
    u64 hret, pt_abs, i, j, m, mr_len;
    u64 hret;
    u32 acc_ctrl = EHEA_MR_ACC_CTRL;

    mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
    unsigned long top;

    pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
    pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
    if (!pt) {
        ehea_error("no mem");
        ret = -ENOMEM;
        goto out;
    }
    pt_abs = virt_to_abs(pt);

    hret = ehea_h_alloc_resource_mr(adapter->handle,
                                    EHEA_BUSMAP_START, mr_len,
                                    acc_ctrl, adapter->pd,
    hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
                                    ehea_mr_len, acc_ctrl, adapter->pd,
                                    &mr->handle, &mr->lkey);

    if (hret != H_SUCCESS) {
        ehea_error("alloc_resource_mr failed");
        ret = -EIO;
        goto out;
    }

    for (i = 0 ; i < ehea_bmap.entries; i++)
        if (ehea_bmap.vaddr[i]) {
            void *sectbase = __va(i << SECTION_SIZE_BITS);
            unsigned long k = 0;
    if (!ehea_bmap) {
        ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
        ehea_error("no busmap available");
        ret = -EIO;
        goto out;
    }

            for (j = 0; j < (EHEA_PAGES_PER_SECTION /
                EHEA_MAX_RPAGE); j++) {
    for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
        if (!ehea_bmap->top[top])
            continue;

                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                    pg = sectbase + ((k++) * EHEA_PAGESIZE);
                    pt[m] = virt_to_abs(pg);
                }

                hret = ehea_h_register_rpage_mr(adapter->handle,
                                                mr->handle,
                                                0, 0, pt_abs,
                                                EHEA_MAX_RPAGE);
                if ((hret != H_SUCCESS)
                    && (hret != H_PAGE_REGISTERED)) {
                    ehea_h_free_resource(adapter->handle,
                                         mr->handle,
                                         FORCE_FREE);
                    ehea_error("register_rpage_mr failed");
                    ret = -EIO;
                    goto out;
                }
            }
        }
        hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
        if((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
            break;
    }

    if (hret != H_SUCCESS) {
        ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);

@@ -635,6 +635,8 @@ static void free_skb_resources(struct gfar_private *priv)
            dev_kfree_skb_any(priv->tx_skbuff[i]);
            priv->tx_skbuff[i] = NULL;
        }

        txbdp++;
    }

    kfree(priv->tx_skbuff);

(File diff suppressed because it is too large.)

@@ -10,7 +10,7 @@ struct mcp_dma_addr {
    __be32 low;
};

/* 4 Bytes.  8 Bytes for NDIS drivers. */
/* 4 Bytes */
struct mcp_slot {
    __sum16 checksum;
    __be16 length;

@@ -144,6 +144,7 @@ enum myri10ge_mcp_cmd_type {
     * a power of 2 number of entries. */

    MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */
#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31)

    /* command to bring ethernet interface up. Above parameters
     * (plus mtu & mac address) must have been exchanged prior

|
|||
MXGEFW_CMD_GET_MAX_RSS_QUEUES,
|
||||
MXGEFW_CMD_ENABLE_RSS_QUEUES,
|
||||
/* data0 = number of slices n (0, 1, ..., n-1) to enable
|
||||
* data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue.
|
||||
* data1 = interrupt mode.
|
||||
* 0=share one INTx/MSI, 1=use one MSI-X per queue.
|
||||
* If all queues share one interrupt, the driver must have set
|
||||
* RSS_SHARED_INTERRUPT_DMA before enabling queues.
|
||||
*/
|
||||
#define MXGEFW_SLICE_INTR_MODE_SHARED 0
|
||||
#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1
|
||||
|
||||
MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
|
||||
MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
|
||||
/* data0, data1 = bus address lsw, msw */
|
||||
|
@ -241,10 +246,14 @@ enum myri10ge_mcp_cmd_type {
|
|||
* 0: disable rss. nic does not distribute receive packets.
|
||||
* 1: enable rss. nic distributes receive packets among queues.
|
||||
* data1 = hash type
|
||||
* 1: IPV4
|
||||
* 2: TCP_IPV4
|
||||
* 3: IPV4 | TCP_IPV4
|
||||
* 1: IPV4 (required by RSS)
|
||||
* 2: TCP_IPV4 (required by RSS)
|
||||
* 3: IPV4 | TCP_IPV4 (required by RSS)
|
||||
* 4: source port
|
||||
*/
|
||||
#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
|
||||
#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
|
||||
#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
|
||||
|
||||
MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
|
||||
/* Return data = the max. size of the entire headers of a IPv6 TSO packet.
|
||||
|
@@ -260,6 +269,8 @@ enum myri10ge_mcp_cmd_type {
     * 0: Linux/FreeBSD style (NIC default)
     * 1: NDIS/NetBSD style
     */
#define MXGEFW_TSO_MODE_LINUX 0
#define MXGEFW_TSO_MODE_NDIS 1

    MXGEFW_CMD_MDIO_READ,
    /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */

@@ -286,6 +297,38 @@ enum myri10ge_mcp_cmd_type {
    /* Return data = NIC memory offset of mcp_vpump_public_global */
    MXGEFW_CMD_RESET_VPUMP,
    /* Resets the VPUMP state */

    MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE,
    /* data0 = mcp_slot type to use.
     * 0 = the default 4B mcp_slot
     * 1 = 8B mcp_slot_8
     */
#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0
#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1

    MXGEFW_CMD_SET_THROTTLE_FACTOR,
    /* set the throttle factor for ethp_z8e
     * data0 = throttle_factor
     * throttle_factor = 256 * pcie-raw-speed / tx_speed
     * tx_speed = 256 * pcie-raw-speed / throttle_factor
     *
     * For PCI-E x8: pcie-raw-speed == 16Gb/s
     * For PCI-E x4: pcie-raw-speed == 8Gb/s
     *
     * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s
     * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s
     *
     * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s
     * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s
     */

    MXGEFW_CMD_VPUMP_UP,
    /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */
    MXGEFW_CMD_GET_VPUMP_CLK,
    /* Get the lanai clock */

    MXGEFW_CMD_GET_DCA_OFFSET,
    /* offset of dca control for WDMAs */
};

enum myri10ge_mcp_cmd_status {

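As a quick sanity check on the throttle-factor formula above (my own worked example, not part of the patch): for PCI-E x8, pcie-raw-speed is 16 Gb/s, so throttle_factor 0x1a0 (416) gives tx_speed = 256 * 16 / 416 ≈ 9.85 Gb/s, matching ex1. A tiny stand-alone sketch:

/* Stand-alone sketch verifying the throttle-factor arithmetic. */
#include <stdio.h>

int main(void)
{
    double pcie_raw_gbps = 16.0;    /* PCI-E x8 */
    unsigned factors[] = { 0x1a0, 0x200 };

    for (int i = 0; i < 2; i++) {
        double tx_gbps = 256.0 * pcie_raw_gbps / factors[i];
        /* prints 9.846 for 0x1a0 and 8.000 for 0x200 */
        printf("factor %#x -> %.3f Gb/s\n", factors[i], tx_gbps);
    }
    return 0;
}
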
@@ -302,7 +345,8 @@ enum myri10ge_mcp_cmd_status {
    MXGEFW_CMD_ERROR_UNALIGNED,
    MXGEFW_CMD_ERROR_NO_MDIO,
    MXGEFW_CMD_ERROR_XFP_FAILURE,
    MXGEFW_CMD_ERROR_XFP_ABSENT
    MXGEFW_CMD_ERROR_XFP_ABSENT,
    MXGEFW_CMD_ERROR_BAD_PCIE_LINK
};

#define MXGEFW_OLD_IRQ_DATA_LEN 40

@@ -1,30 +1,6 @@
#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
#define __MYRI10GE_MCP_GEN_HEADER_H__

/* this file define a standard header used as a first entry point to
 * exchange information between firmware/driver and driver.  The
 * header structure can be anywhere in the mcp. It will usually be in
 * the .data section, because some fields needs to be initialized at
 * compile time.
 * The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must
 * contains the location of the header.
 *
 * Typically a MCP will start with the following:
 * .text
 * .space 52    ! to help catch MEMORY_INT errors
 * bt start     ! jump to real code
 * nop
 * .long _gen_mcp_header
 *
 * The source will have a definition like:
 *
 * mcp_gen_header_t gen_mcp_header = {
 *   .header_length = sizeof(mcp_gen_header_t),
 *   .mcp_type = MCP_TYPE_XXX,
 *   .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
 *   .mcp_globals = (unsigned)&Globals
 * };
 */

#define MCP_HEADER_PTR_OFFSET 0x3c

@@ -32,13 +8,14 @@
#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */
#define MCP_TYPE_ETH 0x45544820 /* "ETH " */
#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */
#define MCP_TYPE_DFLT 0x20202020 /* "    " */

struct mcp_gen_header {
    /* the first 4 fields are filled at compile time */
    unsigned header_length;
    __be32 mcp_type;
    char version[128];
    unsigned mcp_globals; /* pointer to mcp-type specific structure */
    unsigned mcp_private; /* pointer to mcp-type specific structure */

    /* filled by the MCP at run-time */
    unsigned sram_size;

@@ -53,6 +30,18 @@ struct mcp_gen_header {
     *
     * Never remove any field.  Keep everything naturally align.
     */

    /* Specifies if the running mcp is mcp0, 1, or 2. */
    unsigned char mcp_index;
    unsigned char disable_rabbit;
    unsigned char unaligned_tlp;
    unsigned char pad1;
    unsigned counters_addr;
    unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
    unsigned short handoff_id_major; /* must be equal */
    unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */
    unsigned msix_table_addr; /* start address of msix table in firmware */
    /* 8 */
};

#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */

@@ -1,5 +1,5 @@
sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
         i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
         tenxpress.o boards.o sfe4001.o
         i2c-direct.o selftest.o ethtool.o xfp_phy.o \
         mdio_10g.o tenxpress.o boards.o sfe4001.o

obj-$(CONFIG_SFC) += sfc.o

@@ -22,5 +22,7 @@ enum efx_board_type {
extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
extern int sfe4001_poweron(struct efx_nic *efx);
extern void sfe4001_poweroff(struct efx_nic *efx);
/* Are we putting the PHY into flash config mode */
extern unsigned int sfe4001_phy_flash_cfg;

#endif

@@ -1873,6 +1873,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
        tx_queue->queue = i;
        tx_queue->buffer = NULL;
        tx_queue->channel = &efx->channel[0]; /* for safety */
        tx_queue->tso_headers_free = NULL;
    }
    for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
        rx_queue = &efx->rx_queue[i];

@@ -2071,7 +2072,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
    net_dev = alloc_etherdev(sizeof(*efx));
    if (!net_dev)
        return -ENOMEM;
    net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
    net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
                          NETIF_F_HIGHDMA | NETIF_F_TSO);
    if (lro)
        net_dev->features |= NETIF_F_LRO;
    efx = net_dev->priv;

@@ -10,6 +10,55 @@
#ifndef EFX_ENUM_H
#define EFX_ENUM_H

/**
 * enum efx_loopback_mode - loopback modes
 * @LOOPBACK_NONE: no loopback
 * @LOOPBACK_XGMII: loopback within MAC at XGMII level
 * @LOOPBACK_XGXS: loopback within MAC at XGXS level
 * @LOOPBACK_XAUI: loopback within MAC at XAUI level
 * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
 * @LOOPBACK_PCS: loopback within PHY at PCS level
 * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
 * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
 */
/* Please keep in order and up-to-date w.r.t the following two #defines */
enum efx_loopback_mode {
    LOOPBACK_NONE = 0,
    LOOPBACK_MAC = 1,
    LOOPBACK_XGMII = 2,
    LOOPBACK_XGXS = 3,
    LOOPBACK_XAUI = 4,
    LOOPBACK_PHY = 5,
    LOOPBACK_PHYXS = 6,
    LOOPBACK_PCS = 7,
    LOOPBACK_PMAPMD = 8,
    LOOPBACK_NETWORK = 9,
    LOOPBACK_MAX
};

#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD

extern const char *efx_loopback_mode_names[];
#define LOOPBACK_MODE_NAME(mode) \
    STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
#define LOOPBACK_MODE(efx) \
    LOOPBACK_MODE_NAME(efx->loopback_mode)

/* These loopbacks occur within the controller */
#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \
                                (1 << LOOPBACK_XGXS) | \
                                (1 << LOOPBACK_XAUI))

#define LOOPBACK_MASK(_efx) \
    (1 << (_efx)->loopback_mode)

#define LOOPBACK_INTERNAL(_efx) \
    ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)

#define LOOPBACK_OUT_OF(_from, _to, _mask) \
    (((LOOPBACK_MASK(_from) & (_mask)) && \
      ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)

/*****************************************************************************/

/**

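A rough illustration of the mask macros above (mine, not from the patch): each loopback mode is a bit position, so membership tests reduce to single AND operations. A minimal stand-alone check:

/* Hypothetical check of the loopback-mask logic above. */
#include <stdio.h>

enum { LOOPBACK_NONE, LOOPBACK_MAC, LOOPBACK_XGMII, LOOPBACK_XGXS,
       LOOPBACK_XAUI, LOOPBACK_PHY };

#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII) | \
                                (1 << LOOPBACK_XGXS) | \
                                (1 << LOOPBACK_XAUI))

int main(void)
{
    int mode = LOOPBACK_XAUI;
    /* prints "internal: 1" -- XAUI loopback stays inside the controller */
    printf("internal: %d\n",
           (LOOPBACKS_10G_INTERNAL & (1 << mode)) ? 1 : 0);
    return 0;
}
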
@@ -12,12 +12,26 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "selftest.h"
#include "efx.h"
#include "ethtool.h"
#include "falcon.h"
#include "gmii.h"
#include "mac.h"

const char *efx_loopback_mode_names[] = {
    [LOOPBACK_NONE]    = "NONE",
    [LOOPBACK_MAC]     = "MAC",
    [LOOPBACK_XGMII]   = "XGMII",
    [LOOPBACK_XGXS]    = "XGXS",
    [LOOPBACK_XAUI]    = "XAUI",
    [LOOPBACK_PHY]     = "PHY",
    [LOOPBACK_PHYXS]   = "PHY(XS)",
    [LOOPBACK_PCS]     = "PHY(PCS)",
    [LOOPBACK_PMAPMD]  = "PHY(PMAPMD)",
    [LOOPBACK_NETWORK] = "NETWORK",
};

static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);

struct ethtool_string {

@@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
    strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}

/**
 * efx_fill_test - fill in an individual self-test entry
 * @test_index: Index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 * @test: Pointer to test result (used only if data != %NULL)
 * @unit_format: Unit name format (e.g. "channel\%d")
 * @unit_id: Unit id (e.g. 0 for "channel0")
 * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
 * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
 *
 * Fill in an individual self-test entry.
 */
static void efx_fill_test(unsigned int test_index,
                          struct ethtool_string *strings, u64 *data,
                          int *test, const char *unit_format, int unit_id,
                          const char *test_format, const char *test_id)
{
    struct ethtool_string unit_str, test_str;

    /* Fill data value, if applicable */
    if (data)
        data[test_index] = *test;

    /* Fill string, if applicable */
    if (strings) {
        snprintf(unit_str.name, sizeof(unit_str.name),
                 unit_format, unit_id);
        snprintf(test_str.name, sizeof(test_str.name),
                 test_format, test_id);
        snprintf(strings[test_index].name,
                 sizeof(strings[test_index].name),
                 "%-9s%-17s", unit_str.name, test_str.name);
    }
}

#define EFX_PORT_NAME "port%d", 0
#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter) \
    "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode)

/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx: Efx NIC
 * @lb_tests: Efx loopback self-test results structure
 * @mode: Loopback test mode
 * @test_index: Starting index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
                                  struct efx_loopback_self_tests *lb_tests,
                                  enum efx_loopback_mode mode,
                                  unsigned int test_index,
                                  struct ethtool_string *strings, u64 *data)
{
    struct efx_tx_queue *tx_queue;

    efx_for_each_tx_queue(tx_queue, efx) {
        efx_fill_test(test_index++, strings, data,
                      &lb_tests->tx_sent[tx_queue->queue],
                      EFX_TX_QUEUE_NAME(tx_queue),
                      EFX_LOOPBACK_NAME(mode, "tx_sent"));
        efx_fill_test(test_index++, strings, data,
                      &lb_tests->tx_done[tx_queue->queue],
                      EFX_TX_QUEUE_NAME(tx_queue),
                      EFX_LOOPBACK_NAME(mode, "tx_done"));
    }
    efx_fill_test(test_index++, strings, data,
                  &lb_tests->rx_good,
                  EFX_PORT_NAME,
                  EFX_LOOPBACK_NAME(mode, "rx_good"));
    efx_fill_test(test_index++, strings, data,
                  &lb_tests->rx_bad,
                  EFX_PORT_NAME,
                  EFX_LOOPBACK_NAME(mode, "rx_bad"));

    return test_index;
}

/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx: Efx NIC
 * @tests: Efx self-test results structure, or %NULL
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 */
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
                                       struct efx_self_tests *tests,
                                       struct ethtool_string *strings,
                                       u64 *data)
{
    struct efx_channel *channel;
    unsigned int n = 0;
    enum efx_loopback_mode mode;

    /* Interrupt */
    efx_fill_test(n++, strings, data, &tests->interrupt,
                  "core", 0, "interrupt", NULL);

    /* Event queues */
    efx_for_each_channel(channel, efx) {
        efx_fill_test(n++, strings, data,
                      &tests->eventq_dma[channel->channel],
                      EFX_CHANNEL_NAME(channel),
                      "eventq.dma", NULL);
        efx_fill_test(n++, strings, data,
                      &tests->eventq_int[channel->channel],
                      EFX_CHANNEL_NAME(channel),
                      "eventq.int", NULL);
        efx_fill_test(n++, strings, data,
                      &tests->eventq_poll[channel->channel],
                      EFX_CHANNEL_NAME(channel),
                      "eventq.poll", NULL);
    }

    /* PHY presence */
    efx_fill_test(n++, strings, data, &tests->phy_ok,
                  EFX_PORT_NAME, "phy_ok", NULL);

    /* Loopback tests */
    efx_fill_test(n++, strings, data, &tests->loopback_speed,
                  EFX_PORT_NAME, "loopback.speed", NULL);
    efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
                  EFX_PORT_NAME, "loopback.full_duplex", NULL);
    for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
        if (!(efx->loopback_modes & (1 << mode)))
            continue;
        n = efx_fill_loopback_test(efx,
                                   &tests->loopback[mode], mode, n,
                                   strings, data);
    }

    return n;
}

static int efx_ethtool_get_stats_count(struct net_device *net_dev)
{
    return EFX_ETHTOOL_NUM_STATS;
}

static int efx_ethtool_self_test_count(struct net_device *net_dev)
{
    struct efx_nic *efx = net_dev->priv;

    return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
}

static void efx_ethtool_get_strings(struct net_device *net_dev,
                                    u32 string_set, u8 *strings)
{
    struct efx_nic *efx = net_dev->priv;
    struct ethtool_string *ethtool_strings =
        (struct ethtool_string *)strings;
    int i;

    if (string_set == ETH_SS_STATS)
    switch (string_set) {
    case ETH_SS_STATS:
        for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
            strncpy(ethtool_strings[i].name,
                    efx_ethtool_stats[i].name,
                    sizeof(ethtool_strings[i].name));
        break;
    case ETH_SS_TEST:
        efx_ethtool_fill_self_tests(efx, NULL,
                                    ethtool_strings, NULL);
        break;
    default:
        /* No other string sets */
        break;
    }
}

static void efx_ethtool_get_stats(struct net_device *net_dev,

@@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
    }
}

static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
{
    int rc;

    /* Our TSO requires TX checksumming, so force TX checksumming
     * on when TSO is enabled.
     */
    if (enable) {
        rc = efx_ethtool_set_tx_csum(net_dev, 1);
        if (rc)
            return rc;
    }

    return ethtool_op_set_tso(net_dev, enable);
}

static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
{
    struct efx_nic *efx = net_dev->priv;

@@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)

    efx_flush_queues(efx);

    /* Our TSO requires TX checksumming, so disable TSO when
     * checksumming is disabled
     */
    if (!enable) {
        rc = efx_ethtool_set_tso(net_dev, 0);
        if (rc)
            return rc;
    }

    return 0;
}

@@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
    return efx->rx_checksum_enabled;
}

static void efx_ethtool_self_test(struct net_device *net_dev,
                                  struct ethtool_test *test, u64 *data)
{
    struct efx_nic *efx = net_dev->priv;
    struct efx_self_tests efx_tests;
    int offline, already_up;
    int rc;

    ASSERT_RTNL();
    if (efx->state != STATE_RUNNING) {
        rc = -EIO;
        goto fail1;
    }

    /* We need rx buffers and interrupts. */
    already_up = (efx->net_dev->flags & IFF_UP);
    if (!already_up) {
        rc = dev_open(efx->net_dev);
        if (rc) {
            EFX_ERR(efx, "failed opening device.\n");
            goto fail2;
        }
    }

    memset(&efx_tests, 0, sizeof(efx_tests));
    offline = (test->flags & ETH_TEST_FL_OFFLINE);

    /* Perform online self tests first */
    rc = efx_online_test(efx, &efx_tests);
    if (rc)
        goto out;

    /* Perform offline tests only if online tests passed */
    if (offline) {
        /* Stop the kernel from sending packets during the test. */
        efx_stop_queue(efx);
        rc = efx_flush_queues(efx);
        if (!rc)
            rc = efx_offline_test(efx, &efx_tests,
                                  efx->loopback_modes);
        efx_wake_queue(efx);
    }

out:
    if (!already_up)
        dev_close(efx->net_dev);

    EFX_LOG(efx, "%s all %sline self-tests\n",
            rc == 0 ? "passed" : "failed", offline ? "off" : "on");

fail2:
fail1:
    /* Fill ethtool results structures */
    efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
    if (rc)
        test->flags |= ETH_TEST_FL_FAILED;
}

/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{

@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = {
|
|||
.set_tx_csum = efx_ethtool_set_tx_csum,
|
||||
.get_sg = ethtool_op_get_sg,
|
||||
.set_sg = ethtool_op_set_sg,
|
||||
.get_tso = ethtool_op_get_tso,
|
||||
.set_tso = efx_ethtool_set_tso,
|
||||
.get_flags = ethtool_op_get_flags,
|
||||
.set_flags = ethtool_op_set_flags,
|
||||
.self_test_count = efx_ethtool_self_test_count,
|
||||
.self_test = efx_ethtool_self_test,
|
||||
.get_strings = efx_ethtool_get_strings,
|
||||
.phys_id = efx_ethtool_phys_id,
|
||||
.get_stats_count = efx_ethtool_get_stats_count,
|
||||
|
|
|
@@ -1129,6 +1129,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
	case RX_RECOVERY_EV_DECODE:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :

@@ -1731,7 +1732,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
	efx_oword_t temp;
	int count;

	if (FALCON_REV(efx) < FALCON_REV_B0)
	if ((FALCON_REV(efx) < FALCON_REV_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	falcon_read(efx, &temp, MAC0_CTRL_REG_KER);

@@ -2091,6 +2093,8 @@ static int falcon_probe_phy(struct efx_nic *efx)
			efx->phy_type);
		return -1;
	}

	efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
	return 0;
}

@@ -2468,14 +2472,12 @@ int falcon_probe_nic(struct efx_nic *efx)
 fail5:
	falcon_free_buffer(efx, &efx->irq_status);
 fail4:
	/* fall-thru */
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
	/* fall-thru */
 fail1:
	kfree(efx->nic_data);
	return rc;
@@ -636,6 +636,14 @@
#define XX_HIDRVA_WIDTH 1
#define XX_LODRVA_LBN 8
#define XX_LODRVA_WIDTH 1
#define XX_LPBKD_LBN 3
#define XX_LPBKD_WIDTH 1
#define XX_LPBKC_LBN 2
#define XX_LPBKC_WIDTH 1
#define XX_LPBKB_LBN 1
#define XX_LPBKB_WIDTH 1
#define XX_LPBKA_LBN 0
#define XX_LPBKA_WIDTH 1

#define XX_TXDRV_CTL_REG_MAC 0x12
#define XX_DEQD_LBN 28

@@ -656,8 +664,14 @@
#define XX_DTXA_WIDTH 4

/* XAUI XGXS core status register */
#define XX_FORCE_SIG_DECODE_FORCED 0xff
#define XX_CORE_STAT_REG_MAC 0x16
#define XX_FORCE_SIG_LBN 24
#define XX_FORCE_SIG_WIDTH 8
#define XX_FORCE_SIG_DECODE_FORCED 0xff
#define XX_XGXS_LB_EN_LBN 23
#define XX_XGXS_LB_EN_WIDTH 1
#define XX_XGMII_LB_EN_LBN 22
#define XX_XGMII_LB_EN_WIDTH 1
#define XX_ALIGN_DONE_LBN 20
#define XX_ALIGN_DONE_WIDTH 1
#define XX_SYNC_STAT_LBN 16
@@ -32,7 +32,7 @@
	(FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))

void falcon_xmac_writel(struct efx_nic *efx,
			 efx_dword_t *value, unsigned int mac_reg)
			efx_dword_t *value, unsigned int mac_reg)
{
	efx_oword_t temp;

@@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx)
		udelay(10);
	}

	/* This often fails when DSP is disabled, ignore it */
	if (sfe4001_phy_flash_cfg != 0)
		return 0;

	EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
	return -ETIMEDOUT;
}

@@ -223,7 +227,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
	/* The ISR latches, so clear it and re-read */
	falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
	falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);

	if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
	    EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
		EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));

@@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
{
	efx_dword_t reg;

	if (FALCON_REV(efx) < FALCON_REV_B0)
	if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* Flush the ISR */

@@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx)
	efx_dword_t reg;
	int align_done, sync_status, link_ok = 0;

	if (LOOPBACK_INTERNAL(efx))
		return 1;

	/* Read link status */
	falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);

@@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
	falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
}

static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
	efx_dword_t reg;
	int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
	int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
	int xgmii_loopback =
		(efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;

	/* The XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
	if (EFX_WORKAROUND_5147(efx)) {
		int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
		int reset_xgxs;

		falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
		old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
		old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);

		falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
		old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);

		/* The PHY driver may have turned XAUI off */
		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
			      (xaui_loopback != old_xaui_loopback) ||
			      (xgmii_loopback != old_xgmii_loopback));
		if (reset_xgxs) {
			falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
			falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
			udelay(1);
			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
			falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
			udelay(1);
		}
	}

	falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
	EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    XX_FORCE_SIG_DECODE_FORCED : 0);
	EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
	EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
	falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);

	falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
	EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
	EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
	EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
	EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
	falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
}

/* Try to bring up the Falcon side of the Falcon-PHY XAUI link if it
 * fails to come back up. Bash it until it comes back up */
static int falcon_check_xaui_link_up(struct efx_nic *efx)

@@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
	tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
	max_tries = tries;

	if (efx->phy_type == PHY_TYPE_NONE)
	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
	    (efx->phy_type == PHY_TYPE_NONE))
		return 0;

	while (tries) {

@@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx)
	falcon_mask_status_intr(efx, 0);

	falcon_deconfigure_mac_wrapper(efx);

	efx->tx_disabled = LOOPBACK_INTERNAL(efx);
	efx->phy_op->reconfigure(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* Ensure XAUI link is up */

@@ -491,13 +559,15 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
		(mac_stats->rx_bytes - mac_stats->rx_good_bytes);
}

#define EFX_XAUI_RETRAIN_MAX 8

int falcon_check_xmac(struct efx_nic *efx)
{
	unsigned xaui_link_ok;
	int rc;

	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
	    (efx->phy_type == PHY_TYPE_NONE))
		return 0;

	falcon_mask_status_intr(efx, 0);
	xaui_link_ok = falcon_xaui_link_ok(efx);
@@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
	int status;
	int phy_id = efx->mii.phy_id;

	if (LOOPBACK_INTERNAL(efx))
		return 0;

	/* Read MMD STATUS2 to check it is responding. */
	status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
	if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &

@@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
	int mmd = 0;
	int good;

	/* If the port is in loopback, then we should only consider a subset
	 * of mmd's */
	if (LOOPBACK_INTERNAL(efx))
		return 1;
	else if (efx->loopback_mode == LOOPBACK_NETWORK)
		return 0;
	else if (efx->loopback_mode == LOOPBACK_PHYXS)
		mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
			      MDIO_MMDREG_DEVS0_PCS |
			      MDIO_MMDREG_DEVS0_PMAPMD);
	else if (efx->loopback_mode == LOOPBACK_PCS)
		mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS |
			      MDIO_MMDREG_DEVS0_PMAPMD);
	else if (efx->loopback_mode == LOOPBACK_PMAPMD)
		mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD;

	while (mmd_mask) {
		if (mmd_mask & 1) {
			/* Double reads because link state is latched, and a

@@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
	return ok;
}

void mdio_clause45_transmit_disable(struct efx_nic *efx)
{
	int phy_id = efx->mii.phy_id;
	int ctrl1, ctrl2;

	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
					   MDIO_MMDREG_TXDIS);
	if (efx->tx_disabled)
		ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
	else
		ctrl2 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
	if (ctrl1 != ctrl2)
		mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
				    MDIO_MMDREG_TXDIS, ctrl2);
}

void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
{
	int phy_id = efx->mii.phy_id;
	int ctrl1, ctrl2;

	/* Handle (with debouncing) PMA/PMD loopback */
	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
					   MDIO_MMDREG_CTRL1);

	if (efx->loopback_mode == LOOPBACK_PMAPMD)
		ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
	else
		ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);

	if (ctrl1 != ctrl2)
		mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
				    MDIO_MMDREG_CTRL1, ctrl2);

	/* Handle (with debouncing) PCS loopback */
	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
					   MDIO_MMDREG_CTRL1);
	if (efx->loopback_mode == LOOPBACK_PCS)
		ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
	else
		ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);

	if (ctrl1 != ctrl2)
		mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS,
				    MDIO_MMDREG_CTRL1, ctrl2);

	/* Handle (with debouncing) PHYXS network loopback */
	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
					   MDIO_MMDREG_CTRL1);
	if (efx->loopback_mode == LOOPBACK_NETWORK)
		ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
	else
		ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);

	if (ctrl1 != ctrl2)
		mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
				    MDIO_MMDREG_CTRL1, ctrl2);
}
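All three loopback toggles above follow the same debounced read-modify-write shape. A hypothetical helper (not in this patch) that factors the pattern, using only the mdio_clause45_read/write calls already defined in this file:

	/* Hypothetical: set or clear bit "lbn" of (mmd, reg), writing only
	 * when the value actually changes ("debouncing"). */
	static void mdio_clause45_set_bit(struct efx_nic *efx, int mmd,
					  int reg, int lbn, int set)
	{
		int phy_id = efx->mii.phy_id;
		int ctrl1, ctrl2;

		ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, mmd, reg);
		if (set)
			ctrl2 |= (1 << lbn);
		else
			ctrl2 &= ~(1 << lbn);
		if (ctrl1 != ctrl2)
			mdio_clause45_write(efx, phy_id, mmd, reg, ctrl2);
	}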

/**
 * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
 * @efx:	Efx NIC
@@ -44,11 +44,16 @@
#define MDIO_MMDREG_DEVS1	(6)
#define MDIO_MMDREG_CTRL2	(7)
#define MDIO_MMDREG_STAT2	(8)
#define MDIO_MMDREG_TXDIS	(9)

/* Bits in MMDREG_CTRL1 */
/* Reset */
#define MDIO_MMDREG_CTRL1_RESET_LBN	(15)
#define MDIO_MMDREG_CTRL1_RESET_WIDTH	(1)
/* Loopback */
/* Loopback bit for WIS, PCS, PHYXS and DTEXS */
#define MDIO_MMDREG_CTRL1_LBACK_LBN	(14)
#define MDIO_MMDREG_CTRL1_LBACK_WIDTH	(1)

/* Bits in MMDREG_STAT1 */
#define MDIO_MMDREG_STAT1_FAULT_LBN	(7)

@@ -56,6 +61,9 @@
/* Link state */
#define MDIO_MMDREG_STAT1_LINK_LBN	(2)
#define MDIO_MMDREG_STAT1_LINK_WIDTH	(1)
/* Low power ability */
#define MDIO_MMDREG_STAT1_LPABLE_LBN	(1)
#define MDIO_MMDREG_STAT1_LPABLE_WIDTH	(1)

/* Bits in ID reg */
#define MDIO_ID_REV(_id32)	(_id32 & 0xf)

@@ -76,6 +84,14 @@
#define MDIO_MMDREG_STAT2_PRESENT_LBN	(14)
#define MDIO_MMDREG_STAT2_PRESENT_WIDTH	(2)

/* Bits in MMDREG_TXDIS */
#define MDIO_MMDREG_TXDIS_GLOBAL_LBN	(0)
#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH	(1)

/* MMD-specific bits, ordered by MMD, then register */
#define MDIO_PMAPMD_CTRL1_LBACK_LBN	(0)
#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH	(1)

/* PMA type (4 bits) */
#define MDIO_PMAPMD_CTRL2_10G_CX4	(0x0)
#define MDIO_PMAPMD_CTRL2_10G_EW	(0x1)

@@ -95,7 +111,7 @@
#define MDIO_PMAPMD_CTRL2_10_BT		(0xf)
#define MDIO_PMAPMD_CTRL2_TYPE_MASK	(0xf)

/* /\* PHY XGXS lane state *\/ */
/* PHY XGXS lane state */
#define MDIO_PHYXS_LANE_STATE		(0x18)
#define MDIO_PHYXS_LANE_ALIGNED_LBN	(12)

@@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
extern int mdio_clause45_links_ok(struct efx_nic *efx,
				  unsigned int mmd_mask);

/* Generic transmit disable support through PMAPMD */
extern void mdio_clause45_transmit_disable(struct efx_nic *efx);

/* Generic part of reconfigure: set/clear loopback bits */
extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);

/* Read (some of) the PHY settings over MDIO */
extern void mdio_clause45_get_settings(struct efx_nic *efx,
				       struct ethtool_cmd *ecmd);
@@ -134,6 +134,8 @@ struct efx_special_buffer {
 *	Set only on the final fragment of a packet; %NULL for all other
 *	fragments. When this fragment completes, then we can free this
 *	skb.
 * @tsoh: The associated TSO header structure, or %NULL if this
 *	buffer is not a TSO header.
 * @dma_addr: DMA address of the fragment.
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.

@@ -144,6 +146,7 @@ struct efx_special_buffer {
 */
struct efx_tx_buffer {
	const struct sk_buff *skb;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	unsigned short len;
	unsigned char continuation;

@@ -187,6 +190,13 @@ struct efx_tx_buffer {
 *	variable indicates that the queue is full. This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_headers_free: A list of TSO headers allocated for this TX queue
 *	that are not in use, and so available for new TSO sends. The list
 *	is protected by the TX queue lock.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */

@@ -206,6 +216,10 @@ struct efx_tx_queue {
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	struct efx_tso_header *tso_headers_free;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
};

/**

@@ -434,6 +448,9 @@ struct efx_board {
	struct efx_blinker blinker;
};

#define STRING_TABLE_LOOKUP(val, member) \
	member ## _names[val]

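For illustration only, a standalone sketch of the token-pasting lookup this macro performs; the "loopback_mode_names" table below is a made-up example, not the driver's actual table:

	#include <stdio.h>

	/* Same trick as STRING_TABLE_LOOKUP above: paste the member name
	 * onto "_names" and index the resulting array. */
	#define STRING_TABLE_LOOKUP(val, member) \
		member ## _names[val]

	static const char *loopback_mode_names[] = {
		"NONE", "XGMII", "XGXS", "XAUI",
	};

	int main(void)
	{
		/* Expands to loopback_mode_names[2], i.e. "XGXS" */
		printf("%s\n", STRING_TABLE_LOOKUP(2, loopback_mode));
		return 0;
	}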
enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,

@@ -506,6 +523,7 @@ enum efx_fc_type {
 * @check_hw: Check hardware
 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
 * @mmds: MMD presence mask
 * @loopbacks: Supported loopback modes mask
 */
struct efx_phy_operations {
	int (*init) (struct efx_nic *efx);

@@ -515,6 +533,7 @@ struct efx_phy_operations {
	int (*check_hw) (struct efx_nic *efx);
	void (*reset_xaui) (struct efx_nic *efx);
	int mmds;
	unsigned loopbacks;
};

/*

@@ -653,7 +672,6 @@ union efx_multicast_hash {
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mii: PHY interface
 * @phy_powered: PHY power state
 * @tx_disabled: PHY transmitter turned off
 * @link_up: Link status
 * @link_options: Link options (MII/GMII format)

@@ -662,6 +680,9 @@ union efx_multicast_hash {
 * @multicast_hash: Multicast hash table
 * @flow_control: Flow control flags - separate RX/TX so can't use link_options
 * @reconfigure_work: work item for dealing with PHY events
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 *
 * The @priv field of the corresponding &struct net_device points to
 * this.

@@ -721,6 +742,7 @@ struct efx_nic {
	struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mii_if_info mii;
	unsigned tx_disabled;

	int link_up;
	unsigned int link_options;

@@ -732,6 +754,10 @@ struct efx_nic {
	struct work_struct reconfigure_work;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	unsigned int loopback_modes;

	void *loopback_selftest;
};

/**
@@ -19,6 +19,7 @@
#include "rx.h"
#include "efx.h"
#include "falcon.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */

@@ -683,6 +684,15 @@ void __efx_rx_packet(struct efx_channel *channel,
	struct sk_buff *skb;
	int lro = efx->net_dev->features & NETIF_F_LRO;

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		goto done;
	}

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

@@ -736,7 +746,6 @@ void __efx_rx_packet(struct efx_channel *channel,
	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;

	/* fall-thru */
 done:
	efx->net_dev->last_rx = jiffies;
}
@@ -0,0 +1,717 @@
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <asm/io.h>
#include "net_driver.h"
#include "ethtool.h"
#include "efx.h"
#include "falcon.h"
#include "selftest.h"
#include "boards.h"
#include "workarounds.h"
#include "mac.h"

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	const char msg[64];
} __attribute__ ((packed));

/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char *payload_msg =
	"Hello world! This is an Efx loopback test in progress!";

/**
 * efx_selftest_state - persistent state during a selftest
 * @flush:		Drop all packets in efx_loopback_rx_packet
 * @packet_count:	Number of packets being used in this test
 * @skbs:		An array of skbs transmitted
 * @rx_good:		RX good packet count
 * @rx_bad:		RX bad packet count
 * @payload:		Payload used in tests
 */
struct efx_selftest_state {
	int flush;
	int packet_count;
	struct sk_buff **skbs;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************/

/* Level of loopback testing
 *
 * The maximum packet burst length is 16**(n-1), i.e.
 *
 * - Level 0 : no packets
 * - Level 1 : 1 packet
 * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
 * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets)
 *
 */
static unsigned int loopback_test_level = 3;
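For reference, a standalone sketch of the burst sizes this knob implies, matching the min(1 << (i << 2), ...) computation in efx_test_loopback() below; the TX ring is assumed here to be large enough that the clamp to a third of the ring never bites:

	#include <stdio.h>

	/* Burst length for iteration i is 16^i, computed as 1 << (i << 2). */
	int main(void)
	{
		unsigned int level = 3, i, total = 0;

		for (i = 0; i < level; i++) {
			unsigned int burst = 1u << (i << 2);	/* 16^i */
			total += burst;
			printf("iteration %u: %u packets\n", i, burst);
		}
		printf("total: %u packets\n", total);	/* 273 at level 3 */
		return 0;
	}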
/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;

	EFX_LOG(efx, "testing interrupts\n");
	tests->interrupt = -1;

	/* Reset interrupt flag */
	efx->last_irq_cpu = -1;
	smp_wmb();

	/* ACK each interrupting event queue. Receiving an interrupt due to
	 * traffic before a test event is raised is considered a pass */
	efx_for_each_channel_with_interrupt(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
		if (efx->last_irq_cpu >= 0)
			goto success;
	}

	falcon_generate_interrupt(efx);

	/* Wait for arrival of test interrupt. */
	EFX_LOG(efx, "waiting for test interrupt\n");
	schedule_timeout_uninterruptible(HZ / 10);
	if (efx->last_irq_cpu >= 0)
		goto success;

	EFX_ERR(efx, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n",
		efx->interrupt_mode, efx->last_irq_cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of non-interrupting events */
static int efx_test_eventq(struct efx_channel *channel,
			   struct efx_self_tests *tests)
{
	unsigned int magic;

	/* Channel specific code, limited to 20 bits */
	magic = (0x00010150 + channel->channel);
	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
		channel->channel, magic);

	tests->eventq_dma[channel->channel] = -1;
	tests->eventq_int[channel->channel] = 1;	/* fake pass */
	tests->eventq_poll[channel->channel] = 1;	/* fake pass */

	/* Reset flag and zero magic word */
	channel->efx->last_irq_cpu = -1;
	channel->eventq_magic = 0;
	smp_wmb();

	falcon_generate_test_event(channel, magic);
	udelay(1);

	efx_process_channel_now(channel);
	if (channel->eventq_magic != magic) {
		EFX_ERR(channel->efx, "channel %d failed to see test event\n",
			channel->channel);
		return -ETIMEDOUT;
	} else {
		tests->eventq_dma[channel->channel] = 1;
	}

	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_channel *channel,
			       struct efx_self_tests *tests)
{
	unsigned int magic, count;

	/* Channel specific code, limited to 20 bits */
	magic = (0x00010150 + channel->channel);
	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
		channel->channel, magic);

	tests->eventq_dma[channel->channel] = -1;
	tests->eventq_int[channel->channel] = -1;
	tests->eventq_poll[channel->channel] = -1;

	/* Reset flag and zero magic word */
	channel->efx->last_irq_cpu = -1;
	channel->eventq_magic = 0;
	smp_wmb();

	falcon_generate_test_event(channel, magic);

	/* Wait for arrival of interrupt */
	count = 0;
	do {
		schedule_timeout_uninterruptible(HZ / 100);

		if (channel->work_pending)
			efx_process_channel_now(channel);

		if (channel->eventq_magic == magic)
			goto eventq_ok;
	} while (++count < 2);

	EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
		channel->channel);

	/* See if interrupt arrived */
	if (channel->efx->last_irq_cpu >= 0) {
		EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
			"during event queue test\n", channel->channel,
			raw_smp_processor_id());
		tests->eventq_int[channel->channel] = 1;
	}

	/* Check to see if event was received even if interrupt wasn't */
	efx_process_channel_now(channel);
	if (channel->eventq_magic == magic) {
		EFX_ERR(channel->efx, "channel %d event was generated, but "
			"failed to trigger an interrupt\n", channel->channel);
		tests->eventq_dma[channel->channel] = 1;
	}

	return -ETIMEDOUT;
 eventq_ok:
	EFX_LOG(channel->efx, "channel %d event queue passed\n",
		channel->channel);
	tests->eventq_dma[channel->channel] = 1;
	tests->eventq_int[channel->channel] = 1;
	tests->eventq_poll[channel->channel] = 1;
	return 0;
}

/**************************************************************************
 *
 * PHY testing
 *
 **************************************************************************/

/* Check PHY presence by reading the PHY ID registers */
static int efx_test_phy(struct efx_nic *efx,
			struct efx_self_tests *tests)
{
	u16 physid1, physid2;
	struct mii_if_info *mii = &efx->mii;
	struct net_device *net_dev = efx->net_dev;

	if (efx->phy_type == PHY_TYPE_NONE)
		return 0;

	EFX_LOG(efx, "testing PHY presence\n");
	tests->phy_ok = -1;

	physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
	physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);

	if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
	    (physid2 != 0x0000) && (physid2 != 0xffff)) {
		EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
			mii->phy_id, physid1, physid2);
		tests->phy_ok = 1;
		return 0;
	}

	EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
	return -ENODEV;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_selftest_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	received = (struct efx_loopback_payload *)(char *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
			"test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
			"%s loopback test\n", pkt_len, (int)sizeof(*payload),
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding match */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
			LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
			"%s loopback test\n", ntohs(received->iteration),
			ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
		  LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef EFX_ENABLE_DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		EFX_ERR(efx, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		EFX_ERR(efx, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_selftest_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layer II header */
	memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
	memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}
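For concreteness, a standalone sketch of the on-wire size of the packed payload above, mirroring its layout with byte arrays (14-byte Ethernet, 20-byte IPv4 and 8-byte UDP headers are standard sizes):

	#include <stdio.h>

	/* Mirrors struct efx_loopback_payload: 14 + 20 + 8 + 2 + 64 bytes. */
	struct example_loopback_payload {
		unsigned char header[14];	/* struct ethhdr */
		unsigned char ip[20];		/* struct iphdr, ihl = 5 */
		unsigned char udp[8];		/* struct udphdr */
		unsigned char iteration[2];	/* __be16 */
		char msg[64];
	} __attribute__ ((packed));

	int main(void)
	{
		printf("payload size: %zu bytes\n",
		       sizeof(struct example_loopback_payload));	/* 108 */
		return 0;
	}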
static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_selftest_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i, rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		if (NET_DEV_REGISTERED(efx))
			netif_tx_lock_bh(efx->net_dev);
		rc = efx_xmit(efx, tx_queue, skb);
		if (NET_DEV_REGISTERED(efx))
			netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			EFX_ERR(efx, "TX queue %d could not transmit packet %d "
				"of %d in %s loopback test\n", tx_queue->queue,
				i + 1, state->packet_count, LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
			   struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_selftest_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	if (NET_DEV_REGISTERED(efx))
		netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb_any(skb);
	}

	if (NET_DEV_REGISTERED(efx))
		netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
			"TX completion events in %s loopback test\n",
			tx_queue->queue, tx_done, state->packet_count,
			LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
			"received packets in %s loopback test\n",
			tx_queue->queue, rx_good, state->packet_count,
			LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_selftest_state *state = efx->loopback_selftest;
	struct efx_channel *channel;
	int i, rc = 0;

	for (i = 0; i < loopback_test_level; i++) {
		/* Determine how many packets to send */
		state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kzalloc(sizeof(state->skbs[0]) *
				      state->packet_count, GFP_KERNEL);
		state->flush = 0;

		EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
			"packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
			state->packet_count);

		efx_iterate_state(efx);
		rc = efx_tx_loopback(tx_queue);

		/* NAPI polling is not enabled, so process channels synchronously */
		schedule_timeout_uninterruptible(HZ / 50);
		efx_for_each_channel_with_interrupt(channel, efx) {
			if (channel->work_pending)
				efx_process_channel_now(channel);
		}

		rc |= efx_rx_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return rc;
		}
	}

	EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
		"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
		state->packet_count);

	return rc;
}

static int efx_test_loopbacks(struct efx_nic *efx,
			      struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	struct efx_selftest_state *state = efx->loopback_selftest;
	struct ethtool_cmd ecmd, ecmd_loopback;
	struct efx_tx_queue *tx_queue;
	enum efx_loopback_mode old_mode, mode;
	int count, rc = 0, link_up;

	rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
	if (rc) {
		EFX_ERR(efx, "could not get GMII settings\n");
		return rc;
	}
	old_mode = efx->loopback_mode;

	/* Disable autonegotiation for the purposes of loopback */
	memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
	if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
		ecmd_loopback.autoneg = AUTONEG_DISABLE;
		ecmd_loopback.duplex = DUPLEX_FULL;
		ecmd_loopback.speed = SPEED_10000;
	}

	rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
	if (rc) {
		EFX_ERR(efx, "could not disable autonegotiation\n");
		goto out;
	}
	tests->loopback_speed = ecmd_loopback.speed;
	tests->loopback_full_duplex = ecmd_loopback.duplex;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = 1;
		efx->loopback_mode = mode;
		efx_reconfigure_port(efx);

		/* Wait for the PHY to signal the link is up */
		count = 0;
		do {
			struct efx_channel *channel = &efx->channel[0];

			falcon_check_xmac(efx);
			schedule_timeout_uninterruptible(HZ / 10);
			if (channel->work_pending)
				efx_process_channel_now(channel);
			/* Wait for PHY events to be processed */
			flush_workqueue(efx->workqueue);
			rmb();

			/* efx->link_up can be 1 even if the XAUI link is down,
			 * (bug5762). Usually, it's not worth bothering with the
			 * difference, but for selftests, we need that extra
			 * guarantee that the link is really, really, up.
			 */
			link_up = efx->link_up;
			if (!falcon_xaui_link_ok(efx))
				link_up = 0;

		} while ((++count < 20) && !link_up);

		/* The link should now be up. If it isn't, there is no point
		 * in attempting a loopback test */
		if (!link_up) {
			EFX_ERR(efx, "loopback %s never came up\n",
				LOOPBACK_MODE(efx));
			rc = -EIO;
			goto out;
		}

		EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
			LOOPBACK_MODE(efx), count);

		/* Test every TX queue */
		efx_for_each_tx_queue(tx_queue, efx) {
			rc |= efx_test_loopback(tx_queue,
						&tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Take out of loopback and restore PHY settings */
	state->flush = 1;
	efx->loopback_mode = old_mode;
	efx_ethtool_set_settings(efx->net_dev, &ecmd);

	return rc;
}

/**************************************************************************
 *
 * Entry points
 *
 *************************************************************************/

/* Online (i.e. non-disruptive) testing
 * This checks interrupt generation, event delivery and PHY presence. */
int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
{
	struct efx_channel *channel;
	int rc = 0;

	EFX_LOG(efx, "performing online self-tests\n");

	rc |= efx_test_interrupts(efx, tests);
	efx_for_each_channel(channel, efx) {
		if (channel->has_interrupt)
			rc |= efx_test_eventq_irq(channel, tests);
		else
			rc |= efx_test_eventq(channel, tests);
	}
	rc |= efx_test_phy(efx, tests);

	if (rc)
		EFX_ERR(efx, "failed online self-tests\n");

	return rc;
}

/* Offline (i.e. disruptive) testing
 * This checks MAC and PHY loopback on the specified port. */
int efx_offline_test(struct efx_nic *efx,
		     struct efx_self_tests *tests, unsigned int loopback_modes)
{
	struct efx_selftest_state *state;
	int rc = 0;

	EFX_LOG(efx, "performing offline self-tests\n");

	/* Create a selftest_state structure to hold state for the test */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	BUG_ON(efx->loopback_selftest);
	state->flush = 1;
	efx->loopback_selftest = (void *)state;

	rc = efx_test_loopbacks(efx, tests, loopback_modes);

	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

 out:
	if (rc)
		EFX_ERR(efx, "failed offline self-tests\n");

	return rc;
}
@@ -0,0 +1,50 @@
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_SELFTEST_H
#define EFX_SELFTEST_H

#include "net_driver.h"

/*
 * Self tests
 */

struct efx_loopback_self_tests {
	int tx_sent[EFX_MAX_TX_QUEUES];
	int tx_done[EFX_MAX_TX_QUEUES];
	int rx_good;
	int rx_bad;
};

/* Efx self test results
 * For fields which are not counters, 1 indicates success and -1
 * indicates failure.
 */
struct efx_self_tests {
	int interrupt;
	int eventq_dma[EFX_MAX_CHANNELS];
	int eventq_int[EFX_MAX_CHANNELS];
	int eventq_poll[EFX_MAX_CHANNELS];
	int phy_ok;
	int loopback_speed;
	int loopback_full_duplex;
	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
};

extern void efx_loopback_rx_packet(struct efx_nic *efx,
				   const char *buf_ptr, int pkt_len);
extern int efx_online_test(struct efx_nic *efx,
			   struct efx_self_tests *tests);
extern int efx_offline_test(struct efx_nic *efx,
			    struct efx_self_tests *tests,
			    unsigned int loopback_modes);

#endif /* EFX_SELFTEST_H */
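A minimal sketch, not part of this patch, of how a caller might fold these verdicts into a single pass/fail; treating a remaining 0 as "not run" is an assumption based on the memset in the ethtool self-test handler:

	#include "selftest.h"

	/* Hypothetical: 1 = pass, -1 = fail, 0 = not run. */
	static int efx_self_tests_passed(const struct efx_self_tests *tests,
					 int nr_channels)
	{
		int i;

		if (tests->interrupt < 0 || tests->phy_ok < 0)
			return 0;
		for (i = 0; i < nr_channels; i++)
			if (tests->eventq_dma[i] < 0 ||
			    tests->eventq_int[i] < 0 ||
			    tests->eventq_poll[i] < 0)
				return 0;
		return 1;
	}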
@@ -130,6 +130,15 @@ void sfe4001_poweroff(struct efx_nic *efx)
	(void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
}

/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
 * to the FLASH_CFG_1 input on the DSP. We must keep it high at power-
 * up to allow writing the flash (done through MDIO from userland).
 */
unsigned int sfe4001_phy_flash_cfg;
module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444);
MODULE_PARM_DESC(phy_flash_cfg,
		 "Force PHY to enter flash configuration mode");

/* This board uses an I2C expander to provide power to the PHY, which needs to
 * be turned on before the PHY can be used.
 * Context: Process context, rtnl lock held

@@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx)
	out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
		       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
		       (1 << P0_X_TRST_LBN));
	if (sfe4001_phy_flash_cfg)
		out |= 1 << P0_EN_3V3X_LBN;

	rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
	if (rc)

@@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx)
		if (in & (1 << P1_AFE_PWD_LBN))
			goto done;

		/* DSP doesn't look powered in flash config mode */
		if (sfe4001_phy_flash_cfg)
			goto done;
	} while (++count < 20);

	EFX_INFO(efx, "timed out waiting for power\n");
@@ -24,6 +24,11 @@
			      MDIO_MMDREG_DEVS0_PCS | \
			      MDIO_MMDREG_DEVS0_PHYXS)

#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
			     (1 << LOOPBACK_PCS) | \
			     (1 << LOOPBACK_PMAPMD) | \
			     (1 << LOOPBACK_NETWORK))

/* We complain if we fail to see the link partner as 10G capable this many
 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
 */

@@ -72,6 +77,10 @@
#define PMA_PMD_BIST_RXD_LBN	(1)
#define PMA_PMD_BIST_AFE_LBN	(0)

/* Special Software reset register */
#define PMA_PMD_EXT_CTRL_REG	49152
#define PMA_PMD_EXT_SSR_LBN	15

#define BIST_MAX_DELAY	(1000)
#define BIST_POLL_DELAY	(10)

@@ -86,6 +95,11 @@
#define PCS_TEST_SELECT_REG	0xd807	/* PRM 10.5.8 */
#define CLK312_EN_LBN	3

/* PHYXS registers */
#define PHYXS_TEST1	(49162)
#define LOOPBACK_NEAR_LBN	(8)
#define LOOPBACK_NEAR_WIDTH	(1)

/* Boot status register */
#define PCS_BOOT_STATUS_REG	(0xd000)
#define PCS_BOOT_FATAL_ERR_LBN	(0)

@@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold,

struct tenxpress_phy_data {
	enum tenxpress_state state;
	enum efx_loopback_mode loopback_mode;
	atomic_t bad_crc_count;
	int tx_disabled;
	int bad_lp_tries;
};

@@ -199,10 +215,12 @@ static int tenxpress_phy_init(struct efx_nic *efx)

	tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);

	rc = mdio_clause45_wait_reset_mmds(efx,
					   TENXPRESS_REQUIRED_DEVS);
	if (rc < 0)
		goto fail;
	if (!sfe4001_phy_flash_cfg) {
		rc = mdio_clause45_wait_reset_mmds(efx,
						   TENXPRESS_REQUIRED_DEVS);
		if (rc < 0)
			goto fail;
	}

	rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
	if (rc < 0)

@@ -225,6 +243,35 @@ static int tenxpress_phy_init(struct efx_nic *efx)
	return rc;
}

static int tenxpress_special_reset(struct efx_nic *efx)
{
	int rc, reg;

	EFX_TRACE(efx, "%s\n", __func__);

	/* Initiate reset */
	reg = mdio_clause45_read(efx, efx->mii.phy_id,
				 MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG);
	reg |= (1 << PMA_PMD_EXT_SSR_LBN);
	mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
			    PMA_PMD_EXT_CTRL_REG, reg);

	msleep(200);

	/* Wait for the blocks to come out of reset */
	rc = mdio_clause45_wait_reset_mmds(efx,
					   TENXPRESS_REQUIRED_DEVS);
	if (rc < 0)
		return rc;

	/* Try and reconfigure the device */
	rc = tenxpress_init(efx);
	if (rc < 0)
		return rc;

	return 0;
}

static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
{
	struct tenxpress_phy_data *pd = efx->phy_data;

@@ -299,11 +346,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
	return ok;
}

static void tenxpress_phyxs_loopback(struct efx_nic *efx)
{
	int phy_id = efx->mii.phy_id;
	int ctrl1, ctrl2;

	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
					   PHYXS_TEST1);
	if (efx->loopback_mode == LOOPBACK_PHYXS)
		ctrl2 |= (1 << LOOPBACK_NEAR_LBN);
	else
		ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN);
	if (ctrl1 != ctrl2)
		mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
				    PHYXS_TEST1, ctrl2);
}

static void tenxpress_phy_reconfigure(struct efx_nic *efx)
{
	struct tenxpress_phy_data *phy_data = efx->phy_data;
	int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
					  TENXPRESS_LOOPBACKS);

	if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
		return;

	/* When coming out of transmit disable, coming out of low power
	 * mode, or moving out of any PHY internal loopback mode,
	 * perform a special software reset */
	if ((phy_data->tx_disabled && !efx->tx_disabled) ||
	    loop_change) {
		(void) tenxpress_special_reset(efx);
		falcon_reset_xaui(efx);
	}

	mdio_clause45_transmit_disable(efx);
	mdio_clause45_phy_reconfigure(efx);
	tenxpress_phyxs_loopback(efx);

	phy_data->tx_disabled = efx->tx_disabled;
	phy_data->loopback_mode = efx->loopback_mode;
	efx->link_up = tenxpress_link_ok(efx, 0);
	efx->link_options = GM_LPA_10000FULL;
}

@@ -431,4 +513,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
	.clear_interrupt = tenxpress_phy_clear_interrupt,
	.reset_xaui      = tenxpress_reset_xaui,
	.mmds            = TENXPRESS_REQUIRED_DEVS,
	.loopbacks       = TENXPRESS_LOOPBACKS,
};
@@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header. Use TSOH_BUFFER()
 * to find the packet header data. Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length. TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       const struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
				 struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}

/*
 * Add a socket buffer to a TX queue

@@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo((struct sk_buff *)skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

@@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
		insert_ptr = (tx_queue->insert_count &
			      efx->type->txd_ring_mask);
		buffer = &tx_queue->buffer[insert_ptr];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->tsoh);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->continuation != 1);

@@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;

@@ -450,3 +498,619 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
}

/* Efx TCP segmentation acceleration.
 *
 * Why? Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#if defined(__i386__) || defined(__x86_64__)
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list. Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE	128
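For concreteness, a sketch of the size budget these macros imply; the 16-byte struct size (an 8-byte union plus an 8-byte dma_addr_t on a 64-bit build) and TSOH_OFFSET of 0 are assumptions for illustration only:

	#include <stdio.h>

	/* Mirrors TSOH_SIZE(hdr_len) above with example values. */
	#define EXAMPLE_TSOH_STRUCT_SIZE 16	/* assumed sizeof(struct efx_tso_header) */
	#define EXAMPLE_TSOH_OFFSET 0		/* TSOH_OFFSET on i386/x86_64 */
	#define EXAMPLE_TSOH_SIZE(hdr_len) \
		(EXAMPLE_TSOH_STRUCT_SIZE + EXAMPLE_TSOH_OFFSET + (hdr_len))

	int main(void)
	{
		/* A typical 54-byte Ethernet + IPv4 + TCP header needs 70
		 * bytes, comfortably inside a 128-byte standard block;
		 * headers over 128 - 16 = 112 bytes fall back to the heap. */
		printf("TSOH_SIZE(54) = %d\n", EXAMPLE_TSOH_SIZE(54));
		return 0;
	}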
|
||||
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
|
||||
#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
|
||||
#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
|
||||
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @remaining_len: Bytes of data we've yet to segment
 * @seqnum: Current sequence number
 * @packet_space: Remaining space in current packet
 * @ifc: Input fragment cursor.
 *      Where we are in the current fragment of the incoming SKB.  These
 *      values get updated in place when we split a fragment over
 *      multiple packets.
 * @p: Parameters.
 *      These values are set once at the start of the TSO send and do
 *      not get changed as the routine progresses.
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
        unsigned remaining_len;
        unsigned seqnum;
        unsigned packet_space;

        struct {
                /* DMA address of current position */
                dma_addr_t dma_addr;
                /* Remaining length */
                unsigned int len;
                /* DMA address and length of the whole fragment */
                unsigned int unmap_len;
                dma_addr_t unmap_addr;
                struct page *page;
                unsigned page_off;
        } ifc;

        struct {
                /* The number of bytes of header */
                unsigned int header_length;

                /* The number of bytes to put in each outgoing segment. */
                int full_packet_size;

                /* Current IPv4 ID, host endian. */
                unsigned ipv4_id;
        } p;
};
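
/* Shape of a run, with assumed numbers: segmenting a 4380 byte payload
 * at gso_size == 1460 starts with remaining_len == 4380; each piece
 * consumed from the fragment cursor decrements ifc.len, packet_space
 * and remaining_len together, so the burst drains in three full-sized
 * packets, with @p untouched throughout.
 */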


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.
 */
static inline void efx_tso_check_safe(const struct sk_buff *skb)
{
        EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
        EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                            skb->protocol);
        EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
                             + (tcp_hdr(skb)->doff << 2u)) >
                            skb_headlen(skb));
}
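
/* These assertions can afford to be fatal because the stack only hands
 * the driver TSO skbs that match its advertised features: presumably
 * NETIF_F_TSO for IPv4 TCP only (no TSO6 here), with the complete
 * Ethernet, IP and TCP headers inside the linear area.
 */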


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
        struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
        struct efx_tso_header *tsoh;
        dma_addr_t dma_addr;
        u8 *base_kva, *kva;

        base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
        if (base_kva == NULL) {
                EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
                        " headers\n");
                return -ENOMEM;
        }

        /* pci_alloc_consistent() allocates pages. */
        EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

        for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
                tsoh = (struct efx_tso_header *)kva;
                tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
                tsoh->next = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh;
        }

        return 0;
}
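
/* With the common PAGE_SIZE of 4096 (an assumption; it varies by
 * architecture) each call strings 4096 / TSOH_STD_SIZE == 32 headers
 * onto the free list, each with dma_addr set to the page's DMA base
 * plus that header's byte offset within the page.
 */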


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
                                struct efx_tso_header *tsoh,
                                struct pci_dev *pci_dev)
{
        struct efx_tso_header **p;
        unsigned long base_kva;
        dma_addr_t base_dma;

        base_kva = (unsigned long)tsoh & PAGE_MASK;
        base_dma = tsoh->dma_addr & PAGE_MASK;

        p = &tx_queue->tso_headers_free;
        while (*p != NULL)
                if (((unsigned long)*p & PAGE_MASK) == base_kva)
                        *p = (*p)->next;
                else
                        p = &(*p)->next;

        pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
        struct efx_tso_header *tsoh;

        tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
        if (unlikely(!tsoh))
                return NULL;

        tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        PCI_DMA_TODEVICE);
        if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }

        tsoh->unmap_len = header_len;
        return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
        pci_unmap_single(tx_queue->efx->pci_dev,
                         tsoh->dma_addr, tsoh->unmap_len,
                         PCI_DMA_TODEVICE);
        kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @skb: Only non-null for end of last segment
 * @end_of_packet: True if last fragment in a packet
 * @unmap_addr: DMA address of fragment for unmapping
 * @unmap_len: Only set this in last segment of a fragment
 *
 * Push descriptors onto the TX queue.  Return 0 on success, or 1 if
 * @tx_queue is full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                               dma_addr_t dma_addr, unsigned len,
                               const struct sk_buff *skb, int end_of_packet,
                               dma_addr_t unmap_addr, unsigned unmap_len)
{
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len, fill_level, insert_ptr, misalign;
        int q_space;

        EFX_BUG_ON_PARANOID(len <= 0);

        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
        /* -1 as there is no way to represent all descriptors used */
        q_space = efx->type->txd_ring_mask - 1 - fill_level;

        while (1) {
                if (unlikely(q_space-- <= 0)) {
                        /* It might be that completions have happened
                         * since the xmit path last checked.  Update
                         * the xmit path's copy of read_count.
                         */
                        ++tx_queue->stopped;
                        /* This memory barrier protects the change of
                         * stopped from the access of read_count. */
                        smp_mb();
                        tx_queue->old_read_count =
                                *(volatile unsigned *)&tx_queue->read_count;
                        fill_level = (tx_queue->insert_count
                                      - tx_queue->old_read_count);
                        q_space = efx->type->txd_ring_mask - 1 - fill_level;
                        if (unlikely(q_space-- <= 0))
                                return 1;
                        smp_mb();
                        --tx_queue->stopped;
                }

                insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;

                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >
                                    efx->type->txd_ring_mask);

                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
                EFX_BUG_ON_PARANOID(buffer->skb);
                EFX_BUG_ON_PARANOID(buffer->continuation != 1);
                EFX_BUG_ON_PARANOID(buffer->tsoh);

                buffer->dma_addr = dma_addr;

                /* Ensure we do not cross a boundary unsupported by H/W */
                dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;

                misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
                if (misalign && dma_len + misalign > 512)
                        dma_len = 512 - misalign;

                /* If there is enough space to send then do so */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len; /* Don't set the other members */
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        buffer->skb = skb;
        buffer->continuation = !end_of_packet;
        buffer->unmap_addr = unmap_addr;
        buffer->unmap_len = unmap_len;
        return 0;
}
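
/* Boundary arithmetic, worked through with assumed mask values: with
 * tx_dma_mask == 0xfff (4K boundaries) and dma_addr == 0x12345f80,
 * dma_len = (~0x12345f80 & 0xfff) + 1 = 0x80, so the first descriptor
 * covers only the 128 bytes up to the boundary and the loop continues
 * from 0x12346000 with the remainder of the fragment.
 */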


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
                                      struct efx_tso_header *tsoh, unsigned len)
{
        struct efx_tx_buffer *buffer;

        buffer = &tx_queue->buffer[tx_queue->insert_count &
                                   tx_queue->efx->type->txd_ring_mask];
        efx_tsoh_free(tx_queue, buffer);
        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);
        EFX_BUG_ON_PARANOID(buffer->skb);
        EFX_BUG_ON_PARANOID(buffer->continuation != 1);
        EFX_BUG_ON_PARANOID(buffer->tsoh);
        buffer->len = len;
        buffer->dma_addr = tsoh->dma_addr;
        buffer->tsoh = tsoh;

        ++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->efx->type->txd_ring_mask];
                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->skb);
                buffer->len = 0;
                buffer->continuation = 1;
                if (buffer->unmap_len) {
                        pci_unmap_page(tx_queue->efx->pci_dev,
                                       buffer->unmap_addr,
                                       buffer->unmap_len, PCI_DMA_TODEVICE);
                        buffer->unmap_len = 0;
                }
        }
}


/* Parse the SKB header and initialise state. */
static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
        /* All ethernet/IP/TCP headers combined size is TCP header size
         * plus offset of TCP header relative to start of packet.
         */
        st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
                               + PTR_DIFF(tcp_hdr(skb), skb->data));
        st->p.full_packet_size = (st->p.header_length
                                  + skb_shinfo(skb)->gso_size);

        st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
        st->seqnum = ntohl(tcp_hdr(skb)->seq);

        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

        st->packet_space = st->p.full_packet_size;
        st->remaining_len = skb->len - st->p.header_length;
}
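
/* Concrete numbers, assumed for illustration: a plain Ethernet + IPv4 +
 * TCP frame with no options has doff == 5, so header_length =
 * (5 << 2) + 34 = 54; with gso_size == 1460 each full segment comes to
 * full_packet_size == 1514 bytes on the wire.
 */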


/**
 * tso_get_fragment - record fragment details and map for DMA
 * @st: TSO state
 * @efx: Efx NIC
 * @len: Length of fragment
 * @page: Page containing fragment data
 * @page_off: Offset of fragment within @page
 *
 * Record fragment details and map for DMA.  Return 0 on success, or
 * -%ENOMEM if DMA mapping fails.
 */
static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                                   int len, struct page *page, int page_off)
{
        st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
                                          len, PCI_DMA_TODEVICE);
        if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
                st->ifc.unmap_len = len;
                st->ifc.len = len;
                st->ifc.dma_addr = st->ifc.unmap_addr;
                st->ifc.page = page;
                st->ifc.page_off = page_off;
                return 0;
        }
        return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, or 1 if not
 * enough space in @tx_queue.
 */
static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                                const struct sk_buff *skb,
                                                struct tso_state *st)
{
        int n, end_of_packet, rc;

        if (st->ifc.len == 0)
                return 0;
        if (st->packet_space == 0)
                return 0;

        EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);

        n = min(st->ifc.len, st->packet_space);

        st->packet_space -= n;
        st->remaining_len -= n;
        st->ifc.len -= n;
        st->ifc.page_off += n;
        end_of_packet = st->remaining_len == 0 || st->packet_space == 0;

        rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
                                 st->remaining_len ? NULL : skb,
                                 end_of_packet, st->ifc.unmap_addr,
                                 st->ifc.len ? 0 : st->ifc.unmap_len);

        st->ifc.dma_addr += n;

        return rc;
}
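
/* Note the bookkeeping above: unmap_len is forwarded to
 * efx_tx_queue_insert() only by the piece that exhausts the fragment
 * (st->ifc.len reaching 0), so pci_unmap_page() runs exactly once per
 * fragment on completion, and @skb is attached only to the final piece
 * of the whole burst so the skb itself is freed exactly once.
 */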


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 on failure to allocate a header.
 */
static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                       const struct sk_buff *skb,
                                       struct tso_state *st)
{
        struct efx_tso_header *tsoh;
        struct iphdr *tsoh_iph;
        struct tcphdr *tsoh_th;
        unsigned ip_length;
        u8 *header;

        /* Allocate a DMA-mapped header buffer. */
        if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
                if (tx_queue->tso_headers_free == NULL)
                        if (efx_tsoh_block_alloc(tx_queue))
                                return -1;
                EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
                tsoh = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh->next;
                tsoh->unmap_len = 0;
        } else {
                tx_queue->tso_long_headers++;
                tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
                if (unlikely(!tsoh))
                        return -1;
        }

        header = TSOH_BUFFER(tsoh);
        tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
        tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));

        /* Copy and update the headers. */
        memcpy(header, skb->data, st->p.header_length);

        tsoh_th->seq = htonl(st->seqnum);
        st->seqnum += skb_shinfo(skb)->gso_size;
        if (st->remaining_len > skb_shinfo(skb)->gso_size) {
                /* This packet will not finish the TSO burst. */
                ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
                tsoh_th->fin = 0;
                tsoh_th->psh = 0;
        } else {
                /* This packet will be the last in the TSO burst. */
                ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
                             + st->remaining_len);
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        }
        tsoh_iph->tot_len = htons(ip_length);

        /* Linux leaves suitable gaps in the IP ID space for us to fill. */
        tsoh_iph->id = htons(st->p.ipv4_id);
        st->p.ipv4_id++;

        st->packet_space = skb_shinfo(skb)->gso_size;
        ++tx_queue->tso_packets;

        /* Form a descriptor for this header. */
        efx_tso_put_header(tx_queue, tsoh, st->p.header_length);

        return 0;
}
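
/* The header rewrite by the numbers (values assumed): with
 * header_length == 54, ETH_HDR_LEN == 14 and gso_size == 1460, every
 * non-final segment gets tot_len = 1514 - 14 = 1500 and the final one
 * tot_len = 40 + remaining_len; seq advances by 1460 and the IPv4 ID
 * by 1 per segment, filling the ID gap the stack left for this skb,
 * and FIN/PSH are only ever copied onto the final segment.
 */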


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, performing TSO, or return
 * non-zero if @skb could not be enqueued.  In all cases @skb is
 * consumed.  Return %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               const struct sk_buff *skb)
{
        int frag_i, rc, rc2 = NETDEV_TX_OK;
        struct tso_state state;
        skb_frag_t *f;

        /* Verify TSO is safe - these checks should never fail. */
        efx_tso_check_safe(skb);

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        tso_start(&state, skb);

        /* Assume that skb header area contains exactly the headers, and
         * all payload is in the frag list.
         */
        if (skb_headlen(skb) == state.p.header_length) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
                f = &skb_shinfo(skb)->frags[frag_i];
                rc = tso_get_fragment(&state, tx_queue->efx,
                                      f->size, f->page, f->page_offset);
                if (rc)
                        goto mem_err;
        } else {
                /* It may look like this code fragment assumes that the
                 * skb->data portion does not cross a page boundary, but
                 * that is not the case.  It is guaranteed to be direct
                 * mapped memory, and therefore is physically contiguous,
                 * and so DMA will work fine.  kmap_atomic() on this region
                 * will just return the direct mapping, so that will work
                 * too.
                 */
                int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
                int hl = state.p.header_length;
                rc = tso_get_fragment(&state, tx_queue->efx,
                                      skb_headlen(skb) - hl,
                                      virt_to_page(skb->data), page_off + hl);
                if (rc)
                        goto mem_err;
                frag_i = -1;
        }

        if (tso_start_new_packet(tx_queue, skb, &state) < 0)
                goto mem_err;

        while (1) {
                rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
                if (unlikely(rc))
                        goto stop;

                /* Move onto the next fragment? */
                if (state.ifc.len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                                /* End of payload reached. */
                                break;
                        f = &skb_shinfo(skb)->frags[frag_i];
                        rc = tso_get_fragment(&state, tx_queue->efx,
                                              f->size, f->page, f->page_offset);
                        if (rc)
                                goto mem_err;
                }

                /* Start a new packet? */
                if (state.packet_space == 0 &&
                    tso_start_new_packet(tx_queue, skb, &state) < 0)
                        goto mem_err;
        }

        /* Pass off to hardware */
        falcon_push_buffers(tx_queue);

        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;

 mem_err:
        EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
                " error\n");
        dev_kfree_skb_any((struct sk_buff *)skb);
        goto unwind;

 stop:
        rc2 = NETDEV_TX_BUSY;

        /* Stop the queue if it wasn't stopped before. */
        if (tx_queue->stopped == 1)
                efx_stop_queue(tx_queue->efx);

 unwind:
        efx_enqueue_unwind(tx_queue);
        return rc2;
}
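
/* End-to-end walk-through, sizes assumed: a 4434 byte skb whose linear
 * area holds exactly the 54 byte headers and whose single page fragment
 * carries 4380 bytes of payload maps that fragment once, then emits
 * three 1460 byte segments; each costs one header descriptor plus at
 * least one payload descriptor, and only the last payload descriptor of
 * the burst carries the skb pointer for freeing on completion.
 */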


/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
        unsigned i;

        if (tx_queue->buffer)
                for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
                        efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);

        while (tx_queue->tso_headers_free != NULL)
                efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
                                    tx_queue->efx->pci_dev);
}

@@ -24,6 +24,10 @@
                               MDIO_MMDREG_DEVS0_PMAPMD | \
                               MDIO_MMDREG_DEVS0_PHYXS)

#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \
                       (1 << LOOPBACK_PMAPMD) | \
                       (1 << LOOPBACK_NETWORK))

/****************************************************************************/
/* Quake-specific MDIO registers */
#define MDIO_QUAKE_LED0_REG (0xD006)

@@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode)
                    mode);
}

struct xfp_phy_data {
        int tx_disabled;
};

#define XFP_MAX_RESET_TIME 500
#define XFP_RESET_WAIT 10


@@ -72,18 +80,31 @@ static int xfp_reset_phy(struct efx_nic *efx)

static int xfp_phy_init(struct efx_nic *efx)
{
        struct xfp_phy_data *phy_data;
        u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
        int rc;

        phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
        efx->phy_data = (void *) phy_data;

        EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
                 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
                 MDIO_ID_REV(devid));

        phy_data->tx_disabled = efx->tx_disabled;

        rc = xfp_reset_phy(efx);

        EFX_INFO(efx, "XFP: PHY init %s.\n",
                 rc ? "failed" : "successful");
        if (rc < 0)
                goto fail;

        return 0;

 fail:
        kfree(efx->phy_data);
        efx->phy_data = NULL;
        return rc;
}


@@ -110,6 +131,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx)

static void xfp_phy_reconfigure(struct efx_nic *efx)
{
        struct xfp_phy_data *phy_data = efx->phy_data;

        /* Reset the PHY when moving from tx off to tx on */
        if (phy_data->tx_disabled && !efx->tx_disabled)
                xfp_reset_phy(efx);

        mdio_clause45_transmit_disable(efx);
        mdio_clause45_phy_reconfigure(efx);

        phy_data->tx_disabled = efx->tx_disabled;
        efx->link_up = xfp_link_ok(efx);
        efx->link_options = GM_LPA_10000FULL;
}

@@ -119,6 +150,10 @@ static void xfp_phy_fini(struct efx_nic *efx)
{
        /* Clobber the LED if it was blinking */
        efx->board_info.blink(efx, 0);

        /* Free the context block */
        kfree(efx->phy_data);
        efx->phy_data = NULL;
}

struct efx_phy_operations falcon_xfp_phy_ops = {

@@ -129,4 +164,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
        .clear_interrupt = xfp_phy_clear_interrupt,
        .reset_xaui      = efx_port_dummy_op_void,
        .mmds            = XFP_REQUIRED_DEVS,
        .loopbacks       = XFP_LOOPBACKS,
};

@@ -1966,13 +1966,13 @@ struct sky2_status_le {
struct tx_ring_info {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(mapaddr);
        DECLARE_PCI_UNMAP_ADDR(maplen);
        DECLARE_PCI_UNMAP_LEN(maplen);
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t data_addr;
        DECLARE_PCI_UNMAP_ADDR(data_size);
        DECLARE_PCI_UNMAP_LEN(data_size);
        dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
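
/* maplen and data_size hold mapping lengths, not bus addresses, so they
 * belong in DECLARE_PCI_UNMAP_LEN(): the ADDR/LEN macro pairs compile
 * away entirely on configurations that need no explicit unmapping, and
 * the pci_unmap_len() accessors only match fields declared with the
 * LEN variant.
 */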