2019-05-21 01:07:58 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2007-02-08 23:42:37 +08:00
|
|
|
/*
|
|
|
|
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
|
2008-02-03 09:50:04 +08:00
|
|
|
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
|
2008-05-10 11:12:09 +08:00
|
|
|
* Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
|
2007-02-08 23:42:37 +08:00
|
|
|
*
|
|
|
|
* Derived from Intel e1000 driver
|
|
|
|
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
|
|
|
|
*
|
|
|
|
* Contact Information:
|
2008-09-27 12:17:23 +08:00
|
|
|
* Xiong Huang <xiong.huang@atheros.com>
|
|
|
|
* Jie Yang <jie.yang@atheros.com>
|
2007-02-08 23:42:37 +08:00
|
|
|
* Chris Snook <csnook@redhat.com>
|
|
|
|
* Jay Cliburn <jcliburn@gmail.com>
|
|
|
|
*
|
2008-09-27 12:17:23 +08:00
|
|
|
* This version is adapted from the Attansic reference driver.
|
2007-02-08 23:42:37 +08:00
|
|
|
*
|
|
|
|
* TODO:
|
2007-07-16 00:03:27 +08:00
|
|
|
* Add more ethtool functions.
|
2007-02-08 23:42:37 +08:00
|
|
|
* Fix abstruse irq enable/disable condition described here:
|
|
|
|
* http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
|
|
|
|
*
|
|
|
|
* NEEDS TESTING:
|
|
|
|
* VLAN
|
|
|
|
* multicast
|
|
|
|
* promiscuous mode
|
|
|
|
* interrupt coalescing
|
|
|
|
* SMP torture testing
|
|
|
|
*/
|
|
|
|
|
2011-07-27 07:09:06 +08:00
|
|
|
#include <linux/atomic.h>
|
2008-02-03 09:50:04 +08:00
|
|
|
#include <asm/byteorder.h>
|
|
|
|
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
#include <linux/crc32.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
2007-02-08 23:42:37 +08:00
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/hardirq.h>
|
2008-02-03 09:50:04 +08:00
|
|
|
#include <linux/if_ether.h>
|
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
#include <linux/in.h>
|
2007-02-08 23:42:37 +08:00
|
|
|
#include <linux/interrupt.h>
|
2008-02-03 09:50:04 +08:00
|
|
|
#include <linux/ip.h>
|
2007-02-08 23:42:37 +08:00
|
|
|
#include <linux/irqflags.h>
|
2008-02-03 09:50:04 +08:00
|
|
|
#include <linux/irqreturn.h>
|
|
|
|
#include <linux/jiffies.h>
|
|
|
|
#include <linux/mii.h>
|
|
|
|
#include <linux/module.h>
|
2007-02-08 23:42:37 +08:00
|
|
|
#include <linux/net.h>
|
2008-02-03 09:50:04 +08:00
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/pci_ids.h>
|
2007-02-08 23:42:37 +08:00
|
|
|
#include <linux/pm.h>
|
2008-02-03 09:50:04 +08:00
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/string.h>
|
2007-02-08 23:42:37 +08:00
|
|
|
#include <linux/tcp.h>
|
2008-02-03 09:50:04 +08:00
|
|
|
#include <linux/timer.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/workqueue.h>
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
#include <net/checksum.h>
|
|
|
|
|
|
|
|
#include "atl1.h"
|
|
|
|
|
2011-04-24 11:38:19 +08:00
|
|
|
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, "
|
|
|
|
"Chris Snook <csnook@redhat.com>, "
|
|
|
|
"Jay Cliburn <jcliburn@gmail.com>");
|
2009-05-27 11:50:12 +08:00
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
|
2008-02-03 09:50:04 +08:00
|
|
|
/* Temporary hack for merging atl1 and atl2 */
|
|
|
|
#include "atlx.c"
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2010-10-21 15:50:50 +08:00
|
|
|
static const struct ethtool_ops atl1_ethtool_ops;
|
|
|
|
|
2008-04-19 09:51:53 +08:00
|
|
|
/*
|
|
|
|
* This is the only thing that needs to be changed to adjust the
|
|
|
|
* maximum number of ports that the driver can manage.
|
|
|
|
*/
|
|
|
|
#define ATL1_MAX_NIC 4
|
|
|
|
|
|
|
|
#define OPTION_UNSET -1
|
|
|
|
#define OPTION_DISABLED 0
|
|
|
|
#define OPTION_ENABLED 1
|
|
|
|
|
|
|
|
#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interrupt Moderate Timer in units of 2 us
|
|
|
|
*
|
|
|
|
* Valid Range: 10-65535
|
|
|
|
*
|
|
|
|
* Default Value: 100 (200us)
|
|
|
|
*/
|
2012-12-03 22:23:56 +08:00
|
|
|
static int int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
|
2009-02-14 19:15:17 +08:00
|
|
|
static unsigned int num_int_mod_timer;
|
2008-04-19 09:51:53 +08:00
|
|
|
module_param_array_named(int_mod_timer, int_mod_timer, int,
|
|
|
|
&num_int_mod_timer, 0);
|
|
|
|
MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
|
|
|
|
|
|
|
|
#define DEFAULT_INT_MOD_CNT 100 /* 200us */
|
|
|
|
#define MAX_INT_MOD_CNT 65000
|
|
|
|
#define MIN_INT_MOD_CNT 50
|
|
|
|
|
|
|
|
struct atl1_option {
|
|
|
|
enum { enable_option, range_option, list_option } type;
|
|
|
|
char *name;
|
|
|
|
char *err;
|
|
|
|
int def;
|
|
|
|
union {
|
|
|
|
struct { /* range_option info */
|
|
|
|
int min;
|
|
|
|
int max;
|
|
|
|
} r;
|
|
|
|
struct { /* list_option info */
|
|
|
|
int nr;
|
|
|
|
struct atl1_opt_list {
|
|
|
|
int i;
|
|
|
|
char *str;
|
|
|
|
} *p;
|
|
|
|
} l;
|
|
|
|
} arg;
|
|
|
|
};
|
|
|
|
|
2012-12-03 22:23:56 +08:00
|
|
|
static int atl1_validate_option(int *value, struct atl1_option *opt,
|
2012-12-06 22:30:56 +08:00
|
|
|
struct pci_dev *pdev)
|
2008-04-19 09:51:53 +08:00
|
|
|
{
|
|
|
|
if (*value == OPTION_UNSET) {
|
|
|
|
*value = opt->def;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (opt->type) {
|
|
|
|
case enable_option:
|
|
|
|
switch (*value) {
|
|
|
|
case OPTION_ENABLED:
|
|
|
|
dev_info(&pdev->dev, "%s enabled\n", opt->name);
|
|
|
|
return 0;
|
|
|
|
case OPTION_DISABLED:
|
|
|
|
dev_info(&pdev->dev, "%s disabled\n", opt->name);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case range_option:
|
|
|
|
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
|
|
|
|
dev_info(&pdev->dev, "%s set to %i\n", opt->name,
|
|
|
|
*value);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case list_option:{
|
|
|
|
int i;
|
|
|
|
struct atl1_opt_list *ent;
|
|
|
|
|
|
|
|
for (i = 0; i < opt->arg.l.nr; i++) {
|
|
|
|
ent = &opt->arg.l.p[i];
|
|
|
|
if (*value == ent->i) {
|
|
|
|
if (ent->str[0] != '\0')
|
|
|
|
dev_info(&pdev->dev, "%s\n",
|
|
|
|
ent->str);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
|
|
|
|
opt->name, *value, opt->err);
|
|
|
|
*value = opt->def;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2008-04-19 09:51:53 +08:00
|
|
|
* atl1_check_options - Range Checking for Command Line Parameters
|
|
|
|
* @adapter: board private structure
|
|
|
|
*
|
|
|
|
* This routine checks all command line parameters for valid user
|
|
|
|
* input. If an invalid value is given, or if no user specified
|
|
|
|
* value exists, a default value is used. The final value is stored
|
|
|
|
* in a variable in the adapter structure.
|
|
|
|
*/
|
2012-12-03 22:23:56 +08:00
|
|
|
static void atl1_check_options(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	/* bd indexes the per-board module parameter arrays */
	int bd = adapter->bd_number;
	if (bd >= ATL1_MAX_NIC) {
		/* More boards than parameter slots: this one gets defaults */
		dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
		dev_notice(&pdev->dev, "using defaults for all values\n");
	}
	{ /* Interrupt Moderate Timer */
		struct atl1_option opt = {
			.type = range_option,
			.name = "Interrupt Moderator Timer",
			.err = "using default of "
				__MODULE_STRING(DEFAULT_INT_MOD_CNT),
			.def = DEFAULT_INT_MOD_CNT,
			.arg = {.r = {.min = MIN_INT_MOD_CNT,
					.max = MAX_INT_MOD_CNT} }
		};
		int val;
		/* Use the user-supplied value only if one was given for
		 * this board index; atl1_validate_option() replaces an
		 * out-of-range value with opt.def.
		 */
		if (num_int_mod_timer > bd) {
			val = int_mod_timer[bd];
			atl1_validate_option(&val, &opt, pdev);
			adapter->imt = (u16) val;
		} else
			adapter->imt = (u16) (opt.def);
	}
}
|
|
|
|
|
2007-02-08 23:42:37 +08:00
|
|
|
/*
|
|
|
|
* atl1_pci_tbl - PCI Device ID Table
|
|
|
|
*/
|
2014-08-08 21:56:03 +08:00
|
|
|
static const struct pci_device_id atl1_pci_tbl[] = {
|
2007-02-15 10:17:01 +08:00
|
|
|
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
|
2007-02-08 23:42:37 +08:00
|
|
|
/* required last entry */
|
|
|
|
{0,}
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
|
|
|
|
|
2008-02-03 09:50:09 +08:00
|
|
|
static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
|
|
|
|
NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
|
|
|
|
|
|
|
|
static int debug = -1;
|
|
|
|
module_param(debug, int, 0);
|
|
|
|
MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
|
|
|
|
|
2007-02-08 23:42:37 +08:00
|
|
|
/*
|
2008-02-03 09:50:12 +08:00
|
|
|
* Reset the transmit and receive units; mask and clear all interrupts.
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* return : 0 or idle status (if error)
|
2007-02-08 23:42:37 +08:00
|
|
|
*/
|
2008-02-03 09:50:12 +08:00
|
|
|
static s32 atl1_reset_hw(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	u32 icr;	/* last value read from REG_IDLE_STATUS */
	int i;

	/*
	 * Clear Interrupt mask to stop board from generating
	 * interrupts & Clear any pending interrupt events
	 */
	/*
	 * atlx_irq_disable(adapter);
	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
	 */

	/*
	 * Issue Soft Reset to the MAC.  This will reset the chip's
	 * transmit, receive, DMA.  It will not effect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
	/* read back to push the posted write out to the device */
	ioread32(hw->hw_addr + REG_MASTER_CTRL);

	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
	ioread16(hw->hw_addr + REG_PHY_ENABLE);

	/* delay about 1ms */
	msleep(1);

	/* Wait at least 10ms for All module to be Idle */
	for (i = 0; i < 10; i++) {
		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
		if (!icr)	/* all units idle: reset complete */
			break;
		/* delay 1 ms */
		msleep(1);
		/* FIXME: still the right way to do this? */
		cpu_relax();
	}

	/* Non-zero idle status after the timeout is treated as an error
	 * and returned to the caller (see function header: "0 or idle
	 * status (if error)").
	 */
	if (icr) {
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
		return icr;
	}

	return 0;
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/* function about EEPROM
|
|
|
|
*
|
|
|
|
* check_eeprom_exist
|
|
|
|
* return 0 if eeprom exist
|
|
|
|
*/
|
|
|
|
static int atl1_check_eeprom_exist(struct atl1_hw *hw)
{
	u32 value;
	/* If VPD access via SPI flash is enabled, disable it so the
	 * EEPROM can be probed through the PCIe capability below.
	 */
	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
	if (value & SPI_FLASH_CTRL_EN_VPD) {
		value &= ~SPI_FLASH_CTRL_EN_VPD;
		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	}

	/* NOTE(review): 0x6C00 in the upper byte of the capability list
	 * register is used here as the "EEPROM present" signature --
	 * presumably a VPD capability ID/offset; confirm against the
	 * L1 datasheet.
	 */
	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
 * Read one 32-bit word from the EEPROM through the VPD interface.
 * offset must be 4-byte aligned; returns true and stores the word in
 * *p_value on success, false on misalignment or poll timeout.
 */
static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
{
	int i;
	u32 control;

	if (offset & 3)
		/* address do not align */
		return false;

	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
	/* Writing the address (with the flag bit clear) starts the read */
	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
	/* read back to flush the posted write */
	ioread32(hw->hw_addr + REG_VPD_CAP);

	/* Poll up to 10 x 2ms for the hardware to set the done flag */
	for (i = 0; i < 10; i++) {
		msleep(2);
		control = ioread32(hw->hw_addr + REG_VPD_CAP);
		if (control & VPD_CAP_VPD_FLAG)
			break;
	}
	if (control & VPD_CAP_VPD_FLAG) {
		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
		return true;
	}
	/* timeout */
	return false;
}
|
|
|
|
|
2007-02-08 23:42:37 +08:00
|
|
|
/*
|
2008-02-03 09:50:12 +08:00
|
|
|
* Reads the value from a PHY register
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* reg_addr - address of the PHY register to read
|
2007-02-08 23:42:37 +08:00
|
|
|
*/
|
2010-10-21 15:50:50 +08:00
|
|
|
static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	/* Build the MDIO command word: register address, start bit,
	 * preamble, read flag (MDIO_RW) and clock selection.
	 */
	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
		MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	/* read back to flush the posted write */
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	/* Poll until the controller clears the START/BUSY bits */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		/* low 16 bits of the control register hold the read data */
		*phy_data = (u16) val;
		return 0;
	}
	/* transaction never completed */
	return ATLX_ERR_PHY;
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
#define CUSTOM_SPI_CS_SETUP 2
|
|
|
|
#define CUSTOM_SPI_CLK_HI 2
|
|
|
|
#define CUSTOM_SPI_CLK_LO 2
|
|
|
|
#define CUSTOM_SPI_CS_HOLD 2
|
|
|
|
#define CUSTOM_SPI_CS_HI 3
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
 * Read one 32-bit word from the SPI flash at the given address.
 * Returns true and stores the word in *buf on success, false if the
 * transfer did not complete within the poll window.
 */
static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
{
	int i;
	u32 value;

	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);

	/* Assemble the flash control word from the CUSTOM_SPI_* timing
	 * constants plus instruction code 1 (read).
	 */
	value = SPI_FLASH_CTRL_WAIT_READY |
	    (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
	    SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
	    SPI_FLASH_CTRL_CLK_HI_MASK) <<
	    SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
	    SPI_FLASH_CTRL_CLK_LO_MASK) <<
	    SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
	    SPI_FLASH_CTRL_CS_HOLD_MASK) <<
	    SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
	    SPI_FLASH_CTRL_CS_HI_MASK) <<
	    SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
	    SPI_FLASH_CTRL_INS_SHIFT;

	/* Program the control word first, then set START in a second
	 * write to kick off the transfer.
	 */
	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);

	value |= SPI_FLASH_CTRL_START;
	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	/* read back to flush the posted write */
	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);

	/* Poll up to 10 x 1ms for hardware to clear the START bit */
	for (i = 0; i < 10; i++) {
		msleep(1);
		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
		if (!(value & SPI_FLASH_CTRL_START))
			break;
	}

	if (value & SPI_FLASH_CTRL_START)
		/* timeout */
		return false;

	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);

	return true;
}
|
|
|
|
|
|
|
|
/*
|
2008-02-03 09:50:12 +08:00
|
|
|
* get_permanent_address
|
|
|
|
* return 0 if get valid mac address,
|
2007-02-08 23:42:37 +08:00
|
|
|
*/
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
 * Try three sources for the permanent MAC address, in order:
 * EEPROM (via VPD), SPI flash, then the MAC station-address registers
 * themselves (some BIOSes program those directly during POST).
 * Returns 0 and fills hw->perm_mac_addr on success, 1 on failure.
 */
static int atl1_get_permanent_address(struct atl1_hw *hw)
{
	u32 addr[2];		/* raw station address words as stored */
	u32 i, control;
	u16 reg;		/* register offset from the last key record */
	u8 eth_addr[ETH_ALEN];
	bool key_valid;		/* true when a key record precedes data */

	/* Nothing to do if a valid address was already cached */
	if (is_valid_ether_addr(hw->perm_mac_addr))
		return 0;

	/* init */
	addr[0] = addr[1] = 0;

	if (!atl1_check_eeprom_exist(hw)) {
		reg = 0;
		key_valid = false;
		/* Read out all EEPROM content */
		/* Records alternate key/value: a word whose low byte is
		 * 0x5A carries a register offset in its high 16 bits; the
		 * following word is the value for that register.
		 */
		i = 0;
		while (1) {
			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
				if (key_valid) {
					if (reg == REG_MAC_STA_ADDR)
						addr[0] = control;
					else if (reg == (REG_MAC_STA_ADDR + 4))
						addr[1] = control;
					key_valid = false;
				} else if ((control & 0xff) == 0x5A) {
					key_valid = true;
					reg = (u16) (control >> 16);
				} else
					break;
			} else
				/* read error */
				break;
			i += 4;
		}

		/* Byte-swap the stored words into wire order.
		 * NOTE(review): this type-puns through u32/u16 pointers
		 * into a u8 array; eth_addr alignment makes it work here
		 * but it bypasses strict-aliasing rules.
		 */
		*(u32 *) &eth_addr[2] = swab32(addr[0]);
		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
		if (is_valid_ether_addr(eth_addr)) {
			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
			return 0;
		}
	}

	/* see if SPI FLAGS exist ? */
	/* Same key/value record walk as above, but over SPI flash */
	addr[0] = addr[1] = 0;
	reg = 0;
	key_valid = false;
	i = 0;
	while (1) {
		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
			if (key_valid) {
				if (reg == REG_MAC_STA_ADDR)
					addr[0] = control;
				else if (reg == (REG_MAC_STA_ADDR + 4))
					addr[1] = control;
				key_valid = false;
			} else if ((control & 0xff) == 0x5A) {
				key_valid = true;
				reg = (u16) (control >> 16);
			} else
				/* data end */
				break;
		} else
			/* read error */
			break;
		i += 4;
	}

	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	/*
	 * On some motherboards, the MAC address is written by the
	 * BIOS directly to the MAC register during POST, and is
	 * not stored in eeprom.  If all else thus far has failed
	 * to fetch the permanent MAC address, try reading it directly.
	 */
	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	/* no valid address found anywhere */
	return 1;
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/*
|
2008-02-03 09:50:12 +08:00
|
|
|
* Reads the adapter's MAC address from the EEPROM
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
2007-07-16 00:03:29 +08:00
|
|
|
*/
|
2008-12-26 15:58:35 +08:00
|
|
|
static s32 atl1_read_mac_addr(struct atl1_hw *hw)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2012-02-17 13:43:30 +08:00
|
|
|
s32 ret = 0;
|
2008-02-03 09:50:12 +08:00
|
|
|
u16 i;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2012-02-17 13:43:30 +08:00
|
|
|
if (atl1_get_permanent_address(hw)) {
|
2012-07-13 03:33:06 +08:00
|
|
|
eth_random_addr(hw->perm_mac_addr);
|
2012-02-17 13:43:30 +08:00
|
|
|
ret = 1;
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
for (i = 0; i < ETH_ALEN; i++)
|
|
|
|
hw->mac_addr[i] = hw->perm_mac_addr[i];
|
2012-02-17 13:43:30 +08:00
|
|
|
return ret;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2008-02-03 09:50:12 +08:00
|
|
|
* Hashes an address to determine its location in the multicast table
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* mc_addr - the multicast address to hash
|
2007-07-16 00:03:29 +08:00
|
|
|
*
|
2008-02-03 09:50:12 +08:00
|
|
|
* atl1_hash_mc_addr
|
|
|
|
* purpose
|
|
|
|
* set hash value for a multicast address
|
|
|
|
 * hash calculation process:
 * 1. calculate a 32-bit CRC for the multicast address
 * 2. reverse the CRC, MSB to LSB
|
2007-02-08 23:42:37 +08:00
|
|
|
*/
|
2010-10-21 15:50:50 +08:00
|
|
|
static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
	u32 crc, reversed;
	int bit;

	/* 32-bit little-endian CRC over the 6-byte multicast address */
	crc = ether_crc_le(6, mc_addr);

	/* Mirror the CRC bit-for-bit (MSB <-> LSB) to form the hash */
	reversed = 0;
	for (bit = 31; bit >= 0; bit--)
		reversed |= ((crc >> (31 - bit)) & 1) << bit;

	return reversed;
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Sets the bit in the multicast table corresponding to the hash value.
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* hash_value - Multicast address hash value
|
|
|
|
*/
|
2010-10-21 15:50:50 +08:00
|
|
|
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table  is a register array of 2 32-bit registers.
	 * It is treated like an array of 64 bits.  We want to set
	 * bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The register is determined by the
	 * upper 7 bits of the hash value and the bit within that
	 * register are determined by the lower 5 bits of the value.
	 */
	/* NOTE(review): the comment above says "upper 7 bits", but the
	 * code actually selects the register with bit 31 alone and the
	 * bit index with bits 30:26 -- confirm which is intended.
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	/* read-modify-write the selected 32-bit hash table register */
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Writes a value to a PHY register
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* reg_addr - address of the PHY register to write
|
|
|
|
* data - data to write to the PHY
|
|
|
|
*/
|
|
|
|
static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	/* MDIO command word: data in the low bits, register address,
	 * preamble, start bit and clock select.  Unlike the read path,
	 * MDIO_RW is left clear, making this a write transaction.
	 */
	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
	    MDIO_SUP_PREAMBLE |
	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	/* read back to flush the posted write */
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	/* Poll until the controller clears the START/BUSY bits */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	/* transaction never completed */
	return ATLX_ERR_PHY;
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Make L001's PHY out of Power Saving State (bug)
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* when power on, L001's PHY always on Power saving State
|
|
|
|
* (Gigabit Link forbidden)
|
|
|
|
*/
|
|
|
|
static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
{
	/* Magic write to debug register 29 selects the page; register 30
	 * is then cleared to take the PHY out of power-saving state.
	 */
	s32 err = atl1_write_phy_reg(hw, 29, 0x0029);

	/* Only touch register 30 if the first write succeeded */
	return err ? err : atl1_write_phy_reg(hw, 30, 0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Resets the PHY and make all config validate
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
*
|
|
|
|
 * Sets bits 15 and 12 of the MII Control register (for F001 bug)
|
|
|
|
*/
|
|
|
|
static s32 atl1_phy_reset(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;
	u16 phy_data;	/* value to program into the MII control register */

	/* Auto-sensing and 1000M-full both go through autonegotiation;
	 * all other media types force speed/duplex directly.
	 */
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			    MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}

	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
	if (ret_val) {
		u32 val;
		int i;
		/* pcie serdes link may be down! */
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "pcie phy link down\n");

		/* Give the link up to 25 x 1ms to come back, then retest
		 * the MDIO controller's busy bits.
		 */
		for (i = 0; i < 25; i++) {
			msleep(1);
			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
			if (!(val & (MDIO_START | MDIO_BUSY)))
				break;
		}

		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
			if (netif_msg_hw(adapter))
				dev_warn(&pdev->dev,
					"pcie link down at least 25ms\n");
			return ret_val;
		}
	}
	/* MDIO recovered (or the write succeeded outright) */
	return 0;
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Configures PHY autoneg and flow control advertisement settings
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
*/
|
|
|
|
static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
{
	s32 ret_val;
	s16 mii_autoneg_adv_reg;	/* MII advertisement (reg 4) image */
	s16 mii_1000t_ctrl_reg;		/* 1000Base-T control (reg 9) image */

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	/* NOTE(review): despite the comment, both registers are seeded
	 * from default-capability masks rather than read from the PHY.
	 */
	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;

	/* Read the MII 1000Base-T Control Register (Address 9). */
	mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;

	/*
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the  1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
	mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;

	/*
	 * Need to parse media_type  and set up
	 * the appropriate PHY registers.
	 */
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		/* advertise everything: 10/100 half+full, 1000 full */
		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
					MII_AR_10T_FD_CAPS |
					MII_AR_100TX_HD_CAPS |
					MII_AR_100TX_FD_CAPS);
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_1000M_FULL:
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_FULL:
		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_HALF:
		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
		break;

	case MEDIA_TYPE_10M_FULL:
		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
		break;

	default:
		/* MEDIA_TYPE_10M_HALF */
		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
		break;
	}

	/* flow control fixed to enable all */
	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);

	/* cache the images so other code can consult what we advertised */
	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;

	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
	if (ret_val)
		return ret_val;

	return 0;
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/*
|
2008-02-03 09:50:12 +08:00
|
|
|
* Configures link settings.
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* Assumes the hardware has previously been reset and the
|
|
|
|
* transmitter and receiver are not enabled.
|
2007-07-16 00:03:29 +08:00
|
|
|
*/
|
2008-02-03 09:50:12 +08:00
|
|
|
static s32 atl1_setup_link(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;

	/*
	 * Options:
	 *  PHY will advertise value(s) parsed from
	 *  autoneg_advertised and fc
	 *  no matter what autoneg is , We will not wait link result.
	 */
	/* Step 1: program the advertisement registers */
	ret_val = atl1_phy_setup_autoneg_adv(hw);
	if (ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&pdev->dev,
				"error setting up autonegotiation\n");
		return ret_val;
	}
	/* SW.Reset , En-Auto-Neg if needed */
	/* Step 2: reset the PHY so the new advertisement takes effect */
	ret_val = atl1_phy_reset(hw);
	if (ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&pdev->dev, "error resetting phy\n");
		return ret_val;
	}
	/* mark the PHY configured; ret_val is 0 here */
	hw->phy_configured = true;
	return ret_val;
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
 * atl1_init_flash_opcode - program the SPI flash opcode registers
 * @hw: struct containing variables accessed by shared code
 *
 * Writes the per-vendor SPI command bytes from flash_table[] into the
 * controller's flash opcode registers.  An out-of-range vendor index
 * falls back to table entry 0.
 */
static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
		/* Unknown vendor index: fall back to entry 0 (Atmel) */
		hw->flash_vendor = 0;

	/* Init OP table */
	iowrite8(flash_table[hw->flash_vendor].cmd_program,
		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_read,
		hw->hw_addr + REG_SPI_FLASH_OP_READ);
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Performs basic configuration of the adapter.
|
|
|
|
* hw - Struct containing variables accessed by shared code
|
|
|
|
* Assumes that the controller has previously been reset and is in a
|
|
|
|
* post-reset uninitialized state. Initializes multicast table,
|
|
|
|
* and Calls routines to setup link
|
|
|
|
* Leaves the transmit and receive units disabled and uninitialized.
|
|
|
|
*/
|
|
|
|
static s32 atl1_init_hw(struct atl1_hw *hw)
|
2007-07-16 00:03:29 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
u32 ret_val = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/* Zero out the Multicast HASH table */
|
|
|
|
iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
|
|
|
|
/* clear the old settings from the multicast hash table */
|
|
|
|
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
atl1_init_flash_opcode(hw);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (!hw->phy_configured) {
|
2011-06-24 01:01:55 +08:00
|
|
|
/* enable GPHY LinkChange Interrupt */
|
2008-02-03 09:50:12 +08:00
|
|
|
ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
|
|
|
|
if (ret_val)
|
|
|
|
return ret_val;
|
|
|
|
/* make PHY out of power-saving state */
|
|
|
|
ret_val = atl1_phy_leave_power_saving(hw);
|
|
|
|
if (ret_val)
|
|
|
|
return ret_val;
|
|
|
|
/* Call a subroutine to configure the link */
|
|
|
|
ret_val = atl1_setup_link(hw);
|
|
|
|
}
|
|
|
|
return ret_val;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * atl1_get_speed_and_duplex - detect current speed and duplex settings
 * @hw: struct containing variables accessed by shared code
 * @speed: receives the speed of the connection (SPEED_10/100/1000)
 * @duplex: receives the duplex setting (FULL_DUPLEX/HALF_DUPLEX)
 *
 * Returns 0 on success, the PHY read error code on a failed register
 * read, ATLX_ERR_PHY_RES if speed/duplex have not yet been resolved,
 * or ATLX_ERR_PHY_SPEED on an unrecognized speed field.
 */
static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;
	u16 phy_data;

	/* ; --- Read PHY Specific Status Register (17) */
	ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
	if (ret_val)
		return ret_val;

	/* speed/duplex bits are only meaningful once resolution completes */
	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
		return ATLX_ERR_PHY_RES;

	switch (phy_data & MII_ATLX_PSSR_SPEED) {
	case MII_ATLX_PSSR_1000MBS:
		*speed = SPEED_1000;
		break;
	case MII_ATLX_PSSR_100MBS:
		*speed = SPEED_100;
		break;
	case MII_ATLX_PSSR_10MBS:
		*speed = SPEED_10;
		break;
	default:
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "error getting speed\n");
		return ATLX_ERR_PHY_SPEED;
	}
	if (phy_data & MII_ATLX_PSSR_DPLX)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	return 0;
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2010-10-21 15:50:50 +08:00
|
|
|
static void atl1_set_mac_addr(struct atl1_hw *hw)
|
2007-07-16 00:03:29 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
u32 value;
|
|
|
|
/*
|
|
|
|
* 00-0B-6A-F6-00-DC
|
|
|
|
* 0: 6AF600DC 1: 000B
|
|
|
|
* low dword
|
|
|
|
*/
|
|
|
|
value = (((u32) hw->mac_addr[2]) << 24) |
|
|
|
|
(((u32) hw->mac_addr[3]) << 16) |
|
|
|
|
(((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
|
|
|
|
iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
|
|
|
|
/* high dword */
|
|
|
|
value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
|
|
|
|
iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
|
2007-07-16 00:03:29 +08:00
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Always returns 0.
 */
static int atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* largest frame: MTU + Ethernet header + FCS + one VLAN tag */
	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Wake-on-LAN disabled by default */
	adapter->wol = 0;
	device_set_wakeup_enable(&adapter->pdev->dev, false);
	/* rx buffer length rounded up to an 8-byte boundary */
	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
	adapter->ict = 50000;		/* 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	/*
	 * Default MAC/PHY tuning values; raw register units
	 * (presumably per the chip datasheet -- not verifiable here).
	 */
	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->ipgt = 0x60;
	hw->min_ifg = 0x50;
	hw->ipgr1 = 0x40;
	hw->ipgr2 = 0x60;
	hw->max_retry = 0xf;
	hw->lcol = 0x37;
	hw->jam_ipg = 7;
	hw->rfd_burst = 8;
	hw->rrd_burst = 8;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;
	hw->tpd_burst = 4;
	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;
	hw->cmb_rrd = 4;
	hw->cmb_tpd = 4;
	hw->cmb_rx_timer = 1;	/* about 2us */
	hw->cmb_tx_timer = 1;	/* about 2us */
	hw->smb_timer = 100000;	/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
u16 result;
|
|
|
|
|
|
|
|
atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
|
|
|
|
int val)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
|
|
|
atl1_write_phy_reg(&adapter->hw, reg_num, val);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
unsigned long flags;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
if (!netif_running(netdev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adapter->lock, flags);
|
|
|
|
retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
|
|
|
|
spin_unlock_irqrestore(&adapter->lock, flags);
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
 * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Allocates one atl1_buffer array shared by the TPD and RFD rings, and
 * a single coherent DMA region that is carved up into the TPD, RFD and
 * RRD descriptor rings plus the coalescing (CMB) and statistics (SMB)
 * message blocks, each aligned to an 8-byte boundary.
 *
 * Return 0 on success, negative on failure
 */
static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	/* one allocation holds buffer_info for both the TPD and RFD rings */
	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
	if (unlikely(!tpd_ring->buffer_info)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
				size);
		goto err_nomem;
	}
	/* RFD entries start right after the TPD entries */
	rfd_ring->buffer_info =
		(tpd_ring->buffer_info + tpd_ring->count);

	/*
	 * real ring DMA buffer
	 * each ring/block may need up to 8 bytes for alignment, hence the
	 * additional 40 bytes tacked onto the end.
	 */
	ring_header->size =
		sizeof(struct tx_packet_desc) * tpd_ring->count
		+ sizeof(struct rx_free_desc) * rfd_ring->count
		+ sizeof(struct rx_return_desc) * rrd_ring->count
		+ sizeof(struct coals_msg_block)
		+ sizeof(struct stats_msg_block)
		+ 40;

	ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
		&ring_header->dma, GFP_KERNEL);
	if (unlikely(!ring_header->desc)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "dma_alloc_coherent failed\n");
		goto err_nomem;
	}

	/* init TPD ring */
	tpd_ring->dma = ring_header->dma;
	/*
	 * NOTE: tpd_ring->dma == ring_header->dma at this point, so mixing
	 * the two in this expression is harmless (though inconsistent with
	 * the RFD/RRD/CMB/SMB blocks below).
	 */
	offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
	tpd_ring->dma += offset;
	tpd_ring->desc = (u8 *) ring_header->desc + offset;
	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

	/* init RFD ring */
	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
	rfd_ring->dma += offset;
	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;


	/* init RRD ring */
	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
	rrd_ring->dma += offset;
	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;


	/* init CMB */
	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
	adapter->cmb.dma += offset;
	adapter->cmb.cmb = (struct coals_msg_block *)
		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

	/* init SMB */
	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
	adapter->smb.dma += offset;
	adapter->smb.smb = (struct stats_msg_block *)
		((u8 *) adapter->cmb.cmb +
		(sizeof(struct coals_msg_block) + offset));

	return 0;

err_nomem:
	/* kfree(NULL) is a no-op, so this is safe on the first failure too */
	kfree(tpd_ring->buffer_info);
	return -ENOMEM;
}
|
|
|
|
|
2007-07-24 04:38:39 +08:00
|
|
|
static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:28 +08:00
|
|
|
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
|
|
|
|
struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
|
|
|
|
struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:28 +08:00
|
|
|
atomic_set(&tpd_ring->next_to_use, 0);
|
|
|
|
atomic_set(&tpd_ring->next_to_clean, 0);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:28 +08:00
|
|
|
rfd_ring->next_to_clean = 0;
|
|
|
|
atomic_set(&rfd_ring->next_to_use, 0);
|
|
|
|
|
|
|
|
rrd_ring->next_to_use = 0;
|
|
|
|
atomic_set(&rrd_ring->next_to_clean, 0);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 *
 * Unmaps and frees every receive buffer, zeroes the RFD buffer_info
 * array and descriptor ring, and resets the RFD/RRD ring indices.
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			/* unmap before freeing the skb that backs it */
			dma_unmap_page(&pdev->dev, buffer_info->dma,
				buffer_info->length, DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 *
 * Unmaps all outstanding transmit DMA mappings, frees the associated
 * sk_buffs, zeroes the TPD buffer_info array and descriptor ring, and
 * resets the TPD ring indices.
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_page(&pdev->dev, buffer_info->dma,
				buffer_info->length, DMA_TO_DEVICE);
			buffer_info->dma = 0;
		}
	}

	/* second pass: release the skbs themselves */
	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->skb) {
			dev_kfree_skb_any(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * tpd_ring->count;
	memset(tpd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tpd_ring->desc, 0, tpd_ring->size);

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 *
 * Cleans both rings, releases the shared buffer_info allocation and
 * the single coherent DMA region, then clears all ring/CMB/SMB
 * pointers and DMA handles.
 */
static void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	/* one kzalloc covered both the TPD and RFD buffer_info arrays */
	kfree(tpd_ring->buffer_info);
	/* one coherent region covered all rings plus the CMB and SMB */
	dma_free_coherent(&pdev->dev, ring_header->size, ring_header->desc,
		ring_header->dma);

	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;
	tpd_ring->dma = 0;

	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;
	rfd_ring->dma = 0;

	rrd_ring->desc = NULL;
	rrd_ring->dma = 0;

	adapter->cmb.dma = 0;
	adapter->cmb.cmb = NULL;

	adapter->smb.dma = 0;
	adapter->smb.smb = NULL;
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
u32 value;
|
2007-07-16 00:03:29 +08:00
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
|
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
|
/* Config MAC CTRL Register */
|
|
|
|
value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
|
|
|
|
/* duplex */
|
|
|
|
if (FULL_DUPLEX == adapter->link_duplex)
|
|
|
|
value |= MAC_CTRL_DUPLX;
|
|
|
|
/* speed */
|
|
|
|
value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
|
|
|
|
MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
|
|
|
|
MAC_CTRL_SPEED_SHIFT);
|
|
|
|
/* flow control */
|
|
|
|
value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
|
|
|
|
/* PAD & CRC */
|
|
|
|
value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
|
|
|
|
/* preamble length */
|
|
|
|
value |= (((u32) adapter->hw.preamble_len
|
|
|
|
& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
|
|
|
|
/* vlan */
|
2011-07-20 12:54:33 +08:00
|
|
|
__atlx_vlan_mode(netdev->features, &value);
|
2007-07-16 00:03:29 +08:00
|
|
|
/* rx checksum
|
|
|
|
if (adapter->rx_csum)
|
|
|
|
value |= MAC_CTRL_RX_CHKSUM_EN;
|
|
|
|
*/
|
|
|
|
/* filter mode */
|
|
|
|
value |= MAC_CTRL_BC_EN;
|
|
|
|
if (netdev->flags & IFF_PROMISC)
|
|
|
|
value |= MAC_CTRL_PROMIS_EN;
|
|
|
|
else if (netdev->flags & IFF_ALLMULTI)
|
|
|
|
value |= MAC_CTRL_MC_ALL_EN;
|
|
|
|
/* value |= MAC_CTRL_LOOPBACK; */
|
|
|
|
iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static u32 atl1_check_link(struct atl1_adapter *adapter)
|
|
|
|
{
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
|
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
|
u32 ret_val;
|
|
|
|
u16 speed, duplex, phy_data;
|
|
|
|
int reconfig = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* MII_BMSR must read twice */
|
|
|
|
atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
|
|
|
|
atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (!(phy_data & BMSR_LSTATUS)) {
|
|
|
|
/* link down */
|
|
|
|
if (netif_carrier_ok(netdev)) {
|
|
|
|
/* old link state: Up */
|
|
|
|
if (netif_msg_link(adapter))
|
|
|
|
dev_info(&adapter->pdev->dev, "link is down\n");
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->link_speed = SPEED_0;
|
|
|
|
netif_carrier_off(netdev);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
return 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* Link Up */
|
|
|
|
ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
|
|
|
|
if (ret_val)
|
|
|
|
return ret_val;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
switch (hw->media_type) {
|
|
|
|
case MEDIA_TYPE_1000M_FULL:
|
|
|
|
if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
|
|
|
|
reconfig = 1;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_100M_FULL:
|
|
|
|
if (speed != SPEED_100 || duplex != FULL_DUPLEX)
|
|
|
|
reconfig = 1;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_100M_HALF:
|
|
|
|
if (speed != SPEED_100 || duplex != HALF_DUPLEX)
|
|
|
|
reconfig = 1;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_10M_FULL:
|
|
|
|
if (speed != SPEED_10 || duplex != FULL_DUPLEX)
|
|
|
|
reconfig = 1;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_10M_HALF:
|
|
|
|
if (speed != SPEED_10 || duplex != HALF_DUPLEX)
|
|
|
|
reconfig = 1;
|
|
|
|
break;
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* link result is our setting */
|
|
|
|
if (!reconfig) {
|
2009-12-03 15:58:21 +08:00
|
|
|
if (adapter->link_speed != speed ||
|
|
|
|
adapter->link_duplex != duplex) {
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->link_speed = speed;
|
|
|
|
adapter->link_duplex = duplex;
|
|
|
|
atl1_setup_mac_ctrl(adapter);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_msg_link(adapter))
|
|
|
|
dev_info(&adapter->pdev->dev,
|
|
|
|
"%s link is up %d Mbps %s\n",
|
|
|
|
netdev->name, adapter->link_speed,
|
|
|
|
adapter->link_duplex == FULL_DUPLEX ?
|
|
|
|
"full duplex" : "half duplex");
|
2007-07-16 00:03:29 +08:00
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
if (!netif_carrier_ok(netdev)) {
|
|
|
|
/* Link down -> Up */
|
2007-07-16 00:03:29 +08:00
|
|
|
netif_carrier_on(netdev);
|
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
return 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/* change original link status */
|
2007-07-16 00:03:29 +08:00
|
|
|
if (netif_carrier_ok(netdev)) {
|
|
|
|
adapter->link_speed = SPEED_0;
|
|
|
|
netif_carrier_off(netdev);
|
|
|
|
netif_stop_queue(netdev);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
|
|
|
|
hw->media_type != MEDIA_TYPE_1000M_FULL) {
|
|
|
|
switch (hw->media_type) {
|
|
|
|
case MEDIA_TYPE_100M_FULL:
|
|
|
|
phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
|
|
|
|
MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_100M_HALF:
|
|
|
|
phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_10M_FULL:
|
|
|
|
phy_data =
|
|
|
|
MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
|
|
|
|
break;
|
2008-02-03 09:50:12 +08:00
|
|
|
default:
|
|
|
|
/* MEDIA_TYPE_10M_HALF: */
|
2007-07-16 00:03:29 +08:00
|
|
|
phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
|
|
|
|
break;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
atl1_write_phy_reg(hw, MII_BMCR, phy_data);
|
2008-02-03 09:50:12 +08:00
|
|
|
return 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* auto-neg, insert timer to re-config phy */
|
|
|
|
if (!adapter->phy_timer_pending) {
|
|
|
|
adapter->phy_timer_pending = true;
|
2008-11-01 07:52:04 +08:00
|
|
|
mod_timer(&adapter->phy_config_timer,
|
|
|
|
round_jiffies(jiffies + 3 * HZ));
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
|
|
|
|
{
|
|
|
|
u32 hi, lo, value;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* RFD Flow Control */
|
|
|
|
value = adapter->rfd_ring.count;
|
|
|
|
hi = value / 16;
|
|
|
|
if (hi < 2)
|
|
|
|
hi = 2;
|
|
|
|
lo = value * 7 / 8;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
|
|
|
|
((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
|
|
|
|
iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* RRD Flow Control */
|
|
|
|
value = adapter->rrd_ring.count;
|
|
|
|
lo = value / 16;
|
|
|
|
hi = value * 7 / 8;
|
|
|
|
if (lo < 2)
|
|
|
|
lo = 2;
|
|
|
|
value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
|
|
|
|
((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
|
|
|
|
iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void set_flow_ctrl_new(struct atl1_hw *hw)
|
|
|
|
{
|
|
|
|
u32 hi, lo, value;
|
|
|
|
|
|
|
|
/* RXF Flow Control */
|
|
|
|
value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
|
|
|
|
lo = value / 16;
|
|
|
|
if (lo < 192)
|
|
|
|
lo = 192;
|
|
|
|
hi = value * 7 / 8;
|
|
|
|
if (hi < lo)
|
|
|
|
hi = lo + 16;
|
|
|
|
value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
|
|
|
|
((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
|
|
|
|
|
|
|
|
/* RRD Flow Control */
|
|
|
|
value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
|
|
|
|
lo = value / 8;
|
|
|
|
hi = value * 7 / 8;
|
|
|
|
if (lo < 2)
|
|
|
|
lo = 2;
|
|
|
|
if (hi < lo)
|
|
|
|
hi = lo + 3;
|
|
|
|
value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
|
|
|
|
((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
|
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-07-16 00:03:29 +08:00
|
|
|
* atl1_configure - Configure Transmit&Receive Unit after Reset
|
|
|
|
* @adapter: board private structure
|
|
|
|
*
|
|
|
|
* Configure the Tx /Rx unit of the MAC after a reset.
|
|
|
|
*/
|
|
|
|
static u32 atl1_configure(struct atl1_adapter *adapter)
|
|
|
|
{
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
|
|
|
u32 value;
|
|
|
|
|
|
|
|
/* clear interrupt status */
|
|
|
|
iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
|
|
|
|
|
|
|
|
/* set MAC Address */
|
|
|
|
value = (((u32) hw->mac_addr[2]) << 24) |
|
|
|
|
(((u32) hw->mac_addr[3]) << 16) |
|
|
|
|
(((u32) hw->mac_addr[4]) << 8) |
|
|
|
|
(((u32) hw->mac_addr[5]));
|
|
|
|
iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
|
|
|
|
value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
|
|
|
|
iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
|
|
|
|
|
|
|
|
/* tx / rx ring */
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* HI base address */
|
|
|
|
iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
|
|
|
|
hw->hw_addr + REG_DESC_BASE_ADDR_HI);
|
|
|
|
/* LO base address */
|
|
|
|
iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
|
|
|
|
hw->hw_addr + REG_DESC_RFD_ADDR_LO);
|
|
|
|
iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
|
|
|
|
hw->hw_addr + REG_DESC_RRD_ADDR_LO);
|
|
|
|
iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
|
|
|
|
hw->hw_addr + REG_DESC_TPD_ADDR_LO);
|
|
|
|
iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
|
|
|
|
hw->hw_addr + REG_DESC_CMB_ADDR_LO);
|
|
|
|
iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
|
|
|
|
hw->hw_addr + REG_DESC_SMB_ADDR_LO);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* element count */
|
|
|
|
value = adapter->rrd_ring.count;
|
|
|
|
value <<= 16;
|
|
|
|
value += adapter->rfd_ring.count;
|
|
|
|
iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
|
|
|
|
iowrite32(adapter->tpd_ring.count, hw->hw_addr +
|
|
|
|
REG_DESC_TPD_RING_SIZE);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* Load Ptr */
|
|
|
|
iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* config Mailbox */
|
|
|
|
value = ((atomic_read(&adapter->tpd_ring.next_to_use)
|
|
|
|
& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
|
|
|
|
((atomic_read(&adapter->rrd_ring.next_to_clean)
|
|
|
|
& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
|
|
|
|
((atomic_read(&adapter->rfd_ring.next_to_use)
|
|
|
|
& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_MAILBOX);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* config IPG/IFG */
|
|
|
|
value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
|
|
|
|
<< MAC_IPG_IFG_IPGT_SHIFT) |
|
|
|
|
(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
|
|
|
|
<< MAC_IPG_IFG_MIFG_SHIFT) |
|
|
|
|
(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
|
|
|
|
<< MAC_IPG_IFG_IPGR1_SHIFT) |
|
|
|
|
(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
|
|
|
|
<< MAC_IPG_IFG_IPGR2_SHIFT);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* config Half-Duplex Control */
|
|
|
|
value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
|
|
|
|
(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
|
|
|
|
<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
|
|
|
|
MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
|
|
|
|
(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
|
|
|
|
(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
|
|
|
|
<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* set Interrupt Moderator Timer */
|
|
|
|
iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
|
|
|
|
iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* set Interrupt Clear Timer */
|
|
|
|
iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-01-15 09:56:41 +08:00
|
|
|
/* set max frame size hw will accept */
|
|
|
|
iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* jumbo size & rrd retirement timer */
|
|
|
|
value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
|
|
|
|
<< RXQ_JMBOSZ_TH_SHIFT) |
|
|
|
|
(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
|
|
|
|
<< RXQ_JMBO_LKAH_SHIFT) |
|
|
|
|
(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
|
|
|
|
<< RXQ_RRD_TIMER_SHIFT);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* Flow Control */
|
|
|
|
switch (hw->dev_rev) {
|
|
|
|
case 0x8001:
|
|
|
|
case 0x9001:
|
|
|
|
case 0x9002:
|
|
|
|
case 0x9003:
|
|
|
|
set_flow_ctrl_old(adapter);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
set_flow_ctrl_new(hw);
|
|
|
|
break;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* config TXQ */
|
|
|
|
value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
|
|
|
|
<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
|
|
|
|
(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
|
|
|
|
<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
|
|
|
|
(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
|
|
|
|
<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
|
|
|
|
TXQ_CTRL_EN;
|
|
|
|
iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
|
|
|
|
value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
|
|
|
|
<< TX_JUMBO_TASK_TH_SHIFT) |
|
|
|
|
(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
|
|
|
|
<< TX_TPD_MIN_IPG_SHIFT);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* config RXQ */
|
|
|
|
value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
|
|
|
|
<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
|
|
|
|
(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
|
|
|
|
<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
|
|
|
|
(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
|
|
|
|
<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
|
|
|
|
RXQ_CTRL_EN;
|
|
|
|
iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* config DMA Engine */
|
|
|
|
value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
|
|
|
|
<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
|
2007-07-20 07:45:11 +08:00
|
|
|
((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
|
|
|
|
<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
|
2007-07-16 00:03:29 +08:00
|
|
|
DMA_CTRL_DMAW_EN;
|
|
|
|
value |= (u32) hw->dma_ord;
|
|
|
|
if (atl1_rcb_128 == hw->rcb_value)
|
|
|
|
value |= DMA_CTRL_RCB_VALUE;
|
|
|
|
iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* config CMB / SMB */
|
2007-07-20 07:45:12 +08:00
|
|
|
value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
|
|
|
|
hw->cmb_tpd : adapter->tpd_ring.count;
|
|
|
|
value <<= 16;
|
|
|
|
value |= hw->cmb_rrd;
|
2007-07-16 00:03:29 +08:00
|
|
|
iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
|
|
|
|
value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
|
|
|
|
iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
|
|
|
|
iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* --- enable CMB / SMB */
|
|
|
|
value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
|
|
|
|
iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
value = ioread32(adapter->hw.hw_addr + REG_ISR);
|
|
|
|
if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
|
|
|
|
value = 1; /* config failed */
|
|
|
|
else
|
|
|
|
value = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* clear all interrupt status */
|
|
|
|
iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
|
|
|
|
iowrite32(0, adapter->hw.hw_addr + REG_ISR);
|
|
|
|
return value;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/*
|
|
|
|
* atl1_pcie_patch - Patch for PCIE module
|
|
|
|
*/
|
|
|
|
static void atl1_pcie_patch(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:29 +08:00
|
|
|
u32 value;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* much vendor magic here */
|
|
|
|
value = 0x6500;
|
|
|
|
iowrite32(value, adapter->hw.hw_addr + 0x12FC);
|
|
|
|
/* pcie flow control mode change */
|
|
|
|
value = ioread32(adapter->hw.hw_addr + 0x1008);
|
|
|
|
value |= 0x8000;
|
|
|
|
iowrite32(value, adapter->hw.hw_addr + 0x1008);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2007-07-16 00:03:29 +08:00
|
|
|
* When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
|
|
|
|
* on PCI Command register is disable.
|
|
|
|
* The function enable this bit.
|
|
|
|
* Brackett, 2006/03/15
|
2007-02-08 23:42:37 +08:00
|
|
|
*/
|
2007-07-16 00:03:29 +08:00
|
|
|
static void atl1_via_workaround(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:29 +08:00
|
|
|
unsigned long value;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
|
|
|
|
if (value & PCI_COMMAND_INTX_DISABLE)
|
|
|
|
value &= ~PCI_COMMAND_INTX_DISABLE;
|
|
|
|
iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void atl1_inc_smb(struct atl1_adapter *adapter)
|
|
|
|
{
|
2008-11-01 07:52:03 +08:00
|
|
|
struct net_device *netdev = adapter->netdev;
|
2007-07-16 00:03:29 +08:00
|
|
|
struct stats_msg_block *smb = adapter->smb.smb;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2014-01-13 01:50:40 +08:00
|
|
|
u64 new_rx_errors = smb->rx_frag +
|
|
|
|
smb->rx_fcs_err +
|
|
|
|
smb->rx_len_err +
|
|
|
|
smb->rx_sz_ov +
|
|
|
|
smb->rx_rxf_ov +
|
|
|
|
smb->rx_rrd_ov +
|
|
|
|
smb->rx_align_err;
|
|
|
|
u64 new_tx_errors = smb->tx_late_col +
|
|
|
|
smb->tx_abort_col +
|
|
|
|
smb->tx_underrun +
|
|
|
|
smb->tx_trunc;
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* Fill out the OS statistics structure */
|
2014-01-13 01:50:40 +08:00
|
|
|
adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors;
|
|
|
|
adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors;
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
|
|
|
|
adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
|
|
|
|
adapter->soft_stats.multicast += smb->rx_mcast;
|
2014-01-13 01:50:40 +08:00
|
|
|
adapter->soft_stats.collisions += smb->tx_1_col +
|
|
|
|
smb->tx_2_col +
|
|
|
|
smb->tx_late_col +
|
|
|
|
smb->tx_abort_col;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* Rx Errors */
|
2014-01-13 01:50:40 +08:00
|
|
|
adapter->soft_stats.rx_errors += new_rx_errors;
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
|
|
|
|
adapter->soft_stats.rx_length_errors += smb->rx_len_err;
|
|
|
|
adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
|
|
|
|
adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.rx_pause += smb->rx_pause;
|
|
|
|
adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
|
|
|
|
adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* Tx Errors */
|
2014-01-13 01:50:40 +08:00
|
|
|
adapter->soft_stats.tx_errors += new_tx_errors;
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
|
|
|
|
adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
|
|
|
|
adapter->soft_stats.tx_window_errors += smb->tx_late_col;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.excecol += smb->tx_abort_col;
|
|
|
|
adapter->soft_stats.deffer += smb->tx_defer;
|
|
|
|
adapter->soft_stats.scc += smb->tx_1_col;
|
|
|
|
adapter->soft_stats.mcc += smb->tx_2_col;
|
|
|
|
adapter->soft_stats.latecol += smb->tx_late_col;
|
2019-04-23 22:30:07 +08:00
|
|
|
adapter->soft_stats.tx_underrun += smb->tx_underrun;
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.tx_trunc += smb->tx_trunc;
|
|
|
|
adapter->soft_stats.tx_pause += smb->tx_pause;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-11-01 07:52:03 +08:00
|
|
|
netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
|
|
|
|
netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
|
|
|
|
netdev->stats.multicast = adapter->soft_stats.multicast;
|
|
|
|
netdev->stats.collisions = adapter->soft_stats.collisions;
|
|
|
|
netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
|
|
|
|
netdev->stats.rx_length_errors =
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.rx_length_errors;
|
2008-11-01 07:52:03 +08:00
|
|
|
netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
|
|
|
|
netdev->stats.rx_frame_errors =
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.rx_frame_errors;
|
2008-11-01 07:52:03 +08:00
|
|
|
netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
|
2014-01-13 01:50:40 +08:00
|
|
|
netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov;
|
2008-11-01 07:52:03 +08:00
|
|
|
netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
|
|
|
|
netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
|
|
|
|
netdev->stats.tx_aborted_errors =
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.tx_aborted_errors;
|
2008-11-01 07:52:03 +08:00
|
|
|
netdev->stats.tx_window_errors =
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.tx_window_errors;
|
2008-11-01 07:52:03 +08:00
|
|
|
netdev->stats.tx_carrier_errors =
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->soft_stats.tx_carrier_errors;
|
2014-01-13 01:50:40 +08:00
|
|
|
|
|
|
|
netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
|
|
|
|
netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void atl1_update_mailbox(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:29 +08:00
|
|
|
unsigned long flags;
|
|
|
|
u32 tpd_next_to_use;
|
|
|
|
u32 rfd_next_to_use;
|
|
|
|
u32 rrd_next_to_clean;
|
2007-02-08 23:42:37 +08:00
|
|
|
u32 value;
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
spin_lock_irqsave(&adapter->mb_lock, flags);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
|
|
|
|
rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
|
|
|
|
rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
|
|
|
|
MB_RFD_PROD_INDX_SHIFT) |
|
|
|
|
((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
|
|
|
|
MB_RRD_CONS_INDX_SHIFT) |
|
|
|
|
((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
|
|
|
|
MB_TPD_PROD_INDX_SHIFT);
|
|
|
|
iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->mb_lock, flags);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
|
|
|
|
struct rx_return_desc *rrd, u16 offset)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:29 +08:00
|
|
|
struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
|
|
|
|
rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
|
|
|
|
if (++rfd_ring->next_to_clean == rfd_ring->count) {
|
|
|
|
rfd_ring->next_to_clean = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void atl1_update_rfd_index(struct atl1_adapter *adapter,
|
|
|
|
struct rx_return_desc *rrd)
|
|
|
|
{
|
|
|
|
u16 num_buf;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
|
|
|
|
adapter->rx_buffer_len;
|
|
|
|
if (rrd->num_buf == num_buf)
|
|
|
|
/* clean alloc flag for bad rrd */
|
|
|
|
atl1_clean_alloc_flag(adapter, rrd, num_buf);
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
static void atl1_rx_checksum(struct atl1_adapter *adapter,
|
|
|
|
struct rx_return_desc *rrd, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct pci_dev *pdev = adapter->pdev;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-08-05 08:05:10 +08:00
|
|
|
/*
|
|
|
|
* The L1 hardware contains a bug that erroneously sets the
|
|
|
|
* PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
|
|
|
|
* fragmented IP packet is received, even though the packet
|
|
|
|
* is perfectly valid and its checksum is correct. There's
|
|
|
|
* no way to distinguish between one of these good packets
|
|
|
|
* and a packet that actually contains a TCP/UDP checksum
|
|
|
|
* error, so all we can do is allow it to be handed up to
|
|
|
|
* the higher layers and let it be sorted out there.
|
|
|
|
*/
|
|
|
|
|
2010-09-03 04:07:41 +08:00
|
|
|
skb_checksum_none_assert(skb);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
|
|
|
|
if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
|
|
|
|
ERR_FLAG_CODE | ERR_FLAG_OV)) {
|
|
|
|
adapter->hw_csum_err++;
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_msg_rx_err(adapter))
|
|
|
|
dev_printk(KERN_DEBUG, &pdev->dev,
|
|
|
|
"rx checksum error\n");
|
2007-07-16 00:03:29 +08:00
|
|
|
return;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
/* not IPv4 */
|
|
|
|
if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
|
|
|
|
/* checksum is invalid, but it's not an IPv4 pkt, so ok */
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* IPv4 packet */
|
|
|
|
if (likely(!(rrd->err_flg &
|
|
|
|
(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
|
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
adapter->hw_csum_good++;
|
|
|
|
return;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-07-16 00:03:29 +08:00
|
|
|
* atl1_alloc_rx_buffers - Replace used receive buffers
|
|
|
|
* @adapter: address of board private structure
|
|
|
|
*/
|
|
|
|
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:29 +08:00
|
|
|
struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
|
|
|
|
struct pci_dev *pdev = adapter->pdev;
|
|
|
|
struct page *page;
|
|
|
|
unsigned long offset;
|
|
|
|
struct atl1_buffer *buffer_info, *next_info;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
u16 num_alloc = 0;
|
|
|
|
u16 rfd_next_to_use, next_next;
|
|
|
|
struct rx_free_desc *rfd_desc;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
|
|
|
|
if (++next_next == rfd_ring->count)
|
|
|
|
next_next = 0;
|
|
|
|
buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
|
|
|
|
next_info = &rfd_ring->buffer_info[next_next];
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
while (!buffer_info->alloced && !next_info->alloced) {
|
|
|
|
if (buffer_info->skb) {
|
|
|
|
buffer_info->alloced = 1;
|
|
|
|
goto next;
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2009-10-13 13:34:20 +08:00
|
|
|
skb = netdev_alloc_skb_ip_align(adapter->netdev,
|
|
|
|
adapter->rx_buffer_len);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (unlikely(!skb)) {
|
|
|
|
/* Better luck next round */
|
2014-01-13 01:50:40 +08:00
|
|
|
adapter->soft_stats.rx_dropped++;
|
2007-07-16 00:03:29 +08:00
|
|
|
break;
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
buffer_info->alloced = 1;
|
|
|
|
buffer_info->skb = skb;
|
|
|
|
buffer_info->length = (u16) adapter->rx_buffer_len;
|
|
|
|
page = virt_to_page(skb->data);
|
2017-04-22 09:21:10 +08:00
|
|
|
offset = offset_in_page(skb->data);
|
net: atheros: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'atl1e_setup_ring_resources()' (atl1e_main.c),
'atl1_setup_ring_resources()' (atl1.c) and 'atl2_setup_ring_resources()'
(atl2.c) GFP_KERNEL can be used because it can be called from a .ndo_open.
'atl1_setup_ring_resources()' (atl1.c) can also be called from a
'.set_ringparam' (see struct ethtool_ops) where sleep is also allowed.
Both cases are protected by 'rtnl_lock()' which is a mutex. So these
function can sleep.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-08-23 16:03:53 +08:00
|
|
|
buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
|
2007-07-16 00:03:29 +08:00
|
|
|
adapter->rx_buffer_len,
|
net: atheros: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'atl1e_setup_ring_resources()' (atl1e_main.c),
'atl1_setup_ring_resources()' (atl1.c) and 'atl2_setup_ring_resources()'
(atl2.c) GFP_KERNEL can be used because it can be called from a .ndo_open.
'atl1_setup_ring_resources()' (atl1.c) can also be called from a
'.set_ringparam' (see struct ethtool_ops) where sleep is also allowed.
Both cases are protected by 'rtnl_lock()' which is a mutex. So these
function can sleep.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-08-23 16:03:53 +08:00
|
|
|
DMA_FROM_DEVICE);
|
2007-07-16 00:03:29 +08:00
|
|
|
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
|
|
|
|
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
|
|
|
|
rfd_desc->coalese = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
next:
|
|
|
|
rfd_next_to_use = next_next;
|
|
|
|
if (unlikely(++next_next == rfd_ring->count))
|
|
|
|
next_next = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
|
|
|
|
next_info = &rfd_ring->buffer_info[next_next];
|
|
|
|
num_alloc++;
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
if (num_alloc) {
|
|
|
|
/*
|
|
|
|
* Force memory writes to complete before letting h/w
|
|
|
|
* know there are new descriptors to fetch. (Only
|
|
|
|
* applicable for weak-ordered memory model archs,
|
|
|
|
* such as IA-64).
|
|
|
|
*/
|
|
|
|
wmb();
|
|
|
|
atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
|
|
|
|
}
|
|
|
|
return num_alloc;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2012-04-13 14:09:47 +08:00
|
|
|
/*
 * atl1_intr_rx - process received packets up to the NAPI budget
 * @adapter: board private structure
 * @budget: maximum number of RX descriptors to process in this call
 *
 * Walks the RX return descriptor (RRD) ring, hands good packets to the
 * network stack, recycles descriptors for bad ones, refills the RX free
 * descriptor (RFD) ring, and finally tells the hardware (via the mailbox
 * register) how far software has consumed the rings.
 *
 * Returns the number of RRDs processed (<= budget).
 */
static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (count < budget) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		/* i is a one-shot retry budget for a suspect RRD */
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;
			else if (netif_msg_rx_err(adapter)) {
				/* multi-buffer RRDs are unexpected with the
				 * configured buffer size; dump the descriptor
				 * for debugging
				 */
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"unexpected RRD buffer count\n");
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx_buf_len = %d\n",
					adapter->rx_buffer_len);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD num_buf = %d\n",
					rrd->num_buf);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_len = %d\n",
					rrd->xsz.xsum_sz.pkt_size);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_flg = 0x%08X\n",
					rrd->pkt_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD err_flg = 0x%08X\n",
					rrd->err_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD vlan_tag = 0x%08X\n",
					rrd->vlan_tag);
			}

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely;
				 * wait briefly and re-read it once
				 */
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"bad RRD\n");
			/* see if update RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd: mark consumed and advance, counting it
			 * against the budget, then skip to the next RRD
			 */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd still not be updated */

			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			/* drop the frame only for errors other than
			 * checksum/length, which are handled downstream
			 */
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		/* hardware length includes the FCS; strip it */
		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) {
			/* reassemble the VLAN tag from the hardware's
			 * permuted bit layout
			 */
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
				((rrd->vlan_tag & 7) << 13) |
				((rrd->vlan_tag & 8) << 9);

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		}
		netif_receive_skb(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	/* replenish the RFD ring with fresh buffers */
	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ? */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		/* mb_lock serializes mailbox writes with the TX path */
		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
		    atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
		    atomic_read(&adapter->rrd_ring.next_to_clean);
		/* the mailbox register packs all three ring indices
		 * into a single 32-bit write
		 */
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}

	return count;
}
|
|
|
|
|
2012-04-13 14:09:48 +08:00
|
|
|
/*
 * atl1_intr_tx - reclaim completed TX descriptors
 * @adapter: board private structure
 *
 * Walks the TX packet descriptor (TPD) ring from the software
 * next-to-clean index up to the hardware's consumer index (read from the
 * coalescing message block, CMB), unmapping DMA buffers and freeing the
 * transmitted skbs.  Wakes the TX queue if it was stopped and the link
 * is up.
 *
 * Returns the number of descriptors reclaimed.
 */
static int atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;
	int count = 0;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	/* hardware reports its consumer index through the CMB */
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
			buffer_info->dma = 0;
		}

		/* only the last TPD of a packet carries the skb */
		if (buffer_info->skb) {
			dev_consume_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;

		count++;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	/* descriptors were freed; restart the queue if it had stalled */
	if (netif_queue_stopped(adapter->netdev) &&
	    netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);

	return count;
}
|
|
|
|
|
2007-07-20 07:45:10 +08:00
|
|
|
static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
|
|
|
|
u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
|
2010-09-23 13:40:09 +08:00
|
|
|
return (next_to_clean > next_to_use) ?
|
2007-07-16 00:03:27 +08:00
|
|
|
next_to_clean - next_to_use - 1 :
|
2010-09-23 13:40:09 +08:00
|
|
|
tpd_ring->count + next_to_clean - next_to_use - 1;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * atl1_tso - set up a TX packet descriptor for TCP segmentation offload
 * @adapter: board private structure (unused here beyond the signature)
 * @skb: the packet to transmit
 * @ptpd: the TX packet descriptor whose word3 fields are filled in
 *
 * Return values:
 *   0   - not a GSO packet (or not IPv4); no TSO setup performed
 *   1   - header-only packet; checksum offload fields were set instead
 *   3   - TSO fields (MSS, header lengths, segment-enable) were set
 *   -2  - unsupported IP header offset within the L2 header
 *   <0  - skb_cow_head() failure (error code passed through)
 */
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 hdr_len, ip_off;
	u32 real_len;

	if (skb_shinfo(skb)->gso_size) {
		int err;

		/* headers must be writable before we patch checksums */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			/* trim any padding beyond the IP total length */
			real_len = (((unsigned char *)iph - skb->data) +
				ntohs(iph->tot_len));
			if (real_len < skb->len)
				pskb_trim(skb, real_len);
			hdr_len = skb_tcp_all_headers(skb);
			if (skb->len == hdr_len) {
				/* packet is headers only: no payload to
				 * segment, so fall back to plain IP/TCP
				 * checksum offload
				 */
				iph->check = 0;
				tcp_hdr(skb)->check =
					~csum_tcpudp_magic(iph->saddr,
					iph->daddr, tcp_hdrlen(skb),
					IPPROTO_TCP, 0);
				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
					TPD_IPHL_SHIFT;
				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
					TPD_TCPHDRLEN_MASK) <<
					TPD_TCPHDRLEN_SHIFT;
				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
				return 1;
			}

			/* seed the pseudo-header checksum; the hardware
			 * completes the TCP checksum per segment
			 */
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
					iph->daddr, 0, IPPROTO_TCP, 0);
			ip_off = (unsigned char *)iph -
				(unsigned char *) skb_network_header(skb);
			if (ip_off == 8) /* 802.3-SNAP frame */
				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
			else if (ip_off != 0)
				return -2;

			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
				TPD_IPHL_SHIFT;
			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
				TPD_MSS_MASK) << TPD_MSS_SHIFT;
			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
			return 3;
		}
	}
	return 0;
}
|
|
|
|
|
|
|
|
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
|
2008-02-03 09:50:12 +08:00
|
|
|
struct tx_packet_desc *ptpd)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
u8 css, cso;
|
|
|
|
|
|
|
|
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
2010-12-14 23:24:08 +08:00
|
|
|
css = skb_checksum_start_offset(skb);
|
2008-02-03 09:50:12 +08:00
|
|
|
cso = css + (u8) skb->csum_offset;
|
|
|
|
if (unlikely(css & 0x1)) {
|
|
|
|
/* L1 hardware requires an even number here */
|
|
|
|
if (netif_msg_tx_err(adapter))
|
|
|
|
dev_printk(KERN_DEBUG, &adapter->pdev->dev,
|
|
|
|
"payload offset not an even number\n");
|
2007-02-08 23:42:37 +08:00
|
|
|
return -1;
|
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
|
|
|
|
TPD_PLOADOFFSET_SHIFT;
|
|
|
|
ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
|
|
|
|
TPD_CCSUMOFFSET_SHIFT;
|
|
|
|
ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
|
2007-02-08 23:42:37 +08:00
|
|
|
return true;
|
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
return 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:27 +08:00
|
|
|
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
|
2008-02-03 09:50:12 +08:00
|
|
|
struct tx_packet_desc *ptpd)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
|
|
|
|
struct atl1_buffer *buffer_info;
|
2008-02-03 09:50:12 +08:00
|
|
|
u16 buf_len = skb->len;
|
2007-02-08 23:42:37 +08:00
|
|
|
struct page *page;
|
|
|
|
unsigned long offset;
|
|
|
|
unsigned int nr_frags;
|
|
|
|
unsigned int f;
|
2008-02-03 09:50:12 +08:00
|
|
|
int retval;
|
|
|
|
u16 next_to_use;
|
|
|
|
u16 data_len;
|
|
|
|
u8 hdr_len;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
buf_len -= skb->data_len;
|
2007-02-08 23:42:37 +08:00
|
|
|
nr_frags = skb_shinfo(skb)->nr_frags;
|
2008-02-03 09:50:12 +08:00
|
|
|
next_to_use = atomic_read(&tpd_ring->next_to_use);
|
|
|
|
buffer_info = &tpd_ring->buffer_info[next_to_use];
|
2009-04-11 22:50:23 +08:00
|
|
|
BUG_ON(buffer_info->skb);
|
2008-02-03 09:50:12 +08:00
|
|
|
/* put skb in last TPD */
|
|
|
|
buffer_info->skb = NULL;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
|
|
|
|
if (retval) {
|
|
|
|
/* TSO */
|
2022-06-30 23:07:50 +08:00
|
|
|
hdr_len = skb_tcp_all_headers(skb);
|
2008-02-03 09:50:12 +08:00
|
|
|
buffer_info->length = hdr_len;
|
2007-02-08 23:42:37 +08:00
|
|
|
page = virt_to_page(skb->data);
|
2017-04-22 09:21:10 +08:00
|
|
|
offset = offset_in_page(skb->data);
|
net: atheros: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'atl1e_setup_ring_resources()' (atl1e_main.c),
'atl1_setup_ring_resources()' (atl1.c) and 'atl2_setup_ring_resources()'
(atl2.c) GFP_KERNEL can be used because it can be called from a .ndo_open.
'atl1_setup_ring_resources()' (atl1.c) can also be called from a
'.set_ringparam' (see struct ethtool_ops) where sleep is also allowed.
Both cases are protected by 'rtnl_lock()' which is a mutex. So these
function can sleep.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-08-23 16:03:53 +08:00
|
|
|
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
|
2008-02-03 09:50:12 +08:00
|
|
|
offset, hdr_len,
|
net: atheros: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'atl1e_setup_ring_resources()' (atl1e_main.c),
'atl1_setup_ring_resources()' (atl1.c) and 'atl2_setup_ring_resources()'
(atl2.c) GFP_KERNEL can be used because it can be called from a .ndo_open.
'atl1_setup_ring_resources()' (atl1.c) can also be called from a
'.set_ringparam' (see struct ethtool_ops) where sleep is also allowed.
Both cases are protected by 'rtnl_lock()' which is a mutex. So these
function can sleep.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-08-23 16:03:53 +08:00
|
|
|
DMA_TO_DEVICE);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (++next_to_use == tpd_ring->count)
|
|
|
|
next_to_use = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (buf_len > hdr_len) {
|
|
|
|
int i, nseg;
|
2007-10-06 08:19:47 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
data_len = buf_len - hdr_len;
|
|
|
|
nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
|
2007-07-16 00:03:27 +08:00
|
|
|
ATL1_MAX_TX_BUF_LEN;
|
2008-02-03 09:50:12 +08:00
|
|
|
for (i = 0; i < nseg; i++) {
|
2007-02-08 23:42:37 +08:00
|
|
|
buffer_info =
|
2008-02-03 09:50:12 +08:00
|
|
|
&tpd_ring->buffer_info[next_to_use];
|
2007-02-08 23:42:37 +08:00
|
|
|
buffer_info->skb = NULL;
|
|
|
|
buffer_info->length =
|
2007-07-16 00:03:26 +08:00
|
|
|
(ATL1_MAX_TX_BUF_LEN >=
|
2008-02-03 09:50:12 +08:00
|
|
|
data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
|
|
|
|
data_len -= buffer_info->length;
|
2007-02-08 23:42:37 +08:00
|
|
|
page = virt_to_page(skb->data +
|
2008-02-03 09:50:12 +08:00
|
|
|
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
|
2017-04-22 09:21:10 +08:00
|
|
|
offset = offset_in_page(skb->data +
|
|
|
|
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
|
net: atheros: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'atl1e_setup_ring_resources()' (atl1e_main.c),
'atl1_setup_ring_resources()' (atl1.c) and 'atl2_setup_ring_resources()'
(atl2.c) GFP_KERNEL can be used because it can be called from a .ndo_open.
'atl1_setup_ring_resources()' (atl1.c) can also be called from a
'.set_ringparam' (see struct ethtool_ops) where sleep is also allowed.
Both cases are protected by 'rtnl_lock()' which is a mutex. So these
function can sleep.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-08-23 16:03:53 +08:00
|
|
|
buffer_info->dma = dma_map_page(&adapter->pdev->dev,
|
|
|
|
page, offset,
|
|
|
|
buffer_info->length,
|
|
|
|
DMA_TO_DEVICE);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (++next_to_use == tpd_ring->count)
|
|
|
|
next_to_use = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2008-02-03 09:50:12 +08:00
|
|
|
/* not TSO */
|
|
|
|
buffer_info->length = buf_len;
|
2007-02-08 23:42:37 +08:00
|
|
|
page = virt_to_page(skb->data);
|
2017-04-22 09:21:10 +08:00
|
|
|
offset = offset_in_page(skb->data);
|
net: atheros: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'atl1e_setup_ring_resources()' (atl1e_main.c),
'atl1_setup_ring_resources()' (atl1.c) and 'atl2_setup_ring_resources()'
(atl2.c) GFP_KERNEL can be used because it can be called from a .ndo_open.
'atl1_setup_ring_resources()' (atl1.c) can also be called from a
'.set_ringparam' (see struct ethtool_ops) where sleep is also allowed.
Both cases are protected by 'rtnl_lock()' which is a mutex. So these
function can sleep.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-08-23 16:03:53 +08:00
|
|
|
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
|
|
|
|
offset, buf_len,
|
|
|
|
DMA_TO_DEVICE);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (++next_to_use == tpd_ring->count)
|
|
|
|
next_to_use = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for (f = 0; f < nr_frags; f++) {
|
2019-07-23 11:08:25 +08:00
|
|
|
const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
|
2008-02-03 09:50:12 +08:00
|
|
|
u16 i, nseg;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2011-10-19 05:00:24 +08:00
|
|
|
buf_len = skb_frag_size(frag);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
|
|
|
|
ATL1_MAX_TX_BUF_LEN;
|
|
|
|
for (i = 0; i < nseg; i++) {
|
|
|
|
buffer_info = &tpd_ring->buffer_info[next_to_use];
|
2009-04-11 22:50:23 +08:00
|
|
|
BUG_ON(buffer_info->skb);
|
|
|
|
|
2007-02-08 23:42:37 +08:00
|
|
|
buffer_info->skb = NULL;
|
2008-02-03 09:50:12 +08:00
|
|
|
buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
|
|
|
|
ATL1_MAX_TX_BUF_LEN : buf_len;
|
|
|
|
buf_len -= buffer_info->length;
|
2011-08-30 07:18:22 +08:00
|
|
|
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
|
|
|
|
frag, i * ATL1_MAX_TX_BUF_LEN,
|
2011-10-06 18:10:48 +08:00
|
|
|
buffer_info->length, DMA_TO_DEVICE);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (++next_to_use == tpd_ring->count)
|
|
|
|
next_to_use = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* last tpd's buffer-info */
|
|
|
|
buffer_info->skb = skb;
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
 * atl1_tx_queue - finalize and publish TPD descriptors for one packet
 * @adapter: board private structure
 * @count: number of descriptors the packet occupies
 * @ptpd: template descriptor holding the per-packet flags (VLAN/TSO/csum)
 *
 * Copies the template into each descriptor of the chain, fills in the
 * per-buffer DMA address and length, marks the TSO header descriptor and
 * the end-of-packet descriptor, then advances the ring's next_to_use.
 */
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 j;
	u32 val;
	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
		/* propagate the packet-wide flags from the template */
		if (tpd != ptpd)
			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
		tpd->word2 |= (cpu_to_le16(buffer_info->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

		/*
		 * if this is the first packet in a TSO chain, set
		 * TPD_HDRFLAG, otherwise, clear it.
		 */
		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
			TPD_SEGMENT_EN_MASK;
		if (val) {
			if (!j)
				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
			else
				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
		}

		/* the last descriptor of the packet carries EOP */
		if (j == (count - 1))
			tpd->word3 |= 1 << TPD_EOP_SHIFT;

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, next_to_use);
}
|
|
|
|
|
2009-09-01 03:50:58 +08:00
|
|
|
static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
|
|
|
|
struct net_device *netdev)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
|
2010-04-15 06:59:40 +08:00
|
|
|
int len;
|
2007-02-08 23:42:37 +08:00
|
|
|
int tso;
|
|
|
|
int count = 1;
|
|
|
|
int ret_val;
|
2008-02-03 09:50:12 +08:00
|
|
|
struct tx_packet_desc *ptpd;
|
2007-02-08 23:42:37 +08:00
|
|
|
u16 vlan_tag;
|
|
|
|
unsigned int nr_frags = 0;
|
|
|
|
unsigned int mss = 0;
|
|
|
|
unsigned int f;
|
|
|
|
unsigned int proto_hdr_len;
|
|
|
|
|
2010-04-15 06:59:40 +08:00
|
|
|
len = skb_headlen(skb);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (unlikely(skb->len <= 0)) {
|
2007-02-08 23:42:37 +08:00
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nr_frags = skb_shinfo(skb)->nr_frags;
|
|
|
|
for (f = 0; f < nr_frags; f++) {
|
2011-10-19 05:00:24 +08:00
|
|
|
unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
|
|
|
|
count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
|
|
|
|
ATL1_MAX_TX_BUF_LEN;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
mss = skb_shinfo(skb)->gso_size;
|
|
|
|
if (mss) {
|
2009-06-12 08:23:24 +08:00
|
|
|
if (skb->protocol == htons(ETH_P_IP)) {
|
2022-06-30 23:07:50 +08:00
|
|
|
proto_hdr_len = skb_tcp_all_headers(skb);
|
2007-02-08 23:42:37 +08:00
|
|
|
if (unlikely(proto_hdr_len > len)) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
/* need additional TPD ? */
|
|
|
|
if (proto_hdr_len != len)
|
|
|
|
count += (len - proto_hdr_len +
|
2007-07-16 00:03:27 +08:00
|
|
|
ATL1_MAX_TX_BUF_LEN - 1) /
|
|
|
|
ATL1_MAX_TX_BUF_LEN;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-07-20 07:45:10 +08:00
|
|
|
if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
|
2007-02-08 23:42:37 +08:00
|
|
|
/* not enough descriptors */
|
|
|
|
netif_stop_queue(netdev);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_msg_tx_queued(adapter))
|
|
|
|
dev_printk(KERN_DEBUG, &adapter->pdev->dev,
|
|
|
|
"tx busy\n");
|
2007-02-08 23:42:37 +08:00
|
|
|
return NETDEV_TX_BUSY;
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
ptpd = ATL1_TPD_DESC(tpd_ring,
|
|
|
|
(u16) atomic_read(&tpd_ring->next_to_use));
|
|
|
|
memset(ptpd, 0, sizeof(struct tx_packet_desc));
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2015-01-14 00:13:44 +08:00
|
|
|
if (skb_vlan_tag_present(skb)) {
|
|
|
|
vlan_tag = skb_vlan_tag_get(skb);
|
2007-02-08 23:42:37 +08:00
|
|
|
vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
|
|
|
|
((vlan_tag >> 9) & 0x8);
|
2008-02-03 09:50:12 +08:00
|
|
|
ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
|
2008-10-30 00:01:36 +08:00
|
|
|
ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
|
|
|
|
TPD_VLANTAG_SHIFT;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
tso = atl1_tso(adapter, skb, ptpd);
|
2007-02-08 23:42:37 +08:00
|
|
|
if (tso < 0) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!tso) {
|
2008-02-03 09:50:12 +08:00
|
|
|
ret_val = atl1_tx_csum(adapter, skb, ptpd);
|
2007-02-08 23:42:37 +08:00
|
|
|
if (ret_val < 0) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
atl1_tx_map(adapter, skb, ptpd);
|
|
|
|
atl1_tx_queue(adapter, count, ptpd);
|
2007-02-08 23:42:37 +08:00
|
|
|
atl1_update_mailbox(adapter);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
|
2012-04-13 14:09:48 +08:00
|
|
|
static int atl1_rings_clean(struct napi_struct *napi, int budget)
|
2012-04-13 14:09:47 +08:00
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
|
|
|
|
int work_done = atl1_intr_rx(adapter, budget);
|
|
|
|
|
2012-04-13 14:09:48 +08:00
|
|
|
if (atl1_intr_tx(adapter))
|
|
|
|
work_done = budget;
|
|
|
|
|
2012-04-13 14:09:47 +08:00
|
|
|
/* Let's come again to process some more packets */
|
|
|
|
if (work_done >= budget)
|
|
|
|
return work_done;
|
|
|
|
|
2017-01-31 00:22:01 +08:00
|
|
|
napi_complete_done(napi, work_done);
|
2012-04-13 14:09:47 +08:00
|
|
|
/* re-enable Interrupt */
|
2012-04-13 14:09:52 +08:00
|
|
|
if (likely(adapter->int_enabled))
|
|
|
|
atlx_imr_set(adapter, IMR_NORMAL_MASK);
|
2012-04-13 14:09:47 +08:00
|
|
|
return work_done;
|
|
|
|
}
|
|
|
|
|
2012-04-13 14:09:48 +08:00
|
|
|
static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
|
2012-04-13 14:09:47 +08:00
|
|
|
{
|
2012-04-13 14:09:52 +08:00
|
|
|
if (!napi_schedule_prep(&adapter->napi))
|
|
|
|
/* It is possible in case even the RX/TX ints are disabled via IMR
|
|
|
|
* register the ISR bits are set anyway (but do not produce IRQ).
|
|
|
|
* To handle such situation the napi functions used to check is
|
|
|
|
* something scheduled or not.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
__napi_schedule(&adapter->napi);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disable RX/TX ints via IMR register if it is
|
|
|
|
* allowed. NAPI handler must reenable them in same
|
|
|
|
* way.
|
|
|
|
*/
|
|
|
|
if (!adapter->int_enabled)
|
2012-04-13 14:09:47 +08:00
|
|
|
return 1;
|
|
|
|
|
2012-04-13 14:09:52 +08:00
|
|
|
atlx_imr_set(adapter, IMR_NORXTX_MASK);
|
|
|
|
return 1;
|
2012-04-13 14:09:47 +08:00
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-07-16 00:03:29 +08:00
|
|
|
* atl1_intr - Interrupt Handler
|
|
|
|
* @irq: interrupt number
|
|
|
|
* @data: pointer to a network interface device structure
|
2007-02-08 23:42:37 +08:00
|
|
|
*/
|
2007-07-16 00:03:29 +08:00
|
|
|
static irqreturn_t atl1_intr(int irq, void *data)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:29 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(data);
|
|
|
|
u32 status;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
status = adapter->cmb.cmb->int_stats;
|
|
|
|
if (!status)
|
|
|
|
return IRQ_NONE;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2012-04-13 14:09:54 +08:00
|
|
|
/* clear CMB interrupt status at once,
|
|
|
|
* but leave rx/tx interrupt status in case it should be dropped
|
|
|
|
* only if rx/tx processing queued. In other case interrupt
|
|
|
|
* can be lost.
|
|
|
|
*/
|
|
|
|
adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2012-04-13 14:09:53 +08:00
|
|
|
if (status & ISR_GPHY) /* clear phy status */
|
|
|
|
atlx_clear_phy_int(adapter);
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2012-04-13 14:09:53 +08:00
|
|
|
/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
|
|
|
|
iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2012-04-13 14:09:53 +08:00
|
|
|
/* check if SMB intr */
|
|
|
|
if (status & ISR_SMB)
|
|
|
|
atl1_inc_smb(adapter);
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2012-04-13 14:09:53 +08:00
|
|
|
/* check if PCIE PHY Link down */
|
|
|
|
if (status & ISR_PHY_LINKDOWN) {
|
|
|
|
if (netif_msg_intr(adapter))
|
|
|
|
dev_printk(KERN_DEBUG, &adapter->pdev->dev,
|
|
|
|
"pcie phy link down %x\n", status);
|
|
|
|
if (netif_running(adapter->netdev)) { /* reset MAC */
|
2012-04-13 14:09:49 +08:00
|
|
|
atlx_irq_disable(adapter);
|
2012-04-11 14:15:03 +08:00
|
|
|
schedule_work(&adapter->reset_dev_task);
|
2007-07-16 00:03:29 +08:00
|
|
|
return IRQ_HANDLED;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
2012-04-13 14:09:53 +08:00
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2012-04-13 14:09:53 +08:00
|
|
|
/* check if DMA read/write error ? */
|
|
|
|
if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
|
|
|
|
if (netif_msg_intr(adapter))
|
|
|
|
dev_printk(KERN_DEBUG, &adapter->pdev->dev,
|
|
|
|
"pcie DMA r/w error (status = 0x%x)\n",
|
|
|
|
status);
|
|
|
|
atlx_irq_disable(adapter);
|
2012-04-16 01:19:04 +08:00
|
|
|
schedule_work(&adapter->reset_dev_task);
|
2012-04-13 14:09:53 +08:00
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2012-04-13 14:09:53 +08:00
|
|
|
/* link event */
|
|
|
|
if (status & ISR_GPHY) {
|
|
|
|
adapter->soft_stats.tx_carrier_errors++;
|
|
|
|
atl1_check_for_link(adapter);
|
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2012-04-13 14:09:53 +08:00
|
|
|
/* transmit or receive event */
|
2012-04-13 14:09:54 +08:00
|
|
|
if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
|
|
|
|
atl1_sched_rings_clean(adapter))
|
|
|
|
adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
|
|
|
|
~(ISR_CMB_TX | ISR_CMB_RX);
|
2012-04-13 14:09:53 +08:00
|
|
|
|
|
|
|
/* rx exception */
|
|
|
|
if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
|
|
|
|
ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
|
|
|
|
ISR_HOST_RRD_OV))) {
|
|
|
|
if (netif_msg_intr(adapter))
|
|
|
|
dev_printk(KERN_DEBUG,
|
|
|
|
&adapter->pdev->dev,
|
|
|
|
"rx exception, ISR = 0x%x\n",
|
|
|
|
status);
|
|
|
|
atl1_sched_rings_clean(adapter);
|
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
|
|
|
|
/* re-enable Interrupt */
|
|
|
|
iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
|
|
|
|
return IRQ_HANDLED;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-07-16 00:03:29 +08:00
|
|
|
* atl1_phy_config - Timer Call-back
|
drivers/net/ethernet: clean up mis-targeted comments
As part of the W=1 cleanups for ethernet, a million [*] driver
comments had to be cleaned up to get the W=1 compilation to
succeed. This change finally makes the drivers/net/ethernet tree
compile with W=1 set on the command line. NOTE: The kernel uses
kdoc style (see Documentation/process/kernel-doc.rst) when
documenting code, not doxygen or other styles.
After this patch the x86_64 build has no warnings from W=1, however
scripts/kernel-doc says there are 1545 more warnings in source files, that
I need to develop a script to fix in a followup patch.
The errors fixed here are all kdoc of a few classes, with a few outliers:
In file included from drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c:10:
drivers/net/ethernet/qlogic/netxen/netxen_nic.h:1193:18: warning: ‘FW_DUMP_LEVELS’ defined but not used [-Wunused-const-variable=]
1193 | static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
| ^~~~~~~~~~~~~~
... repeats 4 times...
drivers/net/ethernet/sun/cassini.c:2084:24: warning: suggest braces around empty body in an ‘else’ statement [-Wempty-body]
2084 | RX_USED_ADD(page, i);
drivers/net/ethernet/natsemi/ns83820.c: In function ‘phy_intr’:
drivers/net/ethernet/natsemi/ns83820.c:603:6: warning: variable ‘tbisr’ set but not used [-Wunused-but-set-variable]
603 | u32 tbisr, tanar, tanlpar;
| ^~~~~
drivers/net/ethernet/natsemi/ns83820.c: In function ‘ns83820_get_link_ksettings’:
drivers/net/ethernet/natsemi/ns83820.c:1207:11: warning: variable ‘tanar’ set but not used [-Wunused-but-set-variable]
1207 | u32 cfg, tanar, tbicr;
| ^~~~~
drivers/net/ethernet/packetengines/yellowfin.c:1063:18: warning: variable ‘yf_size’ set but not used [-Wunused-but-set-variable]
1063 | int data_size, yf_size;
| ^~~~~~~
Normal kdoc fixes:
warning: Function parameter or member 'x' not described in 'y'
warning: Excess function parameter 'x' description in 'y'
warning: Cannot understand <string> on line <NNN> - I thought it was a doc line
[*] - ok it wasn't quite a million, but it felt like it.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-26 06:24:45 +08:00
|
|
|
* @t: timer_list containing pointer to netdev cast into an unsigned long
|
2007-07-16 00:03:29 +08:00
|
|
|
*/
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 05:43:17 +08:00
|
|
|
static void atl1_phy_config(struct timer_list *t)
|
2007-07-16 00:03:29 +08:00
|
|
|
{
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 05:43:17 +08:00
|
|
|
struct atl1_adapter *adapter = from_timer(adapter, t,
|
|
|
|
phy_config_timer);
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
2007-07-16 00:03:29 +08:00
|
|
|
unsigned long flags;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
spin_lock_irqsave(&adapter->lock, flags);
|
2008-02-03 09:50:12 +08:00
|
|
|
adapter->phy_timer_pending = false;
|
|
|
|
atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
|
|
|
|
atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
|
|
|
|
atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
|
2007-07-16 00:03:29 +08:00
|
|
|
spin_unlock_irqrestore(&adapter->lock, flags);
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Orphaned vendor comment left intact here:
|
|
|
|
* <vendor comment>
|
|
|
|
* If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
|
|
|
|
* will assert. We do soft reset <0x1400=1> according
|
|
|
|
* with the SPEC. BUT, it seemes that PCIE or DMA
|
|
|
|
* state-machine will not be reset. DMAR_TO_INT will
|
|
|
|
* assert again and again.
|
|
|
|
* </vendor comment>
|
|
|
|
*/
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static int atl1_reset(struct atl1_adapter *adapter)
|
2007-07-16 00:03:29 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
ret = atl1_reset_hw(&adapter->hw);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (ret)
|
2007-07-16 00:03:29 +08:00
|
|
|
return ret;
|
|
|
|
return atl1_init_hw(&adapter->hw);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static s32 atl1_up(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
|
int err;
|
2011-03-29 00:27:31 +08:00
|
|
|
int irq_flags = 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
/* hardware has been reset, we need to reload some things */
|
2008-02-03 09:50:12 +08:00
|
|
|
atlx_set_multi(netdev);
|
2007-07-16 00:03:28 +08:00
|
|
|
atl1_init_ring_ptrs(adapter);
|
2008-02-03 09:50:12 +08:00
|
|
|
atlx_restore_vlan(adapter);
|
2007-02-08 23:42:37 +08:00
|
|
|
err = atl1_alloc_rx_buffers(adapter);
|
2008-02-03 09:50:12 +08:00
|
|
|
if (unlikely(!err))
|
|
|
|
/* no RX BUFFER allocated */
|
2007-02-08 23:42:37 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (unlikely(atl1_configure(adapter))) {
|
|
|
|
err = -EIO;
|
|
|
|
goto err_up;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = pci_enable_msi(adapter->pdev);
|
|
|
|
if (err) {
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_msg_ifup(adapter))
|
|
|
|
dev_info(&adapter->pdev->dev,
|
|
|
|
"Unable to enable MSI: %d\n", err);
|
2007-02-08 23:42:37 +08:00
|
|
|
irq_flags |= IRQF_SHARED;
|
|
|
|
}
|
|
|
|
|
2009-11-19 15:29:17 +08:00
|
|
|
err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
|
2007-02-08 23:42:37 +08:00
|
|
|
netdev->name, netdev);
|
|
|
|
if (unlikely(err))
|
|
|
|
goto err_up;
|
|
|
|
|
2012-04-13 14:09:47 +08:00
|
|
|
napi_enable(&adapter->napi);
|
2008-02-03 09:50:12 +08:00
|
|
|
atlx_irq_enable(adapter);
|
2007-02-08 23:42:37 +08:00
|
|
|
atl1_check_link(adapter);
|
2008-07-21 23:28:37 +08:00
|
|
|
netif_start_queue(netdev);
|
2007-02-08 23:42:37 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_up:
|
|
|
|
pci_disable_msi(adapter->pdev);
|
|
|
|
/* free rx_buffers */
|
|
|
|
atl1_clean_rx_ring(adapter);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static void atl1_down(struct atl1_adapter *adapter)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
|
|
2012-04-13 14:09:47 +08:00
|
|
|
napi_disable(&adapter->napi);
|
2008-09-27 12:17:20 +08:00
|
|
|
netif_stop_queue(netdev);
|
2007-02-08 23:42:37 +08:00
|
|
|
del_timer_sync(&adapter->phy_config_timer);
|
|
|
|
adapter->phy_timer_pending = false;
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
atlx_irq_disable(adapter);
|
2007-02-08 23:42:37 +08:00
|
|
|
free_irq(adapter->pdev->irq, netdev);
|
|
|
|
pci_disable_msi(adapter->pdev);
|
|
|
|
atl1_reset_hw(&adapter->hw);
|
|
|
|
adapter->cmb.cmb->int_stats = 0;
|
|
|
|
|
|
|
|
adapter->link_speed = SPEED_0;
|
|
|
|
adapter->link_duplex = -1;
|
|
|
|
netif_carrier_off(netdev);
|
|
|
|
|
|
|
|
atl1_clean_tx_ring(adapter);
|
|
|
|
atl1_clean_rx_ring(adapter);
|
|
|
|
}
|
|
|
|
|
2012-04-11 14:15:03 +08:00
|
|
|
static void atl1_reset_dev_task(struct work_struct *work)
|
2008-02-03 09:50:12 +08:00
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter =
|
2012-04-11 14:15:03 +08:00
|
|
|
container_of(work, struct atl1_adapter, reset_dev_task);
|
2008-02-03 09:50:12 +08:00
|
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
|
|
|
|
|
netif_device_detach(netdev);
|
|
|
|
atl1_down(adapter);
|
2008-02-03 09:50:04 +08:00
|
|
|
atl1_up(adapter);
|
2008-02-03 09:50:12 +08:00
|
|
|
netif_device_attach(netdev);
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2008-02-03 09:50:12 +08:00
|
|
|
* atl1_change_mtu - Change the Maximum Transfer Unit
|
|
|
|
* @netdev: network interface device structure
|
|
|
|
* @new_mtu: new value for maximum frame size
|
|
|
|
*
|
|
|
|
* Returns 0 on success, negative on failure
|
|
|
|
*/
|
|
|
|
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
2008-02-03 09:50:12 +08:00
|
|
|
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
adapter->hw.max_frame_size = max_frame;
|
|
|
|
adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
|
|
|
|
adapter->rx_buffer_len = (max_frame + 7) & ~7;
|
|
|
|
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
|
|
|
|
|
|
|
|
netdev->mtu = new_mtu;
|
2016-10-18 03:54:03 +08:00
|
|
|
if (netif_running(netdev)) {
|
2008-02-03 09:50:12 +08:00
|
|
|
atl1_down(adapter);
|
|
|
|
atl1_up(adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-02-08 23:42:37 +08:00
|
|
|
* atl1_open - Called when a network interface is made active
|
|
|
|
* @netdev: network interface device structure
|
|
|
|
*
|
|
|
|
* Returns 0 on success, negative value on failure
|
|
|
|
*
|
|
|
|
* The open entry point is called when a network interface is made
|
|
|
|
* active by the system (IFF_UP). At this point all resources needed
|
|
|
|
* for transmit and receive operations are allocated, the interrupt
|
|
|
|
* handler is registered with the OS, the watchdog timer is started,
|
|
|
|
* and the stack is notified that the interface is ready.
|
|
|
|
*/
|
|
|
|
static int atl1_open(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
int err;
|
|
|
|
|
2008-09-27 12:17:20 +08:00
|
|
|
netif_carrier_off(netdev);
|
|
|
|
|
2007-02-08 23:42:37 +08:00
|
|
|
/* allocate transmit descriptors */
|
|
|
|
err = atl1_setup_ring_resources(adapter);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
err = atl1_up(adapter);
|
|
|
|
if (err)
|
|
|
|
goto err_up;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_up:
|
|
|
|
atl1_reset(adapter);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-02-08 23:42:37 +08:00
|
|
|
* atl1_close - Disables a network interface
|
|
|
|
* @netdev: network interface device structure
|
|
|
|
*
|
|
|
|
* Returns 0, this is not allowed to fail
|
|
|
|
*
|
|
|
|
* The close entry point is called when an interface is de-activated
|
|
|
|
* by the OS. The hardware is still under the drivers control, but
|
|
|
|
* needs to be disabled. A global MAC reset is issued to stop the
|
|
|
|
* hardware, and all transmit and receive resources are freed.
|
|
|
|
*/
|
|
|
|
static int atl1_close(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
atl1_down(adapter);
|
|
|
|
atl1_free_ring_resources(adapter);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-26 05:03:25 +08:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
2011-02-10 14:55:19 +08:00
|
|
|
static int atl1_suspend(struct device *dev)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2019-07-23 21:18:56 +08:00
|
|
|
struct net_device *netdev = dev_get_drvdata(dev);
|
2007-07-16 00:03:29 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
|
|
|
u32 ctrl = 0;
|
|
|
|
u32 wufc = adapter->wol;
|
2008-05-10 11:12:07 +08:00
|
|
|
u32 val;
|
|
|
|
u16 speed;
|
|
|
|
u16 duplex;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
netif_device_detach(netdev);
|
2007-07-16 00:03:29 +08:00
|
|
|
if (netif_running(netdev))
|
|
|
|
atl1_down(adapter);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
|
|
|
|
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
|
2008-05-10 11:12:07 +08:00
|
|
|
val = ctrl & BMSR_LSTATUS;
|
|
|
|
if (val)
|
2008-02-03 09:50:12 +08:00
|
|
|
wufc &= ~ATLX_WUFC_LNKC;
|
2011-02-10 14:55:19 +08:00
|
|
|
if (!wufc)
|
|
|
|
goto disable_wol;
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2011-02-10 14:55:19 +08:00
|
|
|
if (val) {
|
2008-05-10 11:12:07 +08:00
|
|
|
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
|
|
|
|
if (val) {
|
|
|
|
if (netif_msg_ifdown(adapter))
|
2019-07-23 21:18:56 +08:00
|
|
|
dev_printk(KERN_DEBUG, dev,
|
2008-05-10 11:12:07 +08:00
|
|
|
"error getting speed/duplex\n");
|
|
|
|
goto disable_wol;
|
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
|
|
|
|
ctrl = 0;
|
|
|
|
|
2008-05-10 11:12:07 +08:00
|
|
|
/* enable magic packet WOL */
|
|
|
|
if (wufc & ATLX_WUFC_MAG)
|
|
|
|
ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
|
2007-07-16 00:03:29 +08:00
|
|
|
iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
|
2008-05-10 11:12:07 +08:00
|
|
|
ioread32(hw->hw_addr + REG_WOL_CTRL);
|
|
|
|
|
|
|
|
/* configure the mac */
|
|
|
|
ctrl = MAC_CTRL_RX_EN;
|
|
|
|
ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
|
|
|
|
MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
|
|
|
|
if (duplex == FULL_DUPLEX)
|
|
|
|
ctrl |= MAC_CTRL_DUPLX;
|
|
|
|
ctrl |= (((u32)adapter->hw.preamble_len &
|
|
|
|
MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
|
2011-07-20 12:54:33 +08:00
|
|
|
__atlx_vlan_mode(netdev->features, &ctrl);
|
2008-05-10 11:12:07 +08:00
|
|
|
if (wufc & ATLX_WUFC_MAG)
|
2007-07-16 00:03:29 +08:00
|
|
|
ctrl |= MAC_CTRL_BC_EN;
|
|
|
|
iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
|
2008-05-10 11:12:07 +08:00
|
|
|
ioread32(hw->hw_addr + REG_MAC_CTRL);
|
|
|
|
|
|
|
|
/* poke the PHY */
|
|
|
|
ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
|
|
|
|
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
|
|
|
|
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
|
|
|
|
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
|
2011-02-10 14:55:19 +08:00
|
|
|
} else {
|
2008-05-10 11:12:07 +08:00
|
|
|
ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
|
|
|
|
iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
|
|
|
|
ioread32(hw->hw_addr + REG_WOL_CTRL);
|
|
|
|
iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
|
|
|
|
ioread32(hw->hw_addr + REG_MAC_CTRL);
|
|
|
|
hw->phy_configured = false;
|
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2011-02-10 14:55:19 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
disable_wol:
|
2008-05-10 11:12:07 +08:00
|
|
|
iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
|
|
|
|
ioread32(hw->hw_addr + REG_WOL_CTRL);
|
|
|
|
ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
|
|
|
|
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
|
|
|
|
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
|
|
|
|
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
|
|
|
|
hw->phy_configured = false;
|
2007-07-16 00:03:29 +08:00
|
|
|
|
|
|
|
return 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
|
|
|
|
2011-02-10 14:55:19 +08:00
|
|
|
static int atl1_resume(struct device *dev)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2019-07-23 21:18:56 +08:00
|
|
|
struct net_device *netdev = dev_get_drvdata(dev);
|
2007-07-16 00:03:29 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
2007-07-16 00:03:27 +08:00
|
|
|
|
2008-05-10 11:12:07 +08:00
|
|
|
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2008-05-10 11:12:07 +08:00
|
|
|
atl1_reset_hw(&adapter->hw);
|
2007-07-16 00:03:29 +08:00
|
|
|
|
2010-09-22 18:41:58 +08:00
|
|
|
if (netif_running(netdev)) {
|
|
|
|
adapter->cmb.cmb->int_stats = 0;
|
2007-07-16 00:03:29 +08:00
|
|
|
atl1_up(adapter);
|
2010-09-22 18:41:58 +08:00
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
netif_device_attach(netdev);
|
|
|
|
|
|
|
|
return 0;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
2013-04-16 17:28:28 +08:00
|
|
|
#endif
|
2011-02-10 14:55:19 +08:00
|
|
|
|
|
|
|
static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-05-10 11:12:08 +08:00
|
|
|
static void atl1_shutdown(struct pci_dev *pdev)
|
|
|
|
{
|
2011-02-10 14:55:19 +08:00
|
|
|
struct net_device *netdev = pci_get_drvdata(pdev);
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
2013-04-17 05:35:00 +08:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
2011-02-10 14:55:19 +08:00
|
|
|
atl1_suspend(&pdev->dev);
|
2013-04-17 05:35:00 +08:00
|
|
|
#endif
|
2011-02-10 14:55:19 +08:00
|
|
|
pci_wake_from_d3(pdev, adapter->wol);
|
|
|
|
pci_set_power_state(pdev, PCI_D3hot);
|
2008-05-10 11:12:08 +08:00
|
|
|
}
|
|
|
|
|
2007-07-16 00:03:29 +08:00
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
static void atl1_poll_controller(struct net_device *netdev)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
2007-07-16 00:03:29 +08:00
|
|
|
disable_irq(netdev->irq);
|
|
|
|
atl1_intr(netdev->irq, netdev);
|
|
|
|
enable_irq(netdev->irq);
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
2007-07-16 00:03:29 +08:00
|
|
|
#endif
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-11-20 14:14:17 +08:00
|
|
|
static const struct net_device_ops atl1_netdev_ops = {
|
|
|
|
.ndo_open = atl1_open,
|
|
|
|
.ndo_stop = atl1_close,
|
2008-11-21 12:14:53 +08:00
|
|
|
.ndo_start_xmit = atl1_xmit_frame,
|
2011-08-16 14:29:01 +08:00
|
|
|
.ndo_set_rx_mode = atlx_set_multi,
|
2008-11-20 14:14:17 +08:00
|
|
|
.ndo_validate_addr = eth_validate_addr,
|
|
|
|
.ndo_set_mac_address = atl1_set_mac,
|
|
|
|
.ndo_change_mtu = atl1_change_mtu,
|
2011-07-20 12:54:33 +08:00
|
|
|
.ndo_fix_features = atlx_fix_features,
|
|
|
|
.ndo_set_features = atlx_set_features,
|
2021-07-27 21:45:13 +08:00
|
|
|
.ndo_eth_ioctl = atlx_ioctl,
|
2008-11-21 12:14:53 +08:00
|
|
|
.ndo_tx_timeout = atlx_tx_timeout,
|
2008-11-20 14:14:17 +08:00
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
.ndo_poll_controller = atl1_poll_controller,
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-02-08 23:42:37 +08:00
|
|
|
* atl1_probe - Device Initialization Routine
|
|
|
|
* @pdev: PCI device information struct
|
|
|
|
* @ent: entry in atl1_pci_tbl
|
|
|
|
*
|
|
|
|
* Returns 0 on success, negative on failure
|
|
|
|
*
|
|
|
|
* atl1_probe initializes an adapter identified by a pci_dev structure.
|
|
|
|
* The OS initialization, configuring of the adapter private structure,
|
|
|
|
* and a hardware reset occur.
|
|
|
|
*/
|
2012-12-06 22:30:56 +08:00
|
|
|
static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
struct net_device *netdev;
|
|
|
|
struct atl1_adapter *adapter;
|
|
|
|
static int cards_found = 0;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = pci_enable_device(pdev);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2007-09-08 08:25:01 +08:00
|
|
|
/*
|
2007-09-21 03:57:15 +08:00
|
|
|
* The atl1 chip can DMA to 64-bit addresses, but it uses a single
|
|
|
|
* shared register for the high 32 bits, so only a single, aligned,
|
|
|
|
* 4 GB physical address range can be used at a time.
|
|
|
|
*
|
|
|
|
* Supporting 64-bit DMA on this hardware is more trouble than it's
|
|
|
|
* worth. It is far easier to limit to 32-bit DMA than update
|
|
|
|
* various kernel subsystems to support the mechanics required by a
|
|
|
|
* fixed-high-32-bit system.
|
2007-09-08 08:25:01 +08:00
|
|
|
*/
|
net: atheros: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'atl1e_setup_ring_resources()' (atl1e_main.c),
'atl1_setup_ring_resources()' (atl1.c) and 'atl2_setup_ring_resources()'
(atl2.c) GFP_KERNEL can be used because it can be called from a .ndo_open.
'atl1_setup_ring_resources()' (atl1.c) can also be called from a
'.set_ringparam' (see struct ethtool_ops) where sleep is also allowed.
Both cases are protected by 'rtnl_lock()' which is a mutex. So these
function can sleep.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-08-23 16:03:53 +08:00
|
|
|
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
|
2007-02-08 23:42:37 +08:00
|
|
|
if (err) {
|
2007-09-08 08:25:01 +08:00
|
|
|
dev_err(&pdev->dev, "no usable DMA configuration\n");
|
|
|
|
goto err_dma;
|
2007-02-08 23:42:37 +08:00
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Mark all PCI regions associated with PCI device
|
2007-02-08 23:42:37 +08:00
|
|
|
* pdev as being reserved by owner atl1_driver_name
|
|
|
|
*/
|
2008-02-03 09:50:12 +08:00
|
|
|
err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
|
2007-02-08 23:42:37 +08:00
|
|
|
if (err)
|
|
|
|
goto err_request_regions;
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Enables bus-mastering on the device and calls
|
2007-02-08 23:42:37 +08:00
|
|
|
* pcibios_set_master to do the needed arch specific settings
|
|
|
|
*/
|
|
|
|
pci_set_master(pdev);
|
|
|
|
|
|
|
|
netdev = alloc_etherdev(sizeof(struct atl1_adapter));
|
|
|
|
if (!netdev) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_alloc_etherdev;
|
|
|
|
}
|
|
|
|
SET_NETDEV_DEV(netdev, &pdev->dev);
|
|
|
|
|
|
|
|
pci_set_drvdata(pdev, netdev);
|
|
|
|
adapter = netdev_priv(netdev);
|
|
|
|
adapter->netdev = netdev;
|
|
|
|
adapter->pdev = pdev;
|
|
|
|
adapter->hw.back = adapter;
|
2008-02-03 09:50:12 +08:00
|
|
|
adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
|
|
|
|
if (!adapter->hw.hw_addr) {
|
|
|
|
err = -EIO;
|
|
|
|
goto err_pci_iomap;
|
|
|
|
}
|
|
|
|
/* get device revision number */
|
2007-04-30 10:42:10 +08:00
|
|
|
adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
|
2007-07-16 00:03:27 +08:00
|
|
|
(REG_MASTER_CTRL + 2));
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
/* set default ring resource counts */
|
|
|
|
adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
|
|
|
|
adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
|
|
|
|
|
|
|
|
adapter->mii.dev = netdev;
|
|
|
|
adapter->mii.mdio_read = mdio_read;
|
|
|
|
adapter->mii.mdio_write = mdio_write;
|
|
|
|
adapter->mii.phy_id_mask = 0x1f;
|
|
|
|
adapter->mii.reg_num_mask = 0x1f;
|
|
|
|
|
2008-11-20 14:14:17 +08:00
|
|
|
netdev->netdev_ops = &atl1_netdev_ops;
|
2007-02-08 23:42:37 +08:00
|
|
|
netdev->watchdog_timeo = 5 * HZ;
|
2022-09-27 21:27:53 +08:00
|
|
|
netif_napi_add(netdev, &adapter->napi, atl1_rings_clean);
|
2007-06-02 00:44:00 +08:00
|
|
|
|
2007-02-08 23:42:37 +08:00
|
|
|
netdev->ethtool_ops = &atl1_ethtool_ops;
|
|
|
|
adapter->bd_number = cards_found;
|
|
|
|
|
|
|
|
/* setup the private structure */
|
|
|
|
err = atl1_sw_init(adapter);
|
|
|
|
if (err)
|
|
|
|
goto err_common;
|
|
|
|
|
|
|
|
netdev->features = NETIF_F_HW_CSUM;
|
|
|
|
netdev->features |= NETIF_F_SG;
|
2013-04-19 10:04:27 +08:00
|
|
|
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2011-07-20 12:54:33 +08:00
|
|
|
netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
|
2013-04-19 10:04:27 +08:00
|
|
|
NETIF_F_HW_VLAN_CTAG_RX;
|
2011-04-07 15:32:18 +08:00
|
|
|
|
|
|
|
/* is this valid? see atl1_setup_mac_ctrl() */
|
|
|
|
netdev->features |= NETIF_F_RXCSUM;
|
|
|
|
|
2016-10-18 03:54:03 +08:00
|
|
|
/* MTU range: 42 - 10218 */
|
|
|
|
netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
|
|
|
|
netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
|
|
|
|
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
|
|
|
|
|
2007-02-08 23:42:37 +08:00
|
|
|
/*
|
|
|
|
* patch for some L1 of old version,
|
|
|
|
* the final version of L1 may not need these
|
|
|
|
* patches
|
|
|
|
*/
|
|
|
|
/* atl1_pcie_patch(adapter); */
|
|
|
|
|
|
|
|
/* really reset GPHY core */
|
2008-02-03 09:50:12 +08:00
|
|
|
iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* reset the controller to
|
|
|
|
* put the device in a known good starting state
|
|
|
|
*/
|
|
|
|
if (atl1_reset_hw(&adapter->hw)) {
|
|
|
|
err = -EIO;
|
|
|
|
goto err_common;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* copy the MAC address out of the EEPROM */
|
2012-02-17 13:43:30 +08:00
|
|
|
if (atl1_read_mac_addr(&adapter->hw)) {
|
|
|
|
/* mark random mac */
|
2013-01-01 11:30:15 +08:00
|
|
|
netdev->addr_assign_type = NET_ADDR_RANDOM;
|
2012-02-17 13:43:30 +08:00
|
|
|
}
|
2021-10-05 00:05:21 +08:00
|
|
|
eth_hw_addr_set(netdev, adapter->hw.mac_addr);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
if (!is_valid_ether_addr(netdev->dev_addr)) {
|
|
|
|
err = -EIO;
|
|
|
|
goto err_common;
|
|
|
|
}
|
|
|
|
|
|
|
|
atl1_check_options(adapter);
|
|
|
|
|
|
|
|
/* pre-init the MAC, and setup link */
|
|
|
|
err = atl1_init_hw(&adapter->hw);
|
|
|
|
if (err) {
|
|
|
|
err = -EIO;
|
|
|
|
goto err_common;
|
|
|
|
}
|
|
|
|
|
|
|
|
atl1_pcie_patch(adapter);
|
|
|
|
/* assume we have no link for now */
|
|
|
|
netif_carrier_off(netdev);
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 05:43:17 +08:00
|
|
|
timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
|
2007-02-08 23:42:37 +08:00
|
|
|
adapter->phy_timer_pending = false;
|
|
|
|
|
2012-04-11 14:15:03 +08:00
|
|
|
INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
|
2007-02-08 23:42:37 +08:00
|
|
|
|
|
|
|
err = register_netdev(netdev);
|
|
|
|
if (err)
|
|
|
|
goto err_common;
|
|
|
|
|
|
|
|
cards_found++;
|
|
|
|
atl1_via_workaround(adapter);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_common:
|
|
|
|
pci_iounmap(pdev, adapter->hw.hw_addr);
|
|
|
|
err_pci_iomap:
|
|
|
|
free_netdev(netdev);
|
|
|
|
err_alloc_etherdev:
|
|
|
|
pci_release_regions(pdev);
|
|
|
|
err_dma:
|
|
|
|
err_request_regions:
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2012-07-10 18:56:00 +08:00
|
|
|
/**
|
2007-02-08 23:42:37 +08:00
|
|
|
* atl1_remove - Device Removal Routine
|
|
|
|
* @pdev: PCI device information struct
|
|
|
|
*
|
|
|
|
* atl1_remove is called by the PCI subsystem to alert the driver
|
|
|
|
* that it should release a PCI device. This could be caused by a
|
|
|
|
* Hot-Plug event, or because the driver is going to be removed from
|
|
|
|
* memory.
|
|
|
|
*/
|
2012-12-03 22:23:56 +08:00
|
|
|
static void atl1_remove(struct pci_dev *pdev)
|
2007-02-08 23:42:37 +08:00
|
|
|
{
|
|
|
|
struct net_device *netdev = pci_get_drvdata(pdev);
|
|
|
|
struct atl1_adapter *adapter;
|
|
|
|
/* Device not available. Return. */
|
|
|
|
if (!netdev)
|
|
|
|
return;
|
|
|
|
|
|
|
|
adapter = netdev_priv(netdev);
|
2007-03-29 08:51:51 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
|
|
|
|
* Some atl1 boards lack persistent storage for their MAC, and get it
|
2007-03-29 08:51:51 +08:00
|
|
|
* from the BIOS during POST. If we've been messing with the MAC
|
|
|
|
* address, we need to save the permanent one.
|
|
|
|
*/
|
2013-12-26 19:40:27 +08:00
|
|
|
if (!ether_addr_equal_unaligned(adapter->hw.mac_addr,
|
|
|
|
adapter->hw.perm_mac_addr)) {
|
2007-07-16 00:03:27 +08:00
|
|
|
memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
|
|
|
|
ETH_ALEN);
|
2007-03-29 08:51:51 +08:00
|
|
|
atl1_set_mac_addr(&adapter->hw);
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
|
2007-02-08 23:42:37 +08:00
|
|
|
unregister_netdev(netdev);
|
|
|
|
pci_iounmap(pdev, adapter->hw.hw_addr);
|
|
|
|
pci_release_regions(pdev);
|
|
|
|
free_netdev(netdev);
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct pci_driver atl1_driver = {
|
2008-02-03 09:50:12 +08:00
|
|
|
.name = ATLX_DRIVER_NAME,
|
2007-02-08 23:42:37 +08:00
|
|
|
.id_table = atl1_pci_tbl,
|
|
|
|
.probe = atl1_probe,
|
2012-12-03 22:23:56 +08:00
|
|
|
.remove = atl1_remove,
|
2011-02-10 14:55:19 +08:00
|
|
|
.shutdown = atl1_shutdown,
|
2013-04-16 17:28:28 +08:00
|
|
|
.driver.pm = &atl1_pm_ops,
|
2007-02-08 23:42:37 +08:00
|
|
|
};
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_stats {
|
|
|
|
char stat_string[ETH_GSTRING_LEN];
|
|
|
|
int sizeof_stat;
|
|
|
|
int stat_offset;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define ATL1_STAT(m) \
|
|
|
|
sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
|
|
|
|
|
|
|
|
static struct atl1_stats atl1_gstrings_stats[] = {
|
|
|
|
{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
|
|
|
|
{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
|
|
|
|
{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
|
|
|
|
{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
|
|
|
|
{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
|
|
|
|
{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
|
|
|
|
{"multicast", ATL1_STAT(soft_stats.multicast)},
|
|
|
|
{"collisions", ATL1_STAT(soft_stats.collisions)},
|
|
|
|
{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
|
|
|
|
{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
|
|
|
|
{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
|
|
|
|
{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
|
|
|
|
{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
|
|
|
|
{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
|
|
|
|
{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
|
|
|
|
{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
|
|
|
|
{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
|
|
|
|
{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
|
|
|
|
{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
|
|
|
|
{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
|
|
|
|
{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
|
|
|
|
{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
|
|
|
|
{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
|
2019-04-23 22:30:07 +08:00
|
|
|
{"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)},
|
2008-02-03 09:50:12 +08:00
|
|
|
{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
|
|
|
|
{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
|
|
|
|
{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
|
|
|
|
{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
|
|
|
|
{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
|
|
|
|
};
|
|
|
|
|
|
|
|
static void atl1_get_ethtool_stats(struct net_device *netdev,
|
|
|
|
struct ethtool_stats *stats, u64 *data)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
2008-02-03 09:50:04 +08:00
|
|
|
int i;
|
2008-02-03 09:50:12 +08:00
|
|
|
char *p;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
|
|
|
|
p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
|
|
|
|
data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
|
|
|
|
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static int atl1_get_sset_count(struct net_device *netdev, int sset)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
switch (sset) {
|
|
|
|
case ETH_SS_STATS:
|
|
|
|
return ARRAY_SIZE(atl1_gstrings_stats);
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
|
|
|
|
2016-11-14 01:35:14 +08:00
|
|
|
static int atl1_get_link_ksettings(struct net_device *netdev,
|
|
|
|
struct ethtool_link_ksettings *cmd)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
2016-11-14 01:35:14 +08:00
|
|
|
u32 supported, advertising;
|
2008-02-03 09:50:12 +08:00
|
|
|
|
2016-11-14 01:35:14 +08:00
|
|
|
supported = (SUPPORTED_10baseT_Half |
|
2008-02-03 09:50:12 +08:00
|
|
|
SUPPORTED_10baseT_Full |
|
|
|
|
SUPPORTED_100baseT_Half |
|
|
|
|
SUPPORTED_100baseT_Full |
|
|
|
|
SUPPORTED_1000baseT_Full |
|
|
|
|
SUPPORTED_Autoneg | SUPPORTED_TP);
|
2016-11-14 01:35:14 +08:00
|
|
|
advertising = ADVERTISED_TP;
|
2008-02-03 09:50:12 +08:00
|
|
|
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
|
|
|
|
hw->media_type == MEDIA_TYPE_1000M_FULL) {
|
2016-11-14 01:35:14 +08:00
|
|
|
advertising |= ADVERTISED_Autoneg;
|
2008-02-03 09:50:12 +08:00
|
|
|
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
|
2016-11-14 01:35:14 +08:00
|
|
|
advertising |= ADVERTISED_Autoneg;
|
|
|
|
advertising |=
|
2008-02-03 09:50:12 +08:00
|
|
|
(ADVERTISED_10baseT_Half |
|
|
|
|
ADVERTISED_10baseT_Full |
|
|
|
|
ADVERTISED_100baseT_Half |
|
|
|
|
ADVERTISED_100baseT_Full |
|
|
|
|
ADVERTISED_1000baseT_Full);
|
|
|
|
} else
|
2016-11-14 01:35:14 +08:00
|
|
|
advertising |= (ADVERTISED_1000baseT_Full);
|
2008-02-03 09:50:12 +08:00
|
|
|
}
|
2016-11-14 01:35:14 +08:00
|
|
|
cmd->base.port = PORT_TP;
|
|
|
|
cmd->base.phy_address = 0;
|
2008-02-03 09:50:12 +08:00
|
|
|
|
|
|
|
if (netif_carrier_ok(adapter->netdev)) {
|
|
|
|
u16 link_speed, link_duplex;
|
|
|
|
atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
|
2016-11-14 01:35:14 +08:00
|
|
|
cmd->base.speed = link_speed;
|
2008-02-03 09:50:12 +08:00
|
|
|
if (link_duplex == FULL_DUPLEX)
|
2016-11-14 01:35:14 +08:00
|
|
|
cmd->base.duplex = DUPLEX_FULL;
|
2008-02-03 09:50:12 +08:00
|
|
|
else
|
2016-11-14 01:35:14 +08:00
|
|
|
cmd->base.duplex = DUPLEX_HALF;
|
2008-02-03 09:50:12 +08:00
|
|
|
} else {
|
2016-11-14 01:35:14 +08:00
|
|
|
cmd->base.speed = SPEED_UNKNOWN;
|
|
|
|
cmd->base.duplex = DUPLEX_UNKNOWN;
|
2008-02-03 09:50:12 +08:00
|
|
|
}
|
|
|
|
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
|
|
|
|
hw->media_type == MEDIA_TYPE_1000M_FULL)
|
2016-11-14 01:35:14 +08:00
|
|
|
cmd->base.autoneg = AUTONEG_ENABLE;
|
2008-02-03 09:50:12 +08:00
|
|
|
else
|
2016-11-14 01:35:14 +08:00
|
|
|
cmd->base.autoneg = AUTONEG_DISABLE;
|
|
|
|
|
|
|
|
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
|
|
|
|
supported);
|
|
|
|
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
|
|
|
|
advertising);
|
2008-02-03 09:50:04 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-14 01:35:14 +08:00
|
|
|
static int atl1_set_link_ksettings(struct net_device *netdev,
|
|
|
|
const struct ethtool_link_ksettings *cmd)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
2008-02-03 09:50:04 +08:00
|
|
|
u16 phy_data;
|
2008-02-03 09:50:12 +08:00
|
|
|
int ret_val = 0;
|
|
|
|
u16 old_media_type = hw->media_type;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_running(adapter->netdev)) {
|
|
|
|
if (netif_msg_link(adapter))
|
|
|
|
dev_dbg(&adapter->pdev->dev,
|
|
|
|
"ethtool shutting down adapter\n");
|
|
|
|
atl1_down(adapter);
|
|
|
|
}
|
|
|
|
|
2016-11-14 01:35:14 +08:00
|
|
|
if (cmd->base.autoneg == AUTONEG_ENABLE)
|
2008-02-03 09:50:12 +08:00
|
|
|
hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
|
|
|
|
else {
|
2016-11-14 01:35:14 +08:00
|
|
|
u32 speed = cmd->base.speed;
|
2011-04-28 02:32:39 +08:00
|
|
|
if (speed == SPEED_1000) {
|
2016-11-14 01:35:14 +08:00
|
|
|
if (cmd->base.duplex != DUPLEX_FULL) {
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_msg_link(adapter))
|
|
|
|
dev_warn(&adapter->pdev->dev,
|
|
|
|
"1000M half is invalid\n");
|
|
|
|
ret_val = -EINVAL;
|
|
|
|
goto exit_sset;
|
|
|
|
}
|
|
|
|
hw->media_type = MEDIA_TYPE_1000M_FULL;
|
2011-04-28 02:32:39 +08:00
|
|
|
} else if (speed == SPEED_100) {
|
2016-11-14 01:35:14 +08:00
|
|
|
if (cmd->base.duplex == DUPLEX_FULL)
|
2008-02-03 09:50:12 +08:00
|
|
|
hw->media_type = MEDIA_TYPE_100M_FULL;
|
|
|
|
else
|
|
|
|
hw->media_type = MEDIA_TYPE_100M_HALF;
|
|
|
|
} else {
|
2016-11-14 01:35:14 +08:00
|
|
|
if (cmd->base.duplex == DUPLEX_FULL)
|
2008-02-03 09:50:12 +08:00
|
|
|
hw->media_type = MEDIA_TYPE_10M_FULL;
|
|
|
|
else
|
|
|
|
hw->media_type = MEDIA_TYPE_10M_HALF;
|
|
|
|
}
|
|
|
|
}
|
2018-09-20 10:41:30 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (atl1_phy_setup_autoneg_adv(hw)) {
|
|
|
|
ret_val = -EINVAL;
|
|
|
|
if (netif_msg_link(adapter))
|
|
|
|
dev_warn(&adapter->pdev->dev,
|
|
|
|
"invalid ethtool speed/duplex setting\n");
|
|
|
|
goto exit_sset;
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
|
|
|
|
hw->media_type == MEDIA_TYPE_1000M_FULL)
|
|
|
|
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
|
|
|
|
else {
|
|
|
|
switch (hw->media_type) {
|
|
|
|
case MEDIA_TYPE_100M_FULL:
|
|
|
|
phy_data =
|
|
|
|
MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
|
|
|
|
MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_100M_HALF:
|
|
|
|
phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_10M_FULL:
|
|
|
|
phy_data =
|
|
|
|
MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* MEDIA_TYPE_10M_HALF: */
|
|
|
|
phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
atl1_write_phy_reg(hw, MII_BMCR, phy_data);
|
|
|
|
exit_sset:
|
|
|
|
if (ret_val)
|
|
|
|
hw->media_type = old_media_type;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_running(adapter->netdev)) {
|
|
|
|
if (netif_msg_link(adapter))
|
|
|
|
dev_dbg(&adapter->pdev->dev,
|
|
|
|
"ethtool starting adapter\n");
|
|
|
|
atl1_up(adapter);
|
|
|
|
} else if (!ret_val) {
|
|
|
|
if (netif_msg_link(adapter))
|
|
|
|
dev_dbg(&adapter->pdev->dev,
|
|
|
|
"ethtool resetting adapter\n");
|
|
|
|
atl1_reset(adapter);
|
|
|
|
}
|
|
|
|
return ret_val;
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static void atl1_get_drvinfo(struct net_device *netdev,
|
|
|
|
struct ethtool_drvinfo *drvinfo)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2022-08-31 04:14:54 +08:00
|
|
|
strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
|
|
|
|
strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
|
2008-02-03 09:50:12 +08:00
|
|
|
sizeof(drvinfo->bus_info));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void atl1_get_wol(struct net_device *netdev,
|
|
|
|
struct ethtool_wolinfo *wol)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
2008-11-10 05:05:30 +08:00
|
|
|
wol->supported = WAKE_MAGIC;
|
2008-02-03 09:50:12 +08:00
|
|
|
wol->wolopts = 0;
|
|
|
|
if (adapter->wol & ATLX_WUFC_MAG)
|
|
|
|
wol->wolopts |= WAKE_MAGIC;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int atl1_set_wol(struct net_device *netdev,
|
|
|
|
struct ethtool_wolinfo *wol)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
|
2008-11-10 05:05:30 +08:00
|
|
|
if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
|
|
|
|
WAKE_ARP | WAKE_MAGICSECURE))
|
2008-02-03 09:50:12 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
adapter->wol = 0;
|
|
|
|
if (wol->wolopts & WAKE_MAGIC)
|
|
|
|
adapter->wol |= ATLX_WUFC_MAG;
|
2011-02-10 14:55:19 +08:00
|
|
|
|
|
|
|
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
|
|
|
|
|
2008-02-03 09:50:04 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static u32 atl1_get_msglevel(struct net_device *netdev)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
return adapter->msg_enable;
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static void atl1_set_msglevel(struct net_device *netdev, u32 value)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
adapter->msg_enable = value;
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static int atl1_get_regs_len(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
return ATL1_REG_COUNT * sizeof(u32);
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
|
|
|
|
void *p)
|
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
|
|
|
unsigned int i;
|
|
|
|
u32 *regbuf = p;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
for (i = 0; i < ATL1_REG_COUNT; i++) {
|
|
|
|
/*
|
|
|
|
* This switch statement avoids reserved regions
|
|
|
|
* of register space.
|
|
|
|
*/
|
|
|
|
switch (i) {
|
|
|
|
case 6 ... 9:
|
|
|
|
case 14:
|
|
|
|
case 29 ... 31:
|
|
|
|
case 34 ... 63:
|
|
|
|
case 75 ... 127:
|
|
|
|
case 136 ... 1023:
|
|
|
|
case 1027 ... 1087:
|
|
|
|
case 1091 ... 1151:
|
|
|
|
case 1194 ... 1195:
|
|
|
|
case 1200 ... 1201:
|
|
|
|
case 1206 ... 1213:
|
|
|
|
case 1216 ... 1279:
|
|
|
|
case 1290 ... 1311:
|
|
|
|
case 1323 ... 1343:
|
|
|
|
case 1358 ... 1359:
|
|
|
|
case 1368 ... 1375:
|
|
|
|
case 1378 ... 1383:
|
|
|
|
case 1388 ... 1391:
|
|
|
|
case 1393 ... 1395:
|
|
|
|
case 1402 ... 1403:
|
|
|
|
case 1410 ... 1471:
|
|
|
|
case 1522 ... 1535:
|
|
|
|
/* reserved region; don't read it */
|
|
|
|
regbuf[i] = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* unreserved region */
|
|
|
|
regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static void atl1_get_ringparam(struct net_device *netdev,
|
2021-11-18 20:12:43 +08:00
|
|
|
struct ethtool_ringparam *ring,
|
|
|
|
struct kernel_ethtool_ringparam *kernel_ring,
|
|
|
|
struct netlink_ext_ack *extack)
|
2008-02-03 09:50:12 +08:00
|
|
|
{
|
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
|
|
|
|
struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
ring->rx_max_pending = ATL1_MAX_RFD;
|
|
|
|
ring->tx_max_pending = ATL1_MAX_TPD;
|
|
|
|
ring->rx_pending = rxdr->count;
|
|
|
|
ring->tx_pending = txdr->count;
|
|
|
|
}
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
 * ethtool set_ringparam handler: resize the TX (TPD) and RX (RFD/RRD)
 * descriptor rings.
 *
 * If the interface is running, the new ring resources are allocated
 * before the old ones are freed, so a failed allocation leaves the
 * device with its original rings.  The swap dance below (new -> local,
 * old -> adapter, free, new -> adapter) exists because
 * atl1_free_ring_resources() frees whatever rings are currently
 * installed in @adapter.
 *
 * Returns 0 on success or a negative errno from ring setup / atl1_up().
 */
static int atl1_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring,
	struct kernel_ethtool_ringparam *kernel_ring,
	struct netlink_ext_ack *extack)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;

	struct atl1_tpd_ring tpd_old, tpd_new;
	struct atl1_rfd_ring rfd_old, rfd_new;
	struct atl1_rrd_ring rrd_old, rrd_new;
	struct atl1_ring_header rhdr_old, rhdr_new;
	struct atl1_smb smb;
	struct atl1_cmb cmb;
	int err;

	/* snapshot the current ring state so it can be restored on error */
	tpd_old = adapter->tpd_ring;
	rfd_old = adapter->rfd_ring;
	rrd_old = adapter->rrd_ring;
	rhdr_old = adapter->ring_header;

	if (netif_running(adapter->netdev))
		atl1_down(adapter);

	/* clamp RX count to [ATL1_MIN_RFD, ATL1_MAX_RFD], round up to x4 */
	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
			rfdr->count;
	rfdr->count = (rfdr->count + 3) & ~3;
	/* the RX return ring always mirrors the RX free ring size */
	rrdr->count = rfdr->count;

	/* clamp TX count to [ATL1_MIN_TPD, ATL1_MAX_TPD], round up to x4 */
	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
			tpdr->count;
	tpdr->count = (tpdr->count + 3) & ~3;

	if (netif_running(adapter->netdev)) {
		/* try to get new resources before deleting old */
		err = atl1_setup_ring_resources(adapter);
		if (err)
			goto err_setup_ring;

		/*
		 * save the new, restore the old in order to free it,
		 * then restore the new back again
		 */

		rfd_new = adapter->rfd_ring;
		rrd_new = adapter->rrd_ring;
		tpd_new = adapter->tpd_ring;
		rhdr_new = adapter->ring_header;
		adapter->rfd_ring = rfd_old;
		adapter->rrd_ring = rrd_old;
		adapter->tpd_ring = tpd_old;
		adapter->ring_header = rhdr_old;
		/*
		 * Save SMB and CMB, since atl1_free_ring_resources
		 * will clear them.
		 */
		smb = adapter->smb;
		cmb = adapter->cmb;
		atl1_free_ring_resources(adapter);
		adapter->rfd_ring = rfd_new;
		adapter->rrd_ring = rrd_new;
		adapter->tpd_ring = tpd_new;
		adapter->ring_header = rhdr_new;
		adapter->smb = smb;
		adapter->cmb = cmb;

		err = atl1_up(adapter);
		if (err)
			return err;
	}
	return 0;

err_setup_ring:
	/* allocation failed: put the original rings back and restart */
	adapter->rfd_ring = rfd_old;
	adapter->rrd_ring = rrd_old;
	adapter->tpd_ring = tpd_old;
	adapter->ring_header = rhdr_old;
	/* NOTE(review): atl1_up() return value is ignored here -- confirm */
	atl1_up(adapter);
	return err;
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static void atl1_get_pauseparam(struct net_device *netdev,
|
|
|
|
struct ethtool_pauseparam *epause)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
|
|
|
|
hw->media_type == MEDIA_TYPE_1000M_FULL) {
|
|
|
|
epause->autoneg = AUTONEG_ENABLE;
|
|
|
|
} else {
|
|
|
|
epause->autoneg = AUTONEG_DISABLE;
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
2008-02-03 09:50:12 +08:00
|
|
|
epause->rx_pause = 1;
|
|
|
|
epause->tx_pause = 1;
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
/*
 * ethtool set_pauseparam handler.
 *
 * NOTE(review): this handler never reads the requested settings from
 * @epause and never touches the hardware; it overwrites the caller's
 * struct with the current state (same values the get path reports) and
 * returns success.  Looks like a stub copied from atl1_get_pauseparam()
 * -- confirm whether pause configuration is intentionally unsupported
 * on this device.
 */
static int atl1_set_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	/* autoneg is implied by these two media types */
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		epause->autoneg = AUTONEG_ENABLE;
	} else {
		epause->autoneg = AUTONEG_DISABLE;
	}

	/* flow control is reported as always enabled in both directions */
	epause->rx_pause = 1;
	epause->tx_pause = 1;

	return 0;
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static void atl1_get_strings(struct net_device *netdev, u32 stringset,
|
|
|
|
u8 *data)
|
|
|
|
{
|
|
|
|
u8 *p = data;
|
|
|
|
int i;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
switch (stringset) {
|
|
|
|
case ETH_SS_STATS:
|
|
|
|
for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
|
|
|
|
memcpy(p, atl1_gstrings_stats[i].stat_string,
|
|
|
|
ETH_GSTRING_LEN);
|
|
|
|
p += ETH_GSTRING_LEN;
|
|
|
|
}
|
|
|
|
break;
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
static int atl1_nway_reset(struct net_device *netdev)
|
2008-02-03 09:50:04 +08:00
|
|
|
{
|
2008-02-03 09:50:12 +08:00
|
|
|
struct atl1_adapter *adapter = netdev_priv(netdev);
|
|
|
|
struct atl1_hw *hw = &adapter->hw;
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (netif_running(netdev)) {
|
|
|
|
u16 phy_data;
|
|
|
|
atl1_down(adapter);
|
2008-02-03 09:50:04 +08:00
|
|
|
|
2008-02-03 09:50:12 +08:00
|
|
|
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
|
|
|
|
hw->media_type == MEDIA_TYPE_1000M_FULL) {
|
|
|
|
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
|
|
|
|
} else {
|
|
|
|
switch (hw->media_type) {
|
|
|
|
case MEDIA_TYPE_100M_FULL:
|
|
|
|
phy_data = MII_CR_FULL_DUPLEX |
|
|
|
|
MII_CR_SPEED_100 | MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_100M_HALF:
|
|
|
|
phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
case MEDIA_TYPE_10M_FULL:
|
|
|
|
phy_data = MII_CR_FULL_DUPLEX |
|
|
|
|
MII_CR_SPEED_10 | MII_CR_RESET;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* MEDIA_TYPE_10M_HALF */
|
|
|
|
phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
atl1_write_phy_reg(hw, MII_BMCR, phy_data);
|
|
|
|
atl1_up(adapter);
|
2008-02-03 09:50:04 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-10-21 15:50:50 +08:00
|
|
|
/* ethtool operations table registered for atl1 devices */
static const struct ethtool_ops atl1_ethtool_ops = {
	.get_drvinfo		= atl1_get_drvinfo,
	.get_wol		= atl1_get_wol,
	.set_wol		= atl1_set_wol,
	.get_msglevel		= atl1_get_msglevel,
	.set_msglevel		= atl1_set_msglevel,
	.get_regs_len		= atl1_get_regs_len,
	.get_regs		= atl1_get_regs,
	.get_ringparam		= atl1_get_ringparam,
	.set_ringparam		= atl1_set_ringparam,
	.get_pauseparam		= atl1_get_pauseparam,
	.set_pauseparam		= atl1_set_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= atl1_get_strings,
	.nway_reset		= atl1_nway_reset,
	.get_ethtool_stats	= atl1_get_ethtool_stats,
	.get_sset_count		= atl1_get_sset_count,
	.get_link_ksettings	= atl1_get_link_ksettings,
	.set_link_ksettings	= atl1_set_link_ksettings,
};
2013-05-21 20:42:09 +08:00
|
|
|
|
|
|
|
/* Register atl1_driver with the PCI core; expands to module init/exit. */
module_pci_driver(atl1_driver);
|