Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/sh_eth.c

commit cfadf853f6
@@ -83,19 +83,19 @@ static __inline__ isdn_net_local * isdn_net_get_locked_lp(isdn_net_dev *nd)

	spin_lock_irqsave(&nd->queue_lock, flags);
	lp = nd->queue;		/* get lp on top of queue */
	spin_lock(&nd->queue->xmit_lock);
	while (isdn_net_lp_busy(nd->queue)) {
		spin_unlock(&nd->queue->xmit_lock);
		nd->queue = nd->queue->next;
		if (nd->queue == lp) {	/* not found -- should never happen */
			lp = NULL;
			goto errout;
		}
		spin_lock(&nd->queue->xmit_lock);
	}
	lp = nd->queue;
	nd->queue = nd->queue->next;
	spin_unlock_irqrestore(&nd->queue_lock, flags);
	spin_lock(&lp->xmit_lock);
	local_bh_disable();
	return lp;
errout:
	spin_unlock_irqrestore(&nd->queue_lock, flags);
	return lp;
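
The hunk above walks a circular ring of ISDN channels under the device's queue_lock, taking each channel's xmit_lock before testing whether it is busy. What follows is a rough, userspace-only sketch of that shape, using pthread mutexes and invented names (struct chan, struct dev, pick_free_channel); it illustrates the pattern only and does not reproduce the exact before/after locking of the kernel function.

/* Illustrative sketch, not kernel code: scan a circular channel ring under
 * a queue lock, testing each channel under its own per-channel lock. */
#include <pthread.h>
#include <stddef.h>

struct chan {
	struct chan *next;		/* circular list */
	pthread_mutex_t xmit_lock;
	int busy;
};

struct dev {
	pthread_mutex_t queue_lock;
	struct chan *queue;		/* current position in the ring */
};

/* Return a non-busy channel with its xmit_lock held, or NULL if all busy. */
static struct chan *pick_free_channel(struct dev *d)
{
	struct chan *start, *c = NULL;

	pthread_mutex_lock(&d->queue_lock);
	start = d->queue;
	pthread_mutex_lock(&d->queue->xmit_lock);
	while (d->queue->busy) {
		pthread_mutex_unlock(&d->queue->xmit_lock);
		d->queue = d->queue->next;
		if (d->queue == start)		/* wrapped around: none free */
			goto out;
		pthread_mutex_lock(&d->queue->xmit_lock);
	}
	c = d->queue;				/* its xmit_lock stays held */
	d->queue = d->queue->next;		/* round-robin advance */
out:
	pthread_mutex_unlock(&d->queue_lock);
	return c;
}

The caller receives the selected channel with its per-channel lock still held, which is what allows the queue lock to be dropped before transmission starts.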
@@ -1741,6 +1741,7 @@ config KS8851
config KS8851_MLL
	tristate "Micrel KS8851 MLL"
	depends on HAS_IOMEM
	select MII
	help
	  This platform driver is for Micrel KS8851 Address/data bus
	  multiplexed network chip.
@@ -2482,6 +2483,8 @@ config S6GMAC
	  To compile this driver as a module, choose M here. The module
	  will be called s6gmac.

source "drivers/net/stmmac/Kconfig"

endif # NETDEV_1000

#
@@ -3232,7 +3235,7 @@ config VIRTIO_NET

config VMXNET3
	tristate "VMware VMXNET3 ethernet driver"
	depends on PCI && X86
	depends on PCI && X86 && INET
	help
	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
	  To compile this driver as a module, choose M here: the
@@ -100,6 +100,7 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_SH_ETH) += sh_eth.o
obj-$(CONFIG_STMMAC_ETH) += stmmac/

#
# end link order section
@@ -34,6 +34,7 @@
 *
 *
 */
#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage, error;
	u16 stage;
	int status, timeout = 0;

	error = be_POST_stage_get(adapter, &stage);
	if (error || stage != POST_STAGE_ARMFW_RDY) {
		dev_err(&adapter->pdev->dev, "POST failed.\n");
		return -1;
	}
	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 20);

	return 0;
	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
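
The rewritten be_cmd_POST above replaces a single status check with a bounded polling loop: query the POST stage, sleep a couple of seconds if the firmware is not ready yet, and give up after roughly 20 seconds. Below is a self-contained sketch of that wait-until-ready pattern in plain C; read_stage() is a stub standing in for be_POST_stage_get, and sleep() stands in for schedule_timeout.

#include <stdio.h>
#include <unistd.h>

#define STAGE_READY	0x01	/* placeholder for POST_STAGE_ARMFW_RDY */
#define POLL_SECS	2
#define LIMIT_SECS	20

/* Stub: in the driver this would query the adapter's POST stage register. */
static int read_stage(unsigned short *stage)
{
	*stage = STAGE_READY;
	return 0;
}

static int wait_for_post(void)
{
	unsigned short stage = 0;
	int waited = 0;

	do {
		if (read_stage(&stage)) {
			fprintf(stderr, "POST error; stage=0x%x\n", stage);
			return -1;		/* the query itself failed */
		}
		if (stage == STAGE_READY)
			return 0;		/* firmware is up */
		sleep(POLL_SECS);		/* back off before retrying */
		waited += POLL_SECS;
	} while (waited < LIMIT_SECS);

	fprintf(stderr, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

int main(void)
{
	return wait_for_post() ? 1 : 0;
}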
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(flags);
	req->enable_flags = cpu_to_le32(flags);
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);
@@ -753,8 +753,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
			u32 if_id, u32 *pmac_id);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
			bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
			u32 en_flags, u8 *mac, bool pmac_invalid,
			u32 *if_handle, u32 *pmac_id);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
extern int be_cmd_eq_create(struct be_adapter *adapter,
			struct be_queue_info *eq, int eq_delay);
@@ -1616,19 +1616,22 @@ static int be_open(struct net_device *netdev)
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 if_flags;
	u32 cap_flags, en_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id);
	if (status != 0)
		goto do_none;


	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;
@@ -2051,6 +2054,10 @@ static int be_hw_up(struct be_adapter *adapter)
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;
@@ -2104,10 +2111,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
	if (status)
		goto free_netdev;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;
@@ -3704,10 +3704,10 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,

	if (skb->protocol == htons(ETH_P_IP)) {
		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
			(data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
			(data->h_dest[5] ^ data->h_source[5])) % count;
	}

	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
	return (data->h_dest[5] ^ data->h_source[5]) % count;
}

/*
@@ -3734,7 +3734,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,

	}

	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
	return (data->h_dest[5] ^ data->h_source[5]) % count;
}

/*
@@ -3745,7 +3745,7 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
{
	struct ethhdr *data = (struct ethhdr *)skb->data;

	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
	return (data->h_dest[5] ^ data->h_source[5]) % count;
}

/*-------------------------- Device entry points ----------------------------*/
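
The three bonding hunks above change the transmit-hash input from the bond device's own MAC address to the source MAC of the frame being sent, so traffic from different senders can spread across different slaves. A standalone illustration of the resulting layer-2 style hash (the MAC values and helper name are made up; this is not the driver code itself):

#include <stdint.h>
#include <stdio.h>

/* Last-byte XOR hash, as in the L2 transmit policy: pick a slave from the
 * destination and source MAC addresses of the frame itself. */
static int l2_hash(const uint8_t dest[6], const uint8_t src[6], int slave_count)
{
	return (dest[5] ^ src[5]) % slave_count;
}

int main(void)
{
	const uint8_t dest[6]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t src_a[6] = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x01 };
	const uint8_t src_b[6] = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x02 };

	/* Different senders can now land on different slaves even though
	 * the bond's own address never changes. */
	printf("flow A -> slave %d\n", l2_hash(dest, src_a, 2));
	printf("flow B -> slave %d\n", l2_hash(dest, src_b, 2));
	return 0;
}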
@@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
	{.compatible = "nxp,sja1000"},
	{},
};
MODULE_DEVICE_TABLE(of, sja1000_ofp_table);

static struct of_platform_driver sja1000_ofp_driver = {
	.owner = THIS_MODULE,
@@ -50,7 +50,7 @@
#define DM9000_RCSR		0x32

#define CHIPR_DM9000A	0x19
#define CHIPR_DM9000B	0x1B
#define CHIPR_DM9000B	0x1A

#define DM9000_MRCMDX	0xF0
#define DM9000_MRCMD	0xF2
@@ -518,9 +518,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
					  u16 *data);
extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
					   u16 data);
extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -537,7 +541,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
					u16 data);
extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
				       u16 *data);
extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
				       u32 usec_interval, bool *success);
extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -545,7 +553,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_check_downshift(struct e1000_hw *hw);
extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
					u16 *data);
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
					 u16 data);
extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
	s32 (*get_cable_length)(struct e1000_hw *);
	s32 (*get_phy_info)(struct e1000_hw *);
	s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
	s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
	void (*release_phy)(struct e1000_hw *);
	s32 (*reset_phy)(struct e1000_hw *);
	s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
	s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
	s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
	s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
	s32 (*cfg_on_link_up)(struct e1000_hw *);
};

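
The *_locked prototypes and function pointers added above all follow one pattern, repeated throughout the e1000e hunks that follow: a single internal helper takes a 'locked' flag and only acquires and releases the hardware semaphore when the caller does not already hold it, with two thin public wrappers on top. A minimal sketch of that shape, assuming placeholder acquire/release/do_read helpers rather than the real phy.ops callbacks:

#include <stdbool.h>

typedef int s32;

/* Stubs standing in for phy.ops.acquire_phy/release_phy and the MDIC read. */
static s32 acquire(void) { return 0; }		/* take the HW semaphore */
static void release(void) { }			/* drop the HW semaphore */
static s32 do_read(unsigned offset, unsigned short *data)
{
	*data = (unsigned short)offset;		/* fake register read */
	return 0;
}

static s32 __read_reg(unsigned offset, unsigned short *data, bool locked)
{
	s32 ret = 0;

	if (!locked) {
		ret = acquire();
		if (ret)
			return ret;
	}

	ret = do_read(offset, data);

	if (!locked)
		release();
	return ret;
}

/* Public entry point: takes and drops the semaphore itself. */
s32 read_reg(unsigned offset, unsigned short *data)
{
	return __read_reg(offset, data, false);
}

/* _locked entry point: assumes the caller already holds the semaphore. */
s32 read_reg_locked(unsigned offset, unsigned short *data)
{
	return __read_reg(offset, data, true);
}

int main(void)
{
	unsigned short v = 0;
	return read_reg(0x10, &v) || read_reg_locked(0x11, &v);
}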
@ -122,6 +122,13 @@
|
|||
|
||||
#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
|
||||
|
||||
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
|
||||
|
||||
/* OEM Bits Phy Register */
|
||||
#define HV_OEM_BITS PHY_REG(768, 25)
|
||||
#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
|
||||
#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
|
||||
|
||||
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
|
||||
/* Offset 04h HSFSTS */
|
||||
union ich8_hws_flash_status {
|
||||
|
@ -200,6 +207,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
|
|||
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
|
||||
|
||||
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
|
||||
{
|
||||
|
@ -242,7 +250,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
|
|||
|
||||
phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
|
||||
phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
|
||||
phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
|
||||
phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
|
||||
phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
|
||||
phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
|
||||
phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
|
||||
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
|
||||
|
||||
phy->id = e1000_phy_unknown;
|
||||
|
@ -303,6 +315,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
|
|||
case IGP03E1000_E_PHY_ID:
|
||||
phy->type = e1000_phy_igp_3;
|
||||
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
|
||||
phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
|
||||
phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
|
||||
break;
|
||||
case IFE_E_PHY_ID:
|
||||
case IFE_PLUS_E_PHY_ID:
|
||||
|
@ -567,13 +581,40 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
|
|||
|
||||
static DEFINE_MUTEX(nvm_mutex);
|
||||
|
||||
/**
|
||||
* e1000_acquire_nvm_ich8lan - Acquire NVM mutex
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Acquires the mutex for performing NVM operations.
|
||||
**/
|
||||
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
mutex_lock(&nvm_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_release_nvm_ich8lan - Release NVM mutex
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Releases the mutex used while performing NVM operations.
|
||||
**/
|
||||
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
mutex_unlock(&nvm_mutex);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static DEFINE_MUTEX(swflag_mutex);
|
||||
|
||||
/**
|
||||
* e1000_acquire_swflag_ich8lan - Acquire software control flag
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Acquires the software control flag for performing NVM and PHY
|
||||
* operations. This is a function pointer entry point only called by
|
||||
* read/write routines for the PHY and NVM parts.
|
||||
* Acquires the software control flag for performing PHY and select
|
||||
* MAC CSR accesses.
|
||||
**/
|
||||
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
|
@ -582,7 +623,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
|
|||
|
||||
might_sleep();
|
||||
|
||||
mutex_lock(&nvm_mutex);
|
||||
mutex_lock(&swflag_mutex);
|
||||
|
||||
while (timeout) {
|
||||
extcnf_ctrl = er32(EXTCNF_CTRL);
|
||||
|
@ -599,7 +640,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
|
|||
goto out;
|
||||
}
|
||||
|
||||
timeout = PHY_CFG_TIMEOUT * 2;
|
||||
timeout = SW_FLAG_TIMEOUT;
|
||||
|
||||
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
|
||||
ew32(EXTCNF_CTRL, extcnf_ctrl);
|
||||
|
@ -623,7 +664,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
|
|||
|
||||
out:
|
||||
if (ret_val)
|
||||
mutex_unlock(&nvm_mutex);
|
||||
mutex_unlock(&swflag_mutex);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
@ -632,9 +673,8 @@ out:
|
|||
* e1000_release_swflag_ich8lan - Release software control flag
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Releases the software control flag for performing NVM and PHY operations.
|
||||
* This is a function pointer entry point only called by read/write
|
||||
* routines for the PHY and NVM parts.
|
||||
* Releases the software control flag for performing PHY and select
|
||||
* MAC CSR accesses.
|
||||
**/
|
||||
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
|
@ -644,7 +684,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
|
|||
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
|
||||
ew32(EXTCNF_CTRL, extcnf_ctrl);
|
||||
|
||||
mutex_unlock(&nvm_mutex);
|
||||
mutex_unlock(&swflag_mutex);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -844,7 +886,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
|
|||
u32 i;
|
||||
u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
|
||||
s32 ret_val;
|
||||
u16 word_addr, reg_data, reg_addr, phy_page = 0;
|
||||
u16 reg, word_addr, reg_data, reg_addr, phy_page = 0;
|
||||
|
||||
ret_val = e1000e_phy_hw_reset_generic(hw);
|
||||
if (ret_val)
|
||||
|
@ -859,6 +901,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
|
|||
return ret_val;
|
||||
}
|
||||
|
||||
/* Dummy read to clear the phy wakeup bit after lcd reset */
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
e1e_rphy(hw, BM_WUC, ®);
|
||||
|
||||
/*
|
||||
* Initialize the PHY from the NVM on ICH platforms. This
|
||||
* is needed due to an issue where the NVM configuration is
|
||||
|
@ -1053,6 +1099,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
|
|||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_set_lplu_state_pchlan - Set Low Power Link Up state
|
||||
* @hw: pointer to the HW structure
|
||||
* @active: true to enable LPLU, false to disable
|
||||
*
|
||||
* Sets the LPLU state according to the active flag. For PCH, if OEM write
|
||||
* bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
|
||||
* the phy speed. This function will manually set the LPLU bit and restart
|
||||
* auto-neg as hw would do. D3 and D0 LPLU will call the same function
|
||||
* since it configures the same bit.
|
||||
**/
|
||||
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
u16 oem_reg;
|
||||
|
||||
ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
if (active)
|
||||
oem_reg |= HV_OEM_BITS_LPLU;
|
||||
else
|
||||
oem_reg &= ~HV_OEM_BITS_LPLU;
|
||||
|
||||
oem_reg |= HV_OEM_BITS_RESTART_AN;
|
||||
ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -1314,12 +1392,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
|
|||
if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
|
||||
(words == 0)) {
|
||||
hw_dbg(hw, "nvm parameter(s) out of bounds\n");
|
||||
return -E1000_ERR_NVM;
|
||||
ret_val = -E1000_ERR_NVM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = e1000_acquire_swflag_ich8lan(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
nvm->ops.acquire_nvm(hw);
|
||||
|
||||
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
|
||||
if (ret_val) {
|
||||
|
@ -1345,7 +1422,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
|
|||
}
|
||||
}
|
||||
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
|
||||
out:
|
||||
if (ret_val)
|
||||
|
@ -1603,11 +1680,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
|
|||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
nvm->ops.acquire_nvm(hw);
|
||||
|
||||
for (i = 0; i < words; i++) {
|
||||
dev_spec->shadow_ram[offset+i].modified = 1;
|
||||
dev_spec->shadow_ram[offset+i].value = data[i];
|
||||
}
|
||||
|
||||
nvm->ops.release_nvm(hw);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1637,9 +1718,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
if (nvm->type != e1000_nvm_flash_sw)
|
||||
goto out;
|
||||
|
||||
ret_val = e1000_acquire_swflag_ich8lan(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
nvm->ops.acquire_nvm(hw);
|
||||
|
||||
/*
|
||||
* We're writing to the opposite bank so if we're on bank 1,
|
||||
|
@ -1657,7 +1736,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
old_bank_offset = 0;
|
||||
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
|
||||
if (ret_val) {
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
|
@ -1665,7 +1744,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
new_bank_offset = 0;
|
||||
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
|
||||
if (ret_val) {
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -1723,7 +1802,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
if (ret_val) {
|
||||
/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
|
||||
hw_dbg(hw, "Flash commit failed.\n");
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1736,7 +1815,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
|
||||
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
|
||||
if (ret_val) {
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
goto out;
|
||||
}
|
||||
data &= 0xBFFF;
|
||||
|
@ -1744,7 +1823,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
act_offset * 2 + 1,
|
||||
(u8)(data >> 8));
|
||||
if (ret_val) {
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1757,7 +1836,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
|
||||
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
|
||||
if (ret_val) {
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1767,7 +1846,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
dev_spec->shadow_ram[i].value = 0xFFFF;
|
||||
}
|
||||
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
|
||||
/*
|
||||
* Reload the EEPROM, or else modifications will not appear
|
||||
|
@ -1831,14 +1910,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
|
|||
**/
|
||||
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
union ich8_flash_protected_range pr0;
|
||||
union ich8_hws_flash_status hsfsts;
|
||||
u32 gfpreg;
|
||||
s32 ret_val;
|
||||
|
||||
ret_val = e1000_acquire_swflag_ich8lan(hw);
|
||||
if (ret_val)
|
||||
return;
|
||||
nvm->ops.acquire_nvm(hw);
|
||||
|
||||
gfpreg = er32flash(ICH_FLASH_GFPREG);
|
||||
|
||||
|
@ -1859,7 +1936,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
|
|||
hsfsts.hsf_status.flockdn = true;
|
||||
ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
|
||||
|
||||
e1000_release_swflag_ich8lan(hw);
|
||||
nvm->ops.release_nvm(hw);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2229,6 +2306,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
|
|||
**/
|
||||
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
u16 reg;
|
||||
u32 ctrl, icr, kab;
|
||||
s32 ret_val;
|
||||
|
||||
|
@ -2304,6 +2382,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
|
|||
hw_dbg(hw, "Auto Read Done did not complete\n");
|
||||
}
|
||||
}
|
||||
/* Dummy read to clear the phy wakeup bit after lcd reset */
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
e1e_rphy(hw, BM_WUC, ®);
|
||||
|
||||
/*
|
||||
* For PCH, this write will make sure that any noise
|
||||
|
@ -2843,9 +2924,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
|
|||
E1000_PHY_CTRL_GBE_DISABLE;
|
||||
ew32(PHY_CTRL, phy_ctrl);
|
||||
|
||||
/* Workaround SWFLAG unexpectedly set during S0->Sx */
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
udelay(500);
|
||||
e1000_phy_hw_reset_ich8lan(hw);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -3113,9 +3193,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
|
|||
};
|
||||
|
||||
static struct e1000_nvm_operations ich8_nvm_ops = {
|
||||
.acquire_nvm = e1000_acquire_swflag_ich8lan,
|
||||
.acquire_nvm = e1000_acquire_nvm_ich8lan,
|
||||
.read_nvm = e1000_read_nvm_ich8lan,
|
||||
.release_nvm = e1000_release_swflag_ich8lan,
|
||||
.release_nvm = e1000_release_nvm_ich8lan,
|
||||
.update_nvm = e1000_update_nvm_checksum_ich8lan,
|
||||
.valid_led_default = e1000_valid_led_default_ich8lan,
|
||||
.validate_nvm = e1000_validate_nvm_checksum_ich8lan,
|
||||
|
|
|
@ -164,16 +164,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
|
|||
* MDIC mode. No harm in trying again in this case since
|
||||
* the PHY ID is unknown at this point anyway
|
||||
*/
|
||||
ret_val = phy->ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
phy->ops.release_phy(hw);
|
||||
|
||||
retry_count++;
|
||||
}
|
||||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if (retry_count)
|
||||
if (retry_count) {
|
||||
ret_val = phy->ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
phy->ops.release_phy(hw);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
@ -354,38 +363,117 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
}
|
||||
|
||||
/**
|
||||
* e1000e_read_phy_reg_igp - Read igp PHY register
|
||||
* __e1000e_read_phy_reg_igp - Read igp PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
* @locked: semaphore has already been acquired or not
|
||||
*
|
||||
* Acquires semaphore, if necessary, then reads the PHY register at offset
|
||||
* and storing the retrieved information in data. Release any acquired
|
||||
* and stores the retrieved information in data. Release any acquired
|
||||
* semaphores before exiting.
|
||||
**/
|
||||
s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
|
||||
bool locked)
|
||||
{
|
||||
s32 ret_val;
|
||||
s32 ret_val = 0;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
if (!locked) {
|
||||
if (!(hw->phy.ops.acquire_phy))
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (offset > MAX_PHY_MULTI_PAGE_REG) {
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(u16)offset);
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
if (ret_val)
|
||||
goto release;
|
||||
}
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
data);
|
||||
|
||||
hw->phy.ops.release_phy(hw);
|
||||
release:
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_read_phy_reg_igp - Read igp PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
*
|
||||
* Acquires semaphore then reads the PHY register at offset and stores the
|
||||
* retrieved information in data.
|
||||
* Release the acquired semaphore before exiting.
|
||||
**/
|
||||
s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
{
|
||||
return __e1000e_read_phy_reg_igp(hw, offset, data, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_read_phy_reg_igp_locked - Read igp PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
*
|
||||
* Reads the PHY register at offset and stores the retrieved information
|
||||
* in data. Assumes semaphore already acquired.
|
||||
**/
|
||||
s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
{
|
||||
return __e1000e_read_phy_reg_igp(hw, offset, data, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_write_phy_reg_igp - Write igp PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
* @locked: semaphore has already been acquired or not
|
||||
*
|
||||
* Acquires semaphore, if necessary, then writes the data to PHY register
|
||||
* at the offset. Release any acquired semaphores before exiting.
|
||||
**/
|
||||
static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
|
||||
bool locked)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
|
||||
if (!locked) {
|
||||
if (!(hw->phy.ops.acquire_phy))
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (offset > MAX_PHY_MULTI_PAGE_REG) {
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(u16)offset);
|
||||
if (ret_val)
|
||||
goto release;
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
|
||||
release:
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -395,53 +483,53 @@ s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
*
|
||||
* Acquires semaphore, if necessary, then writes the data to PHY register
|
||||
* Acquires semaphore then writes the data to PHY register
|
||||
* at the offset. Release any acquired semaphores before exiting.
|
||||
**/
|
||||
s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
{
|
||||
s32 ret_val;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
if (offset > MAX_PHY_MULTI_PAGE_REG) {
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(u16)offset);
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
return ret_val;
|
||||
return __e1000e_write_phy_reg_igp(hw, offset, data, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_read_kmrn_reg - Read kumeran register
|
||||
* e1000e_write_phy_reg_igp_locked - Write igp PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
*
|
||||
* Writes the data to PHY register at the offset.
|
||||
* Assumes semaphore already acquired.
|
||||
**/
|
||||
s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
{
|
||||
return __e1000e_write_phy_reg_igp(hw, offset, data, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* __e1000_read_kmrn_reg - Read kumeran register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
* @locked: semaphore has already been acquired or not
|
||||
*
|
||||
* Acquires semaphore, if necessary. Then reads the PHY register at offset
|
||||
* using the kumeran interface. The information retrieved is stored in data.
|
||||
* Release any acquired semaphores before exiting.
|
||||
**/
|
||||
s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
|
||||
bool locked)
|
||||
{
|
||||
u32 kmrnctrlsta;
|
||||
s32 ret_val;
|
||||
s32 ret_val = 0;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
if (!locked) {
|
||||
if (!(hw->phy.ops.acquire_phy))
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
|
||||
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
|
||||
|
@ -452,40 +540,110 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
kmrnctrlsta = er32(KMRNCTRLSTA);
|
||||
*data = (u16)kmrnctrlsta;
|
||||
|
||||
hw->phy.ops.release_phy(hw);
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_write_kmrn_reg - Write kumeran register
|
||||
* e1000e_read_kmrn_reg - Read kumeran register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
*
|
||||
* Acquires semaphore then reads the PHY register at offset using the
|
||||
* kumeran interface. The information retrieved is stored in data.
|
||||
* Release the acquired semaphore before exiting.
|
||||
**/
|
||||
s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
{
|
||||
return __e1000_read_kmrn_reg(hw, offset, data, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_kmrn_reg_locked - Read kumeran register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
*
|
||||
* Reads the PHY register at offset using the kumeran interface. The
|
||||
* information retrieved is stored in data.
|
||||
* Assumes semaphore already acquired.
|
||||
**/
|
||||
s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
{
|
||||
return __e1000_read_kmrn_reg(hw, offset, data, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* __e1000_write_kmrn_reg - Write kumeran register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
* @locked: semaphore has already been acquired or not
|
||||
*
|
||||
* Acquires semaphore, if necessary. Then write the data to PHY register
|
||||
* at the offset using the kumeran interface. Release any acquired semaphores
|
||||
* before exiting.
|
||||
**/
|
||||
s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
|
||||
bool locked)
|
||||
{
|
||||
u32 kmrnctrlsta;
|
||||
s32 ret_val;
|
||||
s32 ret_val = 0;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
if (!locked) {
|
||||
if (!(hw->phy.ops.acquire_phy))
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
|
||||
E1000_KMRNCTRLSTA_OFFSET) | data;
|
||||
ew32(KMRNCTRLSTA, kmrnctrlsta);
|
||||
|
||||
udelay(2);
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_write_kmrn_reg - Write kumeran register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
*
|
||||
* Acquires semaphore then writes the data to the PHY register at the offset
|
||||
* using the kumeran interface. Release the acquired semaphore before exiting.
|
||||
**/
|
||||
s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
{
|
||||
return __e1000_write_kmrn_reg(hw, offset, data, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_kmrn_reg_locked - Write kumeran register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
*
|
||||
* Write the data to PHY register at the offset using the kumeran interface.
|
||||
* Assumes semaphore already acquired.
|
||||
**/
|
||||
s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
{
|
||||
return __e1000_write_kmrn_reg(hw, offset, data, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -2105,6 +2263,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
u32 page = offset >> IGP_PAGE_SHIFT;
|
||||
u32 page_shift = 0;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Page 800 works differently than the rest so it has its own func */
|
||||
if (page == BM_WUC_PAGE) {
|
||||
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
|
||||
|
@ -2112,10 +2274,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
|
||||
|
||||
if (offset > MAX_PHY_MULTI_PAGE_REG) {
|
||||
|
@ -2135,18 +2293,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
|
||||
(page << page_shift));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -2167,6 +2322,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
u32 page = offset >> IGP_PAGE_SHIFT;
|
||||
u32 page_shift = 0;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Page 800 works differently than the rest so it has its own func */
|
||||
if (page == BM_WUC_PAGE) {
|
||||
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
|
||||
|
@ -2174,10 +2333,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
|
||||
|
||||
if (offset > MAX_PHY_MULTI_PAGE_REG) {
|
||||
|
@ -2197,17 +2352,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
|
||||
(page << page_shift));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -2226,17 +2378,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
s32 ret_val;
|
||||
u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Page 800 works differently than the rest so it has its own func */
|
||||
if (page == BM_WUC_PAGE) {
|
||||
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
|
||||
true);
|
||||
return ret_val;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
hw->phy.addr = 1;
|
||||
|
||||
if (offset > MAX_PHY_MULTI_PAGE_REG) {
|
||||
|
@ -2245,16 +2397,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
|
||||
page);
|
||||
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
out:
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -2272,17 +2422,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
s32 ret_val;
|
||||
u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Page 800 works differently than the rest so it has its own func */
|
||||
if (page == BM_WUC_PAGE) {
|
||||
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
|
||||
false);
|
||||
return ret_val;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
hw->phy.addr = 1;
|
||||
|
||||
if (offset > MAX_PHY_MULTI_PAGE_REG) {
|
||||
|
@ -2290,17 +2440,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
|
||||
page);
|
||||
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
|
||||
data);
|
||||
|
||||
out:
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -2320,6 +2468,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
* 3) Write the address using the address opcode (0x11)
|
||||
* 4) Read or write the data using the data opcode (0x12)
|
||||
* 5) Restore 769_17.2 to its original value
|
||||
*
|
||||
* Assumes semaphore already acquired.
|
||||
**/
|
||||
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
|
||||
u16 *data, bool read)
|
||||
|
@ -2327,20 +2477,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
|
|||
s32 ret_val;
|
||||
u16 reg = BM_PHY_REG_NUM(offset);
|
||||
u16 phy_reg = 0;
|
||||
u8 phy_acquired = 1;
|
||||
|
||||
|
||||
/* Gig must be disabled for MDIO accesses to page 800 */
|
||||
if ((hw->mac.type == e1000_pchlan) &&
|
||||
(!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
|
||||
hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val) {
|
||||
phy_acquired = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* All operations in this function are phy address 1 */
|
||||
hw->phy.addr = 1;
|
||||
|
||||
|
@ -2397,8 +2539,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
|
|||
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
|
||||
|
||||
out:
|
||||
if (phy_acquired == 1)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -2439,52 +2579,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
|
||||
* @hw: pointer to the HW structure
|
||||
* @slow: true for slow mode, false for normal mode
|
||||
*
|
||||
* Assumes semaphore already acquired.
|
||||
**/
|
||||
s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
u16 data = 0;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
|
||||
hw->phy.addr = 1;
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
|
||||
(BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
|
||||
(0x2180 | (slow << 10)));
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
/* dummy read when reverting to fast mode - throw away result */
|
||||
if (!slow)
|
||||
e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
|
||||
|
||||
hw->phy.ops.release_phy(hw);
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_phy_reg_hv - Read HV PHY register
|
||||
* __e1000_read_phy_reg_hv - Read HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
* @locked: semaphore has already been acquired or not
|
||||
*
|
||||
* Acquires semaphore, if necessary, then reads the PHY register at offset
|
||||
* and storing the retrieved information in data. Release any acquired
|
||||
* and stores the retrieved information in data. Release any acquired
|
||||
* semaphore before exiting.
|
||||
**/
|
||||
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
|
||||
bool locked)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 page = BM_PHY_REG_PAGE(offset);
|
||||
u16 reg = BM_PHY_REG_NUM(offset);
|
||||
bool in_slow_mode = false;
|
||||
|
||||
if (!locked) {
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/* Workaround failure in MDIO access while cable is disconnected */
|
||||
if ((hw->phy.type == e1000_phy_82577) &&
|
||||
!(er32(STATUS) & E1000_STATUS_LU)) {
|
||||
|
@ -2508,10 +2659,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
|
||||
|
||||
if (page == HV_INTC_FC_PAGE_START)
|
||||
|
@ -2529,42 +2676,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
|
|||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
goto out;
|
||||
}
|
||||
hw->phy.addr = phy_addr;
|
||||
}
|
||||
}
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
|
||||
data);
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_phy_reg_hv - Write HV PHY register
|
||||
* e1000_read_phy_reg_hv - Read HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
*
|
||||
* Acquires semaphore then reads the PHY register at offset and stores
|
||||
* the retrieved information in data. Release the acquired semaphore
|
||||
* before exiting.
|
||||
**/
|
||||
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
{
|
||||
return __e1000_read_phy_reg_hv(hw, offset, data, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_phy_reg_hv_locked - Read HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
*
|
||||
* Reads the PHY register at offset and stores the retrieved information
|
||||
* in data. Assumes semaphore already acquired.
|
||||
**/
|
||||
s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
{
|
||||
return __e1000_read_phy_reg_hv(hw, offset, data, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* __e1000_write_phy_reg_hv - Write HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
* @locked: semaphore has already been acquired or not
|
||||
*
|
||||
* Acquires semaphore, if necessary, then writes the data to PHY register
|
||||
* at the offset. Release any acquired semaphores before exiting.
|
||||
**/
|
||||
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
|
||||
bool locked)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 page = BM_PHY_REG_PAGE(offset);
|
||||
u16 reg = BM_PHY_REG_NUM(offset);
|
||||
bool in_slow_mode = false;
|
||||
|
||||
if (!locked) {
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/* Workaround failure in MDIO access while cable is disconnected */
|
||||
if ((hw->phy.type == e1000_phy_82577) &&
|
||||
!(er32(STATUS) & E1000_STATUS_LU)) {
|
||||
|
@ -2588,10 +2769,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
|
||||
|
||||
if (page == HV_INTC_FC_PAGE_START)
|
||||
|
@ -2607,15 +2784,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
((MAX_PHY_REG_ADDRESS & reg) == 0) &&
|
||||
(data & (1 << 11))) {
|
||||
u16 data2 = 0x7EFF;
|
||||
hw->phy.ops.release_phy(hw);
|
||||
ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
|
||||
&data2, false);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (reg > MAX_PHY_MULTI_PAGE_REG) {
|
||||
|
@ -2630,26 +2802,52 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
goto out;
|
||||
}
|
||||
hw->phy.addr = phy_addr;
|
||||
}
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
|
||||
data);
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_phy_reg_hv - Write HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
*
|
||||
* Acquires semaphore then writes the data to PHY register at the offset.
|
||||
* Release the acquired semaphores before exiting.
|
||||
**/
|
||||
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
{
|
||||
return __e1000_write_phy_reg_hv(hw, offset, data, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_phy_reg_hv_locked - Write HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
*
|
||||
* Writes the data to PHY register at the offset. Assumes semaphore
|
||||
* already acquired.
|
||||
**/
|
||||
s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
{
|
||||
return __e1000_write_phy_reg_hv(hw, offset, data, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
|
||||
* @page: page to be accessed
|
||||
|
@ -2671,10 +2869,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
|
|||
* @data: pointer to the data to be read or written
|
||||
* @read: determines if operation is read or written
|
||||
*
|
||||
* Acquires semaphore, if necessary, then reads the PHY register at offset
|
||||
* and storing the retreived information in data. Release any acquired
|
||||
* semaphores before exiting. Note that the procedure to read these regs
|
||||
* uses the address port and data port to read/write.
|
||||
* Reads the PHY register at offset and stores the retreived information
|
||||
* in data. Assumes semaphore already acquired. Note that the procedure
|
||||
* to read these regs uses the address port and data port to read/write.
|
||||
**/
|
||||
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
|
||||
u16 *data, bool read)
|
||||
|
@ -2682,20 +2879,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
|
|||
s32 ret_val;
|
||||
u32 addr_reg = 0;
|
||||
u32 data_reg = 0;
|
||||
u8 phy_acquired = 1;
|
||||
|
||||
/* This takes care of the difference with desktop vs mobile phy */
|
||||
addr_reg = (hw->phy.type == e1000_phy_82578) ?
|
||||
I82578_ADDR_REG : I82577_ADDR_REG;
|
||||
data_reg = addr_reg + 1;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val) {
|
||||
hw_dbg(hw, "Could not acquire PHY\n");
|
||||
phy_acquired = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* All operations in this function are phy address 2 */
|
||||
hw->phy.addr = 2;
|
||||
|
||||
|
@ -2718,8 +2907,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
|
|||
}
|
||||
|
||||
out:
|
||||
if (phy_acquired == 1)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
|
|
@ -222,24 +222,25 @@ struct ethoc_bd {
|
|||
u32 addr;
|
||||
};
|
||||
|
||||
static u32 ethoc_read(struct ethoc *dev, loff_t offset)
|
||||
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
|
||||
{
|
||||
return ioread32(dev->iobase + offset);
|
||||
}
|
||||
|
||||
static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
|
||||
static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
|
||||
{
|
||||
iowrite32(data, dev->iobase + offset);
|
||||
}
|
||||
|
||||
static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
|
||||
static inline void ethoc_read_bd(struct ethoc *dev, int index,
|
||||
struct ethoc_bd *bd)
|
||||
{
|
||||
loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
|
||||
bd->stat = ethoc_read(dev, offset + 0);
|
||||
bd->addr = ethoc_read(dev, offset + 4);
|
||||
}
|
||||
|
||||
static void ethoc_write_bd(struct ethoc *dev, int index,
|
||||
static inline void ethoc_write_bd(struct ethoc *dev, int index,
|
||||
const struct ethoc_bd *bd)
|
||||
{
|
||||
loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
|
||||
|
@ -247,33 +248,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
|
|||
ethoc_write(dev, offset + 4, bd->addr);
|
||||
}
|
||||
|
||||
static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
|
||||
static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
|
||||
{
|
||||
u32 imask = ethoc_read(dev, INT_MASK);
|
||||
imask |= mask;
|
||||
ethoc_write(dev, INT_MASK, imask);
|
||||
}
|
||||
|
||||
static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
|
||||
static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
|
||||
{
|
||||
u32 imask = ethoc_read(dev, INT_MASK);
|
||||
imask &= ~mask;
|
||||
ethoc_write(dev, INT_MASK, imask);
|
||||
}
|
||||
|
||||
static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
|
||||
static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
|
||||
{
|
||||
ethoc_write(dev, INT_SOURCE, mask);
|
||||
}
|
||||
|
||||
static void ethoc_enable_rx_and_tx(struct ethoc *dev)
|
||||
static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
|
||||
{
|
||||
u32 mode = ethoc_read(dev, MODER);
|
||||
mode |= MODER_RXEN | MODER_TXEN;
|
||||
ethoc_write(dev, MODER, mode);
|
||||
}
|
||||
|
||||
static void ethoc_disable_rx_and_tx(struct ethoc *dev)
|
||||
static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
|
||||
{
|
||||
u32 mode = ethoc_read(dev, MODER);
|
||||
mode &= ~(MODER_RXEN | MODER_TXEN);
|
||||
|
@ -507,7 +508,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
|
|||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
ethoc_ack_irq(priv, INT_MASK_ALL);
|
||||
ethoc_ack_irq(priv, pending);
|
||||
|
||||
if (pending & INT_MASK_BUSY) {
|
||||
dev_err(&dev->dev, "packet dropped\n");
|
||||
|
|
|
@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
|
|||
*
|
||||
* index is only used in legacy code
|
||||
*/
|
||||
int __init fec_enet_init(struct net_device *dev, int index)
|
||||
static int fec_enet_init(struct net_device *dev, int index)
|
||||
{
|
||||
struct fec_enet_private *fep = netdev_priv(dev);
|
||||
struct bufdesc *cbd_base;
|
||||
|
|
|
@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
|
|||
|
||||
mpc52xx_fec_hw_init(dev);
|
||||
|
||||
if (priv->phydev) {
|
||||
phy_stop(priv->phydev);
|
||||
phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
|
||||
phy_start(priv->phydev);
|
||||
}
|
||||
|
||||
bcom_fec_rx_reset(priv->rx_dmatsk);
|
||||
bcom_fec_tx_reset(priv->tx_dmatsk);
|
||||
|
||||
|
|
|
@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
|
|||
{ .compatible = "mpc5200b-fec-phy", },
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
|
||||
|
||||
struct of_platform_driver mpc52xx_fec_mdio_driver = {
|
||||
.name = "mpc5200b-fec-phy",
|
||||
|
|
|
@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = {
|
|||
#endif
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, fs_enet_match);
|
||||
|
||||
static struct of_platform_driver fs_enet_driver = {
|
||||
.name = "fs_enet",
|
||||
|
|
|
@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
|
|||
},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
|
||||
|
||||
static struct of_platform_driver fs_enet_bb_mdio_driver = {
|
||||
.name = "fsl-bb-mdio",
|
||||
|
|
|
@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
|
|||
#endif
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
|
||||
|
||||
static struct of_platform_driver fs_enet_fec_mdio_driver = {
|
||||
.name = "fsl-fec-mdio",
|
||||
|
|
|
@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
|
|||
},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
|
||||
|
||||
static struct of_platform_driver fsl_pq_mdio_driver = {
|
||||
.name = "fsl-pq_mdio",
|
||||
|
|
|
@ -2397,9 +2397,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* work with hotplug and coldplug */
|
||||
MODULE_ALIAS("platform:fsl-gianfar");
|
||||
|
||||
static struct of_device_id gfar_match[] =
|
||||
{
|
||||
{
|
||||
|
@ -2408,6 +2405,7 @@ static struct of_device_id gfar_match[] =
|
|||
},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, gfar_match);
|
||||
|
||||
/* Structure for a device driver */
|
||||
static struct of_platform_driver gfar_driver = {
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/errno.h>
|
||||
|
@ -2990,6 +2991,7 @@ static struct of_device_id emac_match[] =
|
|||
},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, emac_match);
|
||||
|
||||
static struct of_platform_driver emac_driver = {
|
||||
.name = "emac",
|
||||
|
|
|
@ -98,12 +98,13 @@ static void ri_tasklet(unsigned long dev)
|
|||
stats->tx_packets++;
|
||||
stats->tx_bytes +=skb->len;
|
||||
|
||||
skb->dev = __dev_get_by_index(&init_net, skb->iif);
|
||||
skb->dev = dev_get_by_index(&init_net, skb->iif);
|
||||
if (!skb->dev) {
|
||||
dev_kfree_skb(skb);
|
||||
stats->tx_dropped++;
|
||||
break;
|
||||
}
|
||||
dev_put(skb->dev);
|
||||
skb->iif = _dev->ifindex;
|
||||
|
||||
if (from & AT_EGRESS) {
|
||||
|
|
|
@ -739,7 +739,7 @@ static int igb_set_ringparam(struct net_device *netdev,
|
|||
{
|
||||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
struct igb_ring *temp_ring;
|
||||
int i, err;
|
||||
int i, err = 0;
|
||||
u32 new_rx_count, new_tx_count;
|
||||
|
||||
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
|
||||
|
@ -759,18 +759,30 @@ static int igb_set_ringparam(struct net_device *netdev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
|
||||
msleep(1);
|
||||
|
||||
if (!netif_running(adapter->netdev)) {
|
||||
for (i = 0; i < adapter->num_tx_queues; i++)
|
||||
adapter->tx_ring[i].count = new_tx_count;
|
||||
for (i = 0; i < adapter->num_rx_queues; i++)
|
||||
adapter->rx_ring[i].count = new_rx_count;
|
||||
adapter->tx_ring_count = new_tx_count;
|
||||
adapter->rx_ring_count = new_rx_count;
|
||||
goto clear_reset;
|
||||
}
|
||||
|
||||
if (adapter->num_tx_queues > adapter->num_rx_queues)
|
||||
temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
|
||||
else
|
||||
temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
|
||||
if (!temp_ring)
|
||||
return -ENOMEM;
|
||||
|
||||
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
|
||||
msleep(1);
|
||||
if (!temp_ring) {
|
||||
err = -ENOMEM;
|
||||
goto clear_reset;
|
||||
}
|
||||
|
||||
if (netif_running(adapter->netdev))
|
||||
igb_down(adapter);
|
||||
igb_down(adapter);
|
||||
|
||||
/*
|
||||
* We can't just free everything and then setup again,
|
||||
|
@ -827,14 +839,11 @@ static int igb_set_ringparam(struct net_device *netdev,
|
|||
|
||||
adapter->rx_ring_count = new_rx_count;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
err_setup:
|
||||
if (netif_running(adapter->netdev))
|
||||
igb_up(adapter);
|
||||
|
||||
clear_bit(__IGB_RESETTING, &adapter->state);
|
||||
igb_up(adapter);
|
||||
vfree(temp_ring);
|
||||
clear_reset:
|
||||
clear_bit(__IGB_RESETTING, &adapter->state);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
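The reshuffled error paths above implement one pattern: skip the heavy work when the interface is down, otherwise stage the resized rings off to the side around a down/up cycle, and release resources on every exit. A hedged, userspace-only sketch of that control flow follows; the helpers are placeholders for igb_down()/igb_up()/netif_running(), and the __IGB_RESETTING flag handling is omitted.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct ring { int count; };

static bool ifc_running;				/* netif_running() stand-in */
static void ifc_down(void) { ifc_running = false; }	/* igb_down() stand-in */
static void ifc_up(void)   { ifc_running = true;  }	/* igb_up() stand-in */

static int resize_rings(struct ring *rings, int n, int new_count)
{
	struct ring *tmp;
	int i, err = 0;

	/* Interface down: just record the new size, nothing to rebuild. */
	if (!ifc_running) {
		for (i = 0; i < n; i++)
			rings[i].count = new_count;
		return 0;
	}

	/* Build the new rings off to the side so a failure leaves the
	 * live rings untouched. */
	tmp = malloc(n * sizeof(*tmp));
	if (!tmp)
		return -1;			/* -ENOMEM in the driver */

	memcpy(tmp, rings, n * sizeof(*tmp));
	for (i = 0; i < n; i++)
		tmp[i].count = new_count;

	ifc_down();
	memcpy(rings, tmp, n * sizeof(*tmp));	/* swap in the staged rings */
	ifc_up();

	free(tmp);
	return err;
}

int main(void)
{
	struct ring rings[2] = { { 256 }, { 256 } };

	ifc_up();				/* pretend the interface is up */
	return resize_rings(rings, 2, 512);
}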
|
@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
|
|||
{
|
||||
struct igbvf_adapter *adapter = netdev_priv(netdev);
|
||||
struct igbvf_ring *temp_ring;
|
||||
int err;
|
||||
int err = 0;
|
||||
u32 new_rx_count, new_tx_count;
|
||||
|
||||
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
|
||||
|
@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
temp_ring = vmalloc(sizeof(struct igbvf_ring));
|
||||
if (!temp_ring)
|
||||
return -ENOMEM;
|
||||
|
||||
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
|
||||
msleep(1);
|
||||
|
||||
if (netif_running(adapter->netdev))
|
||||
igbvf_down(adapter);
|
||||
if (!netif_running(adapter->netdev)) {
|
||||
adapter->tx_ring->count = new_tx_count;
|
||||
adapter->rx_ring->count = new_rx_count;
|
||||
goto clear_reset;
|
||||
}
|
||||
|
||||
temp_ring = vmalloc(sizeof(struct igbvf_ring));
|
||||
if (!temp_ring) {
|
||||
err = -ENOMEM;
|
||||
goto clear_reset;
|
||||
}
|
||||
|
||||
igbvf_down(adapter);
|
||||
|
||||
/*
|
||||
* We can't just free everything and then setup again,
|
||||
|
@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev,
|
|||
|
||||
memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
|
||||
}
|
||||
|
||||
err = 0;
|
||||
err_setup:
|
||||
if (netif_running(adapter->netdev))
|
||||
igbvf_up(adapter);
|
||||
|
||||
clear_bit(__IGBVF_RESETTING, &adapter->state);
|
||||
igbvf_up(adapter);
|
||||
vfree(temp_ring);
|
||||
clear_reset:
|
||||
clear_bit(__IGBVF_RESETTING, &adapter->state);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -806,7 +806,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
|||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
|
||||
int i, err;
|
||||
int i, err = 0;
|
||||
u32 new_rx_count, new_tx_count;
|
||||
bool need_update = false;
|
||||
|
||||
|
@ -830,6 +830,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
|||
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
|
||||
msleep(1);
|
||||
|
||||
if (!netif_running(adapter->netdev)) {
|
||||
for (i = 0; i < adapter->num_tx_queues; i++)
|
||||
adapter->tx_ring[i].count = new_tx_count;
|
||||
for (i = 0; i < adapter->num_rx_queues; i++)
|
||||
adapter->rx_ring[i].count = new_rx_count;
|
||||
adapter->tx_ring_count = new_tx_count;
|
||||
adapter->rx_ring_count = new_rx_count;
|
||||
goto err_setup;
|
||||
}
|
||||
|
||||
temp_tx_ring = kcalloc(adapter->num_tx_queues,
|
||||
sizeof(struct ixgbe_ring), GFP_KERNEL);
|
||||
if (!temp_tx_ring) {
|
||||
|
@ -887,8 +897,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
|||
|
||||
/* if rings need to be updated, here's the place to do it in one shot */
|
||||
if (need_update) {
|
||||
if (netif_running(netdev))
|
||||
ixgbe_down(adapter);
|
||||
ixgbe_down(adapter);
|
||||
|
||||
/* tx */
|
||||
if (new_tx_count != adapter->tx_ring_count) {
|
||||
|
@ -905,13 +914,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
|||
temp_rx_ring = NULL;
|
||||
adapter->rx_ring_count = new_rx_count;
|
||||
}
|
||||
}
|
||||
|
||||
/* success! */
|
||||
err = 0;
|
||||
if (netif_running(netdev))
|
||||
ixgbe_up(adapter);
|
||||
|
||||
}
|
||||
err_setup:
|
||||
clear_bit(__IXGBE_RESETTING, &adapter->state);
|
||||
return err;
|
||||
|
|
|
@ -170,6 +170,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
|
|||
ks_err(ks, "spi_sync() failed\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* ks8851_wrreg8 - write 8bit register value to chip
|
||||
* @ks: The chip state
|
||||
* @reg: The register address
|
||||
* @val: The value to write
|
||||
*
|
||||
* Issue a write to put the value @val into the register specified in @reg.
|
||||
*/
|
||||
static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
|
||||
{
|
||||
struct spi_transfer *xfer = &ks->spi_xfer1;
|
||||
struct spi_message *msg = &ks->spi_msg1;
|
||||
__le16 txb[2];
|
||||
int ret;
|
||||
int bit;
|
||||
|
||||
bit = 1 << (reg & 3);
|
||||
|
||||
txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
|
||||
txb[1] = val;
|
||||
|
||||
xfer->tx_buf = txb;
|
||||
xfer->rx_buf = NULL;
|
||||
xfer->len = 3;
|
||||
|
||||
ret = spi_sync(ks->spidev, msg);
|
||||
if (ret < 0)
|
||||
ks_err(ks, "spi_sync() failed\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* ks8851_rx_1msg - select whether to use one or two messages for spi read
|
||||
* @ks: The device structure
|
||||
|
@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
|
|||
static int ks8851_write_mac_addr(struct net_device *dev)
|
||||
{
|
||||
struct ks8851_net *ks = netdev_priv(dev);
|
||||
u16 *mcp = (u16 *)dev->dev_addr;
|
||||
int i;
|
||||
|
||||
mutex_lock(&ks->lock);
|
||||
|
||||
ks8851_wrreg16(ks, KS_MARL, mcp[0]);
|
||||
ks8851_wrreg16(ks, KS_MARM, mcp[1]);
|
||||
ks8851_wrreg16(ks, KS_MARH, mcp[2]);
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
|
||||
|
||||
mutex_unlock(&ks->lock);
|
||||
|
||||
|
@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
|
|||
mcptr = mcptr->next;
|
||||
}
|
||||
|
||||
rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
|
||||
rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
|
||||
} else {
|
||||
/* just accept broadcast / unicast */
|
||||
rxctrl.rxcr1 = RXCR1_RXPAFMA;
|
||||
|
@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
|
|||
ndev->netdev_ops = &ks8851_netdev_ops;
|
||||
ndev->irq = spi->irq;
|
||||
|
||||
/* issue a global soft reset to reset the device. */
|
||||
ks8851_soft_reset(ks, GRR_GSR);
|
||||
|
||||
/* simple check for a valid chip being connected to the bus */
|
||||
|
||||
if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#define CCR_32PIN (1 << 0)
|
||||
|
||||
/* MAC address registers */
|
||||
#define KS_MAR(_m) 0x15 - (_m)
|
||||
#define KS_MARL 0x10
|
||||
#define KS_MARM 0x12
|
||||
#define KS_MARH 0x14
|
||||
|
|
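The new ks8851_wrreg8() helper and the KS_MAR(i) macro above cooperate: KS_MAR() maps MAC-address byte i onto the 0x10..0x15 register window in reverse order, and the helper derives the SPI byte-enable from the low two address bits with 1 << (reg & 3). A small standalone check of just that address/byte-enable arithmetic (no SPI traffic involved):

#include <stdio.h>

#define KS_MAR(m)	(0x15 - (m))	/* same mapping as the header above */

int main(void)
{
	int i;

	/* For each MAC byte show the register it lands in and the
	 * byte-enable bit ks8851_wrreg8() would compute for it. */
	for (i = 0; i < 6; i++) {
		unsigned reg = KS_MAR(i);
		unsigned bit = 1u << (reg & 3);
		printf("dev_addr[%d] -> reg 0x%02x, byte enable 0x%x\n",
		       i, reg, bit);
	}
	return 0;
}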
|
@ -75,7 +75,7 @@
|
|||
#include "myri10ge_mcp.h"
|
||||
#include "myri10ge_mcp_gen_header.h"
|
||||
|
||||
#define MYRI10GE_VERSION_STR "1.5.0-1.432"
|
||||
#define MYRI10GE_VERSION_STR "1.5.1-1.451"
|
||||
|
||||
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
|
||||
MODULE_AUTHOR("Maintainer: help@myri.com");
|
||||
|
@ -1623,10 +1623,21 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
|
|||
return 0;
|
||||
}
|
||||
}
|
||||
if (*ptr == 'R' || *ptr == 'Q') {
|
||||
/* We've found either an XFP or quad ribbon fiber */
|
||||
if (*ptr == '2')
|
||||
ptr++;
|
||||
if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
|
||||
/* We've found either an XFP, quad ribbon fiber, or SFP+ */
|
||||
cmd->port = PORT_FIBRE;
|
||||
cmd->supported |= SUPPORTED_FIBRE;
|
||||
cmd->advertising |= ADVERTISED_FIBRE;
|
||||
} else {
|
||||
cmd->port = PORT_OTHER;
|
||||
}
|
||||
if (*ptr == 'R' || *ptr == 'S')
|
||||
cmd->transceiver = XCVR_EXTERNAL;
|
||||
else
|
||||
cmd->transceiver = XCVR_INTERNAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -419,6 +419,7 @@ enum {
|
|||
#define NETXEN_CRB_ROMUSB \
|
||||
NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
|
||||
#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
|
||||
#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
|
||||
#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
|
||||
#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
|
||||
|
||||
|
|
|
@ -1778,22 +1778,16 @@ netxen_setup_hwops(struct netxen_adapter *adapter)
|
|||
|
||||
int netxen_nic_get_board_info(struct netxen_adapter *adapter)
|
||||
{
|
||||
int offset, board_type, magic, header_version;
|
||||
int offset, board_type, magic;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
|
||||
offset = NX_FW_MAGIC_OFFSET;
|
||||
if (netxen_rom_fast_read(adapter, offset, &magic))
|
||||
return -EIO;
|
||||
|
||||
offset = NX_HDR_VERSION_OFFSET;
|
||||
if (netxen_rom_fast_read(adapter, offset, &header_version))
|
||||
return -EIO;
|
||||
|
||||
if (magic != NETXEN_BDINFO_MAGIC ||
|
||||
header_version != NETXEN_BDINFO_VERSION) {
|
||||
dev_err(&pdev->dev,
|
||||
"invalid board config, magic=%08x, version=%08x\n",
|
||||
magic, header_version);
|
||||
if (magic != NETXEN_BDINFO_MAGIC) {
|
||||
dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
|
||||
magic);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
|
|
@ -514,6 +514,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
|
|||
continue;
|
||||
|
||||
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
|
||||
if (off == (NETXEN_CRB_I2C0 + 0x1c))
|
||||
continue;
|
||||
/* do not reset PCI */
|
||||
if (off == (ROMUSB_GLB + 0xbc))
|
||||
continue;
|
||||
|
@ -537,12 +539,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
|
|||
continue;
|
||||
}
|
||||
|
||||
if (off == NETXEN_ADDR_ERROR) {
|
||||
printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
|
||||
netxen_nic_driver_name, buf[i].addr);
|
||||
continue;
|
||||
}
|
||||
|
||||
init_delay = 1;
|
||||
/* After writing this register, HW needs time for CRB */
|
||||
/* to quiet down (else crb_window returns 0xffffffff) */
|
||||
|
|
|
@ -1925,6 +1925,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
|
|||
|
||||
request_reset:
|
||||
adapter->need_fw_reset = 1;
|
||||
clear_bit(__NX_RESETTING, &adapter->state);
|
||||
}
|
||||
|
||||
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
|
||||
|
|
|
@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
|
|||
rp->rcr_index = index;
|
||||
|
||||
skb_reserve(skb, NET_IP_ALIGN);
|
||||
__pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
|
||||
__pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
|
||||
|
||||
rp->rx_packets++;
|
||||
rp->rx_bytes += skb->len;
|
||||
|
|
|
@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
|
|||
},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
|
||||
|
||||
static struct of_platform_driver mdio_ofgpio_driver = {
|
||||
.name = "mdio-gpio",
|
||||
|
|
|
@ -111,9 +111,6 @@ struct pppoe_net {
|
|||
rwlock_t hash_lock;
|
||||
};
|
||||
|
||||
/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
|
||||
static DEFINE_SPINLOCK(flush_lock);
|
||||
|
||||
/*
|
||||
* PPPoE could be in the following stages:
|
||||
* 1) Discovery stage (to obtain remote MAC and Session ID)
|
||||
|
@ -303,45 +300,48 @@ static void pppoe_flush_dev(struct net_device *dev)
|
|||
write_lock_bh(&pn->hash_lock);
|
||||
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
|
||||
struct pppox_sock *po = pn->hash_table[i];
|
||||
struct sock *sk;
|
||||
|
||||
while (po != NULL) {
|
||||
struct sock *sk;
|
||||
if (po->pppoe_dev != dev) {
|
||||
while (po) {
|
||||
while (po && po->pppoe_dev != dev) {
|
||||
po = po->next;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!po)
|
||||
break;
|
||||
|
||||
sk = sk_pppox(po);
|
||||
spin_lock(&flush_lock);
|
||||
po->pppoe_dev = NULL;
|
||||
spin_unlock(&flush_lock);
|
||||
dev_put(dev);
|
||||
|
||||
/* We always grab the socket lock, followed by the
|
||||
* hash_lock, in that order. Since we should
|
||||
* hold the sock lock while doing any unbinding,
|
||||
* we need to release the lock we're holding.
|
||||
* Hold a reference to the sock so it doesn't disappear
|
||||
* as we're jumping between locks.
|
||||
* hash_lock, in that order. Since we should hold the
|
||||
* sock lock while doing any unbinding, we need to
|
||||
* release the lock we're holding. Hold a reference to
|
||||
* the sock so it doesn't disappear as we're jumping
|
||||
* between locks.
|
||||
*/
|
||||
|
||||
sock_hold(sk);
|
||||
|
||||
write_unlock_bh(&pn->hash_lock);
|
||||
lock_sock(sk);
|
||||
|
||||
if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
|
||||
if (po->pppoe_dev == dev
|
||||
&& sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
|
||||
pppox_unbind_sock(sk);
|
||||
sk->sk_state = PPPOX_ZOMBIE;
|
||||
sk->sk_state_change(sk);
|
||||
po->pppoe_dev = NULL;
|
||||
dev_put(dev);
|
||||
}
|
||||
|
||||
release_sock(sk);
|
||||
sock_put(sk);
|
||||
|
||||
/* Restart scan at the beginning of this hash chain.
|
||||
* While the lock was dropped the chain contents may
|
||||
* have changed.
|
||||
/* Restart the process from the start of the current
|
||||
* hash chain. We dropped locks so the world may have
|
||||
* changed from underneath us.
|
||||
*/
|
||||
|
||||
BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
|
||||
write_lock_bh(&pn->hash_lock);
|
||||
po = pn->hash_table[i];
|
||||
}
|
||||
|
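The comment block in the hunk above is the whole story: the socket lock must always be taken before the hash lock, so the scan has to pin the socket, drop the hash lock, lock the socket, do the unbind, then re-take the hash lock and restart the chain from the top because it may have changed in between. Here is a compressed pthread-based sketch of that "drop, work, re-acquire, restart" shape; reference counting and device bookkeeping are left out, and the structures are trivial stand-ins, not the pppoe ones.

#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	pthread_mutex_t lock;		/* "socket" lock: taken before hash_lock */
	int bound;
};

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static void flush_all(void)
{
	struct entry *e;

	pthread_mutex_lock(&hash_lock);
	e = head;
	while (e) {
		if (!e->bound) {
			e = e->next;
			continue;
		}
		/* Can't take e->lock while holding hash_lock (wrong order),
		 * so drop hash_lock, do the unbind, then re-take it and
		 * restart the scan: the chain may have changed meanwhile. */
		pthread_mutex_unlock(&hash_lock);
		pthread_mutex_lock(&e->lock);
		e->bound = 0;
		pthread_mutex_unlock(&e->lock);
		pthread_mutex_lock(&hash_lock);
		e = head;			/* restart from the top */
	}
	pthread_mutex_unlock(&hash_lock);
}

int main(void)
{
	static struct entry a = { .next = NULL,
				  .lock = PTHREAD_MUTEX_INITIALIZER,
				  .bound = 1 };

	head = &a;
	flush_all();
	printf("a.bound = %d\n", a.bound);	/* now 0 */
	return 0;
}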
@ -388,11 +388,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
|
|||
struct pppox_sock *po = pppox_sk(sk);
|
||||
struct pppox_sock *relay_po;
|
||||
|
||||
/* Backlog receive. Semantics of backlog rcv preclude any code from
|
||||
* executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
|
||||
* can't change.
|
||||
*/
|
||||
|
||||
if (sk->sk_state & PPPOX_BOUND) {
|
||||
ppp_input(&po->chan, skb);
|
||||
} else if (sk->sk_state & PPPOX_RELAY) {
|
||||
relay_po = get_item_by_addr(dev_net(po->pppoe_dev),
|
||||
&po->pppoe_relay);
|
||||
relay_po = get_item_by_addr(sock_net(sk),
|
||||
&po->pppoe_relay);
|
||||
if (relay_po == NULL)
|
||||
goto abort_kfree;
|
||||
|
||||
|
@ -447,6 +452,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
|
|||
goto drop;
|
||||
|
||||
pn = pppoe_pernet(dev_net(dev));
|
||||
|
||||
/* Note that get_item does a sock_hold(), so sk_pppox(po)
|
||||
* is known to be safe.
|
||||
*/
|
||||
po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
|
||||
if (!po)
|
||||
goto drop;
|
||||
|
@ -561,6 +570,7 @@ static int pppoe_release(struct socket *sock)
|
|||
struct sock *sk = sock->sk;
|
||||
struct pppox_sock *po;
|
||||
struct pppoe_net *pn;
|
||||
struct net *net = NULL;
|
||||
|
||||
if (!sk)
|
||||
return 0;
|
||||
|
@ -571,44 +581,28 @@ static int pppoe_release(struct socket *sock)
|
|||
return -EBADF;
|
||||
}
|
||||
|
||||
po = pppox_sk(sk);
|
||||
|
||||
if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
|
||||
dev_put(po->pppoe_dev);
|
||||
po->pppoe_dev = NULL;
|
||||
}
|
||||
|
||||
pppox_unbind_sock(sk);
|
||||
|
||||
/* Signal the death of the socket. */
|
||||
sk->sk_state = PPPOX_DEAD;
|
||||
|
||||
/*
|
||||
* pppoe_flush_dev could lead to a race with
|
||||
* this routine so we use flush_lock to eliminate
|
||||
* such a case (we only need per-net specific data)
|
||||
*/
|
||||
spin_lock(&flush_lock);
|
||||
po = pppox_sk(sk);
|
||||
if (!po->pppoe_dev) {
|
||||
spin_unlock(&flush_lock);
|
||||
goto out;
|
||||
}
|
||||
pn = pppoe_pernet(dev_net(po->pppoe_dev));
|
||||
spin_unlock(&flush_lock);
|
||||
net = sock_net(sk);
|
||||
pn = pppoe_pernet(net);
|
||||
|
||||
/*
|
||||
* protect "po" from concurrent updates
|
||||
* on pppoe_flush_dev
|
||||
*/
|
||||
write_lock_bh(&pn->hash_lock);
|
||||
delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
|
||||
po->pppoe_ifindex);
|
||||
|
||||
po = pppox_sk(sk);
|
||||
if (stage_session(po->pppoe_pa.sid))
|
||||
__delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
|
||||
po->pppoe_ifindex);
|
||||
|
||||
if (po->pppoe_dev) {
|
||||
dev_put(po->pppoe_dev);
|
||||
po->pppoe_dev = NULL;
|
||||
}
|
||||
|
||||
write_unlock_bh(&pn->hash_lock);
|
||||
|
||||
out:
|
||||
sock_orphan(sk);
|
||||
sock->sk = NULL;
|
||||
|
||||
|
@ -625,8 +619,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
|
|||
struct sock *sk = sock->sk;
|
||||
struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
|
||||
struct pppox_sock *po = pppox_sk(sk);
|
||||
struct net_device *dev;
|
||||
struct net_device *dev = NULL;
|
||||
struct pppoe_net *pn;
|
||||
struct net *net = NULL;
|
||||
int error;
|
||||
|
||||
lock_sock(sk);
|
||||
|
@ -652,12 +647,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
|
|||
/* Delete the old binding */
|
||||
if (stage_session(po->pppoe_pa.sid)) {
|
||||
pppox_unbind_sock(sk);
|
||||
pn = pppoe_pernet(sock_net(sk));
|
||||
delete_item(pn, po->pppoe_pa.sid,
|
||||
po->pppoe_pa.remote, po->pppoe_ifindex);
|
||||
if (po->pppoe_dev) {
|
||||
pn = pppoe_pernet(dev_net(po->pppoe_dev));
|
||||
delete_item(pn, po->pppoe_pa.sid,
|
||||
po->pppoe_pa.remote, po->pppoe_ifindex);
|
||||
dev_put(po->pppoe_dev);
|
||||
po->pppoe_dev = NULL;
|
||||
}
|
||||
|
||||
memset(sk_pppox(po) + 1, 0,
|
||||
sizeof(struct pppox_sock) - sizeof(struct sock));
|
||||
sk->sk_state = PPPOX_NONE;
|
||||
|
@ -666,16 +663,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
|
|||
/* Re-bind in session stage only */
|
||||
if (stage_session(sp->sa_addr.pppoe.sid)) {
|
||||
error = -ENODEV;
|
||||
dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev);
|
||||
net = sock_net(sk);
|
||||
dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
|
||||
if (!dev)
|
||||
goto end;
|
||||
goto err_put;
|
||||
|
||||
po->pppoe_dev = dev;
|
||||
po->pppoe_ifindex = dev->ifindex;
|
||||
pn = pppoe_pernet(dev_net(dev));
|
||||
write_lock_bh(&pn->hash_lock);
|
||||
pn = pppoe_pernet(net);
|
||||
if (!(dev->flags & IFF_UP)) {
|
||||
write_unlock_bh(&pn->hash_lock);
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
|
@ -683,6 +679,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
|
|||
&sp->sa_addr.pppoe,
|
||||
sizeof(struct pppoe_addr));
|
||||
|
||||
write_lock_bh(&pn->hash_lock);
|
||||
error = __set_item(pn, po);
|
||||
write_unlock_bh(&pn->hash_lock);
|
||||
if (error < 0)
|
||||
|
@ -696,8 +693,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
|
|||
po->chan.ops = &pppoe_chan_ops;
|
||||
|
||||
error = ppp_register_net_channel(dev_net(dev), &po->chan);
|
||||
if (error)
|
||||
if (error) {
|
||||
delete_item(pn, po->pppoe_pa.sid,
|
||||
po->pppoe_pa.remote, po->pppoe_ifindex);
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
sk->sk_state = PPPOX_CONNECTED;
|
||||
}
|
||||
|
@ -915,6 +915,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
|
|||
struct pppoe_hdr *ph;
|
||||
int data_len = skb->len;
|
||||
|
||||
/* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
|
||||
* xmit operations conclude prior to an unregistration call. Thus
|
||||
* sk->sk_state cannot change, so we don't need to do lock_sock().
|
||||
* But, we also can't do a lock_sock since that introduces a potential
|
||||
* deadlock as we'd reverse the lock ordering used when calling
|
||||
* ppp_unregister_channel().
|
||||
*/
|
||||
|
||||
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
|
||||
goto abort;
|
||||
|
||||
|
@ -944,7 +952,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
|
|||
po->pppoe_pa.remote, NULL, data_len);
|
||||
|
||||
dev_queue_xmit(skb);
|
||||
|
||||
return 1;
|
||||
|
||||
abort:
|
||||
|
|
|
@ -1029,7 +1029,10 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
|
|||
|
||||
spin_lock_irqsave(&tp->lock, flags);
|
||||
tp->vlgrp = grp;
|
||||
if (tp->vlgrp)
|
||||
/*
|
||||
* Do not disable RxVlan on 8110SCd.
|
||||
*/
|
||||
if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
|
||||
tp->cp_cmd |= RxVlan;
|
||||
else
|
||||
tp->cp_cmd &= ~RxVlan;
|
||||
|
@ -3197,6 +3200,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
}
|
||||
|
||||
rtl8169_init_phy(dev, tp);
|
||||
|
||||
/*
|
||||
* Pretend we are using VLANs; this bypasses a nasty bug where
|
||||
* interrupts stop flowing on high load on 8110SCd controllers.
|
||||
*/
|
||||
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
|
||||
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
|
||||
|
||||
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
|
||||
|
||||
out:
|
||||
|
|
|
@ -31,6 +31,8 @@
|
|||
#include <linux/cache.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
#include "sh_eth.h"
|
||||
|
||||
/* There is CPU dependent code */
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
config STMMAC_ETH
|
||||
tristate "STMicroelectronics 10/100/1000 Ethernet driver"
|
||||
select MII
|
||||
select PHYLIB
|
||||
depends on NETDEVICES && CPU_SUBTYPE_ST40
|
||||
help
|
||||
This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
|
||||
controllers. ST Ethernet IPs are built around a Synopsys IP Core.
|
||||
|
||||
if STMMAC_ETH
|
||||
|
||||
config STMMAC_DA
|
||||
bool "STMMAC DMA arbitration scheme"
|
||||
default n
|
||||
help
|
||||
Selecting this option, Rx has priority over Tx (only for Gigabit
|
||||
Ethernet devices).
|
||||
By default, the DMA arbitration scheme is based on Round-robin
|
||||
(rx:tx priority is 1:1).
|
||||
|
||||
config STMMAC_DUAL_MAC
|
||||
bool "STMMAC: dual mac support (EXPERIMENTAL)"
|
||||
default n
|
||||
depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
|
||||
help
|
||||
Some ST SoCs (for example the stx7141 and stx7200c2) have two
|
||||
Ethernet Controllers. This option turns on the second Ethernet
|
||||
device on these platforms.
|
||||
|
||||
config STMMAC_TIMER
|
||||
bool "STMMAC Timer optimisation"
|
||||
default n
|
||||
help
|
||||
Use an external timer for mitigating the number of network
|
||||
interrupts.
|
||||
|
||||
choice
|
||||
prompt "Select Timer device"
|
||||
depends on STMMAC_TIMER
|
||||
|
||||
config STMMAC_TMU_TIMER
|
||||
bool "TMU channel 2"
|
||||
depends on CPU_SH4
|
||||
help
|
||||
|
||||
config STMMAC_RTC_TIMER
|
||||
bool "Real time clock"
|
||||
depends on RTC_CLASS
|
||||
help
|
||||
|
||||
endchoice
|
||||
|
||||
endif
|
|
@ -0,0 +1,4 @@
|
|||
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
|
||||
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
|
||||
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
|
||||
mac100.o gmac.o $(stmmac-y)
|
|
@ -0,0 +1,330 @@
|
|||
/*******************************************************************************
|
||||
STMMAC Common Header File
|
||||
|
||||
Copyright (C) 2007-2009 STMicroelectronics Ltd
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
#include "descs.h"
|
||||
#include <linux/io.h>
|
||||
|
||||
/* *********************************************
|
||||
DMA CRS Control and Status Register Mapping
|
||||
* *********************************************/
|
||||
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
|
||||
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
|
||||
#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
|
||||
#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
|
||||
#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
|
||||
#define DMA_STATUS 0x00001014 /* Status Register */
|
||||
#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
|
||||
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
|
||||
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
|
||||
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
|
||||
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
|
||||
|
||||
/* ********************************
|
||||
DMA Control register defines
|
||||
* ********************************/
|
||||
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
|
||||
#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
|
||||
|
||||
/* **************************************
|
||||
DMA Interrupt Enable register defines
|
||||
* **************************************/
|
||||
/**** NORMAL INTERRUPT ****/
|
||||
#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
|
||||
#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
|
||||
#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
|
||||
#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
|
||||
#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
|
||||
|
||||
#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
|
||||
DMA_INTR_ENA_TIE)
|
||||
|
||||
/**** ABNORMAL INTERRUPT ****/
|
||||
#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
|
||||
#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
|
||||
#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
|
||||
#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
|
||||
#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
|
||||
#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
|
||||
#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
|
||||
#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
|
||||
#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
|
||||
#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
|
||||
|
||||
#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
|
||||
DMA_INTR_ENA_UNE)
|
||||
|
||||
/* DMA default interrupt mask */
|
||||
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
|
||||
|
||||
/* ****************************
|
||||
* DMA Status register defines
|
||||
* ****************************/
|
||||
#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
|
||||
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
|
||||
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
|
||||
#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
|
||||
#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
|
||||
#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
|
||||
#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
|
||||
#define DMA_STATUS_TS_SHIFT 20
|
||||
#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
|
||||
#define DMA_STATUS_RS_SHIFT 17
|
||||
#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
|
||||
#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
|
||||
#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
|
||||
#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
|
||||
#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
|
||||
#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
|
||||
#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
|
||||
#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
|
||||
#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
|
||||
#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
|
||||
#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
|
||||
#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
|
||||
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
|
||||
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
|
||||
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
|
||||
|
||||
/* Other defines */
|
||||
#define HASH_TABLE_SIZE 64
|
||||
#define PAUSE_TIME 0x200
|
||||
|
||||
/* Flow Control defines */
|
||||
#define FLOW_OFF 0
|
||||
#define FLOW_RX 1
|
||||
#define FLOW_TX 2
|
||||
#define FLOW_AUTO (FLOW_TX | FLOW_RX)
|
||||
|
||||
/* DMA STORE-AND-FORWARD Operation Mode */
|
||||
#define SF_DMA_MODE 1
|
||||
|
||||
#define HW_CSUM 1
|
||||
#define NO_HW_CSUM 0
|
||||
|
||||
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
|
||||
#define BUF_SIZE_16KiB 16384
|
||||
#define BUF_SIZE_8KiB 8192
|
||||
#define BUF_SIZE_4KiB 4096
|
||||
#define BUF_SIZE_2KiB 2048
|
||||
|
||||
/* Power Down and WOL */
|
||||
#define PMT_NOT_SUPPORTED 0
|
||||
#define PMT_SUPPORTED 1
|
||||
|
||||
/* Common MAC defines */
|
||||
#define MAC_CTRL_REG 0x00000000 /* MAC Control */
|
||||
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
|
||||
#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
|
||||
|
||||
/* MAC Management Counters register */
|
||||
#define MMC_CONTROL 0x00000100 /* MMC Control */
|
||||
#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
|
||||
#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
|
||||
#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
|
||||
#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
|
||||
|
||||
#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
|
||||
#define MMC_CONTROL_MAX_FRM_SHIFT 3
|
||||
#define MMC_CONTROL_MAX_FRAME 0x7FF
|
||||
|
||||
struct stmmac_extra_stats {
|
||||
/* Transmit errors */
|
||||
unsigned long tx_underflow ____cacheline_aligned;
|
||||
unsigned long tx_carrier;
|
||||
unsigned long tx_losscarrier;
|
||||
unsigned long tx_heartbeat;
|
||||
unsigned long tx_deferred;
|
||||
unsigned long tx_vlan;
|
||||
unsigned long tx_jabber;
|
||||
unsigned long tx_frame_flushed;
|
||||
unsigned long tx_payload_error;
|
||||
unsigned long tx_ip_header_error;
|
||||
/* Receive errors */
|
||||
unsigned long rx_desc;
|
||||
unsigned long rx_partial;
|
||||
unsigned long rx_runt;
|
||||
unsigned long rx_toolong;
|
||||
unsigned long rx_collision;
|
||||
unsigned long rx_crc;
|
||||
unsigned long rx_lenght;
|
||||
unsigned long rx_mii;
|
||||
unsigned long rx_multicast;
|
||||
unsigned long rx_gmac_overflow;
|
||||
unsigned long rx_watchdog;
|
||||
unsigned long da_rx_filter_fail;
|
||||
unsigned long sa_rx_filter_fail;
|
||||
unsigned long rx_missed_cntr;
|
||||
unsigned long rx_overflow_cntr;
|
||||
unsigned long rx_vlan;
|
||||
/* Tx/Rx IRQ errors */
|
||||
unsigned long tx_undeflow_irq;
|
||||
unsigned long tx_process_stopped_irq;
|
||||
unsigned long tx_jabber_irq;
|
||||
unsigned long rx_overflow_irq;
|
||||
unsigned long rx_buf_unav_irq;
|
||||
unsigned long rx_process_stopped_irq;
|
||||
unsigned long rx_watchdog_irq;
|
||||
unsigned long tx_early_irq;
|
||||
unsigned long fatal_bus_error_irq;
|
||||
/* Extra info */
|
||||
unsigned long threshold;
|
||||
unsigned long tx_pkt_n;
|
||||
unsigned long rx_pkt_n;
|
||||
unsigned long poll_n;
|
||||
unsigned long sched_timer_n;
|
||||
unsigned long normal_irq_n;
|
||||
};
|
||||
|
||||
/* GMAC core can compute the checksums in HW. */
|
||||
enum rx_frame_status {
|
||||
good_frame = 0,
|
||||
discard_frame = 1,
|
||||
csum_none = 2,
|
||||
};
|
||||
|
||||
static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
|
||||
unsigned int high, unsigned int low)
|
||||
{
|
||||
unsigned long data;
|
||||
|
||||
data = (addr[5] << 8) | addr[4];
|
||||
writel(data, ioaddr + high);
|
||||
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
|
||||
writel(data, ioaddr + low);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void stmmac_get_mac_addr(unsigned long ioaddr,
|
||||
unsigned char *addr, unsigned int high,
|
||||
unsigned int low)
|
||||
{
|
||||
unsigned int hi_addr, lo_addr;
|
||||
|
||||
/* Read the MAC address from the hardware */
|
||||
hi_addr = readl(ioaddr + high);
|
||||
lo_addr = readl(ioaddr + low);
|
||||
|
||||
/* Extract the MAC address from the high and low words */
|
||||
addr[0] = lo_addr & 0xff;
|
||||
addr[1] = (lo_addr >> 8) & 0xff;
|
||||
addr[2] = (lo_addr >> 16) & 0xff;
|
||||
addr[3] = (lo_addr >> 24) & 0xff;
|
||||
addr[4] = hi_addr & 0xff;
|
||||
addr[5] = (hi_addr >> 8) & 0xff;
|
||||
|
||||
return;
|
||||
}
|
||||
|
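stmmac_set_mac_addr() and stmmac_get_mac_addr() above simply split the six address bytes across a "high" register (bytes 4-5) and a "low" register (bytes 0-3), little-endian within each word. A tiny standalone round-trip of that packing, using plain variables in place of the memory-mapped registers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };
	uint8_t out[6];
	uint32_t hi, lo;

	/* Pack: bytes 4-5 into the high word, bytes 0-3 into the low word. */
	hi = (addr[5] << 8) | addr[4];
	lo = ((uint32_t)addr[3] << 24) | (addr[2] << 16) |
	     (addr[1] << 8) | addr[0];

	/* Unpack the same way stmmac_get_mac_addr() does. */
	out[0] = lo & 0xff;
	out[1] = (lo >> 8) & 0xff;
	out[2] = (lo >> 16) & 0xff;
	out[3] = (lo >> 24) & 0xff;
	out[4] = hi & 0xff;
	out[5] = (hi >> 8) & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}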
||||
struct stmmac_ops {
|
||||
/* MAC core initialization */
|
||||
void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
|
||||
/* DMA core initialization */
|
||||
int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
|
||||
/* Dump MAC registers */
|
||||
void (*dump_mac_regs) (unsigned long ioaddr);
|
||||
/* Dump DMA registers */
|
||||
void (*dump_dma_regs) (unsigned long ioaddr);
|
||||
/* Set tx/rx threshold in the csr6 register
|
||||
* An invalid value enables the store-and-forward mode */
|
||||
void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
|
||||
/* To track extra statistic (if supported) */
|
||||
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
|
||||
unsigned long ioaddr);
|
||||
/* RX descriptor ring initialization */
|
||||
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
|
||||
int disable_rx_ic);
|
||||
/* TX descriptor ring initialization */
|
||||
void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
|
||||
|
||||
/* Invoked by the xmit function to prepare the tx descriptor */
|
||||
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
|
||||
int csum_flag);
|
||||
/* Set/get the owner of the descriptor */
|
||||
void (*set_tx_owner) (struct dma_desc *p);
|
||||
int (*get_tx_owner) (struct dma_desc *p);
|
||||
/* Invoked by the xmit function to close the tx descriptor */
|
||||
void (*close_tx_desc) (struct dma_desc *p);
|
||||
/* Clean the tx descriptor as soon as the tx irq is received */
|
||||
void (*release_tx_desc) (struct dma_desc *p);
|
||||
/* Clear interrupt on tx frame completion. When this bit is
|
||||
* set an interrupt happens as soon as the frame is transmitted */
|
||||
void (*clear_tx_ic) (struct dma_desc *p);
|
||||
/* Last tx segment reports the transmit status */
|
||||
int (*get_tx_ls) (struct dma_desc *p);
|
||||
/* Return the transmit status looking at the TDES1 */
|
||||
int (*tx_status) (void *data, struct stmmac_extra_stats *x,
|
||||
struct dma_desc *p, unsigned long ioaddr);
|
||||
/* Get the buffer size from the descriptor */
|
||||
int (*get_tx_len) (struct dma_desc *p);
|
||||
/* Handle extra events on specific interrupts hw dependent */
|
||||
void (*host_irq_status) (unsigned long ioaddr);
|
||||
int (*get_rx_owner) (struct dma_desc *p);
|
||||
void (*set_rx_owner) (struct dma_desc *p);
|
||||
/* Get the receive frame size */
|
||||
int (*get_rx_frame_len) (struct dma_desc *p);
|
||||
/* Return the reception status looking at the RDES1 */
|
||||
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
|
||||
struct dma_desc *p);
|
||||
/* Multicast filter setting */
|
||||
void (*set_filter) (struct net_device *dev);
|
||||
/* Flow control setting */
|
||||
void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
|
||||
unsigned int fc, unsigned int pause_time);
|
||||
/* Set power management mode (e.g. magic frame) */
|
||||
void (*pmt) (unsigned long ioaddr, unsigned long mode);
|
||||
/* Set/Get Unicast MAC addresses */
|
||||
void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
|
||||
unsigned int reg_n);
|
||||
void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
|
||||
unsigned int reg_n);
|
||||
};
|
||||
|
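struct stmmac_ops is a plain table of function pointers: the core driver never calls a GMAC or MAC100 routine by name, it goes through whatever table gmac_setup()/mac100_setup() returned. A minimal illustration of that dispatch style with made-up callbacks (not the real stmmac code paths):

#include <stdio.h>

struct demo_ops {
	void (*core_init)(unsigned long ioaddr);
	void (*dump_regs)(unsigned long ioaddr);
};

static void gmac_like_init(unsigned long ioaddr)
{
	printf("init GMAC-style core at 0x%lx\n", ioaddr);
}

static void gmac_like_dump(unsigned long ioaddr)
{
	printf("dump registers at 0x%lx\n", ioaddr);
}

/* One table per MAC flavour; the caller only keeps the pointer. */
static const struct demo_ops gmac_like_ops = {
	.core_init = gmac_like_init,
	.dump_regs = gmac_like_dump,
};

int main(void)
{
	const struct demo_ops *ops = &gmac_like_ops;	/* "setup" result */

	ops->core_init(0x1000);		/* dispatch through the table */
	ops->dump_regs(0x1000);
	return 0;
}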
||||
struct mac_link {
|
||||
int port;
|
||||
int duplex;
|
||||
int speed;
|
||||
};
|
||||
|
||||
struct mii_regs {
|
||||
unsigned int addr; /* MII Address */
|
||||
unsigned int data; /* MII Data */
|
||||
};
|
||||
|
||||
struct hw_cap {
|
||||
unsigned int version; /* Core Version register (GMAC) */
|
||||
unsigned int pmt; /* Power-Down mode (GMAC) */
|
||||
struct mac_link link;
|
||||
struct mii_regs mii;
|
||||
};
|
||||
|
||||
struct mac_device_info {
|
||||
struct hw_cap hw;
|
||||
struct stmmac_ops *ops;
|
||||
};
|
||||
|
||||
struct mac_device_info *gmac_setup(unsigned long addr);
|
||||
struct mac_device_info *mac100_setup(unsigned long addr);
|
|
@ -0,0 +1,163 @@
|
|||
/*******************************************************************************
|
||||
Header File to describe the DMA descriptors
|
||||
Use enhanced descriptors in case of GMAC Cores.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
struct dma_desc {
|
||||
/* Receive descriptor */
|
||||
union {
|
||||
struct {
|
||||
/* RDES0 */
|
||||
u32 reserved1:1;
|
||||
u32 crc_error:1;
|
||||
u32 dribbling:1;
|
||||
u32 mii_error:1;
|
||||
u32 receive_watchdog:1;
|
||||
u32 frame_type:1;
|
||||
u32 collision:1;
|
||||
u32 frame_too_long:1;
|
||||
u32 last_descriptor:1;
|
||||
u32 first_descriptor:1;
|
||||
u32 multicast_frame:1;
|
||||
u32 run_frame:1;
|
||||
u32 length_error:1;
|
||||
u32 partial_frame_error:1;
|
||||
u32 descriptor_error:1;
|
||||
u32 error_summary:1;
|
||||
u32 frame_length:14;
|
||||
u32 filtering_fail:1;
|
||||
u32 own:1;
|
||||
/* RDES1 */
|
||||
u32 buffer1_size:11;
|
||||
u32 buffer2_size:11;
|
||||
u32 reserved2:2;
|
||||
u32 second_address_chained:1;
|
||||
u32 end_ring:1;
|
||||
u32 reserved3:5;
|
||||
u32 disable_ic:1;
|
||||
} rx;
|
||||
struct {
|
||||
/* RDES0 */
|
||||
u32 payload_csum_error:1;
|
||||
u32 crc_error:1;
|
||||
u32 dribbling:1;
|
||||
u32 error_gmii:1;
|
||||
u32 receive_watchdog:1;
|
||||
u32 frame_type:1;
|
||||
u32 late_collision:1;
|
||||
u32 ipc_csum_error:1;
|
||||
u32 last_descriptor:1;
|
||||
u32 first_descriptor:1;
|
||||
u32 vlan_tag:1;
|
||||
u32 overflow_error:1;
|
||||
u32 length_error:1;
|
||||
u32 sa_filter_fail:1;
|
||||
u32 descriptor_error:1;
|
||||
u32 error_summary:1;
|
||||
u32 frame_length:14;
|
||||
u32 da_filter_fail:1;
|
||||
u32 own:1;
|
||||
/* RDES1 */
|
||||
u32 buffer1_size:13;
|
||||
u32 reserved1:1;
|
||||
u32 second_address_chained:1;
|
||||
u32 end_ring:1;
|
||||
u32 buffer2_size:13;
|
||||
u32 reserved2:2;
|
||||
u32 disable_ic:1;
|
||||
} erx; /* -- enhanced -- */
|
||||
|
||||
/* Transmit descriptor */
|
||||
struct {
|
||||
/* TDES0 */
|
||||
u32 deferred:1;
|
||||
u32 underflow_error:1;
|
||||
u32 excessive_deferral:1;
|
||||
u32 collision_count:4;
|
||||
u32 heartbeat_fail:1;
|
||||
u32 excessive_collisions:1;
|
||||
u32 late_collision:1;
|
||||
u32 no_carrier:1;
|
||||
u32 loss_carrier:1;
|
||||
u32 reserved1:3;
|
||||
u32 error_summary:1;
|
||||
u32 reserved2:15;
|
||||
u32 own:1;
|
||||
/* TDES1 */
|
||||
u32 buffer1_size:11;
|
||||
u32 buffer2_size:11;
|
||||
u32 reserved3:1;
|
||||
u32 disable_padding:1;
|
||||
u32 second_address_chained:1;
|
||||
u32 end_ring:1;
|
||||
u32 crc_disable:1;
|
||||
u32 reserved4:2;
|
||||
u32 first_segment:1;
|
||||
u32 last_segment:1;
|
||||
u32 interrupt:1;
|
||||
} tx;
|
||||
struct {
|
||||
/* TDES0 */
|
||||
u32 deferred:1;
|
||||
u32 underflow_error:1;
|
||||
u32 excessive_deferral:1;
|
||||
u32 collision_count:4;
|
||||
u32 vlan_frame:1;
|
||||
u32 excessive_collisions:1;
|
||||
u32 late_collision:1;
|
||||
u32 no_carrier:1;
|
||||
u32 loss_carrier:1;
|
||||
u32 payload_error:1;
|
||||
u32 frame_flushed:1;
|
||||
u32 jabber_timeout:1;
|
||||
u32 error_summary:1;
|
||||
u32 ip_header_error:1;
|
||||
u32 time_stamp_status:1;
|
||||
u32 reserved1:2;
|
||||
u32 second_address_chained:1;
|
||||
u32 end_ring:1;
|
||||
u32 checksum_insertion:2;
|
||||
u32 reserved2:1;
|
||||
u32 time_stamp_enable:1;
|
||||
u32 disable_padding:1;
|
||||
u32 crc_disable:1;
|
||||
u32 first_segment:1;
|
||||
u32 last_segment:1;
|
||||
u32 interrupt:1;
|
||||
u32 own:1;
|
||||
/* TDES1 */
|
||||
u32 buffer1_size:13;
|
||||
u32 reserved3:3;
|
||||
u32 buffer2_size:13;
|
||||
u32 reserved4:3;
|
||||
} etx; /* -- enhanced -- */
|
||||
} des01;
|
||||
unsigned int des2;
|
||||
unsigned int des3;
|
||||
};
|
||||
|
||||
/* Transmit checksum insertion control */
|
||||
enum tdes_csum_insertion {
|
||||
cic_disabled = 0, /* Checksum Insertion Control */
|
||||
cic_only_ip = 1, /* Only IP header */
|
||||
cic_no_pseudoheader = 2, /* IP header but pseudoheader
|
||||
* is not calculated */
|
||||
cic_full = 3, /* IP header and pseudoheader */
|
||||
};
|
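The enhanced TX descriptor above is all bitfields, so "preparing" a descriptor amounts to assigning a handful of fields and finally flipping own to hand the slot to the DMA. A reduced, self-contained model of that sequence follows; it keeps only a few of the fields and skips the cache and barrier handling the real driver must add.

#include <stdint.h>
#include <stdio.h>

/* Cut-down model of the enhanced TX descriptor control bits. */
struct demo_etx {
	uint32_t first_segment:1;
	uint32_t last_segment:1;
	uint32_t checksum_insertion:2;	/* see enum tdes_csum_insertion */
	uint32_t interrupt:1;
	uint32_t own:1;			/* set last: hands the slot to DMA */
	uint32_t buffer1_size:13;
};

static void prepare_tx_desc(struct demo_etx *p, int is_first, int len,
			    int csum)
{
	p->first_segment = is_first ? 1 : 0;
	p->buffer1_size = len & 0x1fff;		/* 13-bit length field */
	p->checksum_insertion = csum & 3;
	p->last_segment = 1;
	p->interrupt = 1;			/* IRQ on completion */
	p->own = 1;				/* must be the final store */
}

int main(void)
{
	struct demo_etx d = { 0 };

	prepare_tx_desc(&d, 1, 1500, 3 /* cic_full */);
	printf("len=%u own=%u cic=%u\n",
	       d.buffer1_size, d.own, d.checksum_insertion);
	return 0;
}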
|
@ -0,0 +1,693 @@
|
|||
/*******************************************************************************
|
||||
This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
|
||||
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
|
||||
developing this code.
|
||||
|
||||
Copyright (C) 2007-2009 STMicroelectronics Ltd
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/mii.h>
|
||||
#include <linux/phy.h>
|
||||
|
||||
#include "stmmac.h"
|
||||
#include "gmac.h"
|
||||
|
||||
#undef GMAC_DEBUG
|
||||
/*#define GMAC_DEBUG*/
|
||||
#undef FRAME_FILTER_DEBUG
|
||||
/*#define FRAME_FILTER_DEBUG*/
|
||||
#ifdef GMAC_DEBUG
|
||||
#define DBG(fmt, args...) printk(fmt, ## args)
|
||||
#else
|
||||
#define DBG(fmt, args...) do { } while (0)
|
||||
#endif
|
||||
|
||||
static void gmac_dump_regs(unsigned long ioaddr)
|
||||
{
|
||||
int i;
|
||||
pr_info("\t----------------------------------------------\n"
|
||||
"\t GMAC registers (base addr = 0x%8x)\n"
|
||||
"\t----------------------------------------------\n",
|
||||
(unsigned int)ioaddr);
|
||||
|
||||
for (i = 0; i < 55; i++) {
|
||||
int offset = i * 4;
|
||||
pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
|
||||
offset, readl(ioaddr + offset));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
|
||||
{
|
||||
u32 value = readl(ioaddr + DMA_BUS_MODE);
|
||||
/* DMA SW reset */
|
||||
value |= DMA_BUS_MODE_SFT_RESET;
|
||||
writel(value, ioaddr + DMA_BUS_MODE);
|
||||
do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
|
||||
|
||||
value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
|
||||
((pbl << DMA_BUS_MODE_PBL_SHIFT) |
|
||||
(pbl << DMA_BUS_MODE_RPBL_SHIFT));
|
||||
|
||||
#ifdef CONFIG_STMMAC_DA
|
||||
value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
|
||||
#endif
|
||||
writel(value, ioaddr + DMA_BUS_MODE);
|
||||
|
||||
/* Mask interrupts by writing to CSR7 */
|
||||
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
|
||||
|
||||
/* The base address of the RX/TX descriptor lists must be written into
|
||||
* DMA CSR3 and CSR4, respectively. */
|
||||
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
|
||||
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
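gmac_dma_init() above spins with do {} while (...) until the hardware clears DMA_BUS_MODE_SFT_RESET, which never returns if the reset bit sticks. Purely as an illustration of the alternative, here is a bounded poll of the same kind of self-clearing bit, written against a plain variable rather than the real DMA_BUS_MODE register:

#include <stdint.h>
#include <stdio.h>

#define SFT_RESET_BIT 0x1u

static volatile uint32_t bus_mode;	/* stand-in for readl(ioaddr + DMA_BUS_MODE) */

/* Poll until the self-clearing reset bit drops, but give up after
 * a fixed number of iterations instead of spinning forever. */
static int wait_reset_clear(unsigned int max_iters)
{
	while (max_iters--) {
		if (!(bus_mode & SFT_RESET_BIT))
			return 0;		/* reset finished */
	}
	return -1;				/* timed out */
}

int main(void)
{
	bus_mode = 0;				/* pretend the core cleared it */
	printf("reset %s\n", wait_reset_clear(1000) ? "timed out" : "done");
	return 0;
}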
/* Transmit FIFO flush operation */
|
||||
static void gmac_flush_tx_fifo(unsigned long ioaddr)
|
||||
{
|
||||
u32 csr6 = readl(ioaddr + DMA_CONTROL);
|
||||
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
|
||||
|
||||
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
|
||||
}
|
||||
|
||||
static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
|
||||
int rxmode)
|
||||
{
|
||||
u32 csr6 = readl(ioaddr + DMA_CONTROL);
|
||||
|
||||
if (txmode == SF_DMA_MODE) {
|
||||
DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
|
||||
/* Transmit COE type 2 cannot be done in cut-through mode. */
|
||||
csr6 |= DMA_CONTROL_TSF;
|
||||
/* Operating on second frame increases the performance
|
||||
* especially when transmit store-and-forward is used.*/
|
||||
csr6 |= DMA_CONTROL_OSF;
|
||||
} else {
|
||||
DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
|
||||
" (threshold = %d)\n", txmode);
|
||||
csr6 &= ~DMA_CONTROL_TSF;
|
||||
csr6 &= DMA_CONTROL_TC_TX_MASK;
|
||||
/* Set the transmit threashold */
|
||||
if (txmode <= 32)
|
||||
csr6 |= DMA_CONTROL_TTC_32;
|
||||
else if (txmode <= 64)
|
||||
csr6 |= DMA_CONTROL_TTC_64;
|
||||
else if (txmode <= 128)
|
||||
csr6 |= DMA_CONTROL_TTC_128;
|
||||
else if (txmode <= 192)
|
||||
csr6 |= DMA_CONTROL_TTC_192;
|
||||
else
|
||||
csr6 |= DMA_CONTROL_TTC_256;
|
||||
}
|
||||
|
||||
if (rxmode == SF_DMA_MODE) {
|
||||
DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
|
||||
csr6 |= DMA_CONTROL_RSF;
|
||||
} else {
|
||||
DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
|
||||
" (threshold = %d)\n", rxmode);
|
||||
csr6 &= ~DMA_CONTROL_RSF;
|
||||
csr6 &= DMA_CONTROL_TC_RX_MASK;
|
||||
if (rxmode <= 32)
|
||||
csr6 |= DMA_CONTROL_RTC_32;
|
||||
else if (rxmode <= 64)
|
||||
csr6 |= DMA_CONTROL_RTC_64;
|
||||
else if (rxmode <= 96)
|
||||
csr6 |= DMA_CONTROL_RTC_96;
|
||||
else
|
||||
csr6 |= DMA_CONTROL_RTC_128;
|
||||
}
|
||||
|
||||
writel(csr6, ioaddr + DMA_CONTROL);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Not yet implemented --- no RMON module */
|
||||
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
|
||||
unsigned long ioaddr)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static void gmac_dump_dma_regs(unsigned long ioaddr)
|
||||
{
|
||||
int i;
|
||||
pr_info(" DMA registers\n");
|
||||
for (i = 0; i < 22; i++) {
|
||||
if ((i < 9) || (i > 17)) {
|
||||
int offset = i * 4;
|
||||
pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
|
||||
(DMA_BUS_MODE + offset),
|
||||
readl(ioaddr + DMA_BUS_MODE + offset));
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
|
||||
struct dma_desc *p, unsigned long ioaddr)
|
||||
{
|
||||
int ret = 0;
|
||||
struct net_device_stats *stats = (struct net_device_stats *)data;
|
||||
|
||||
if (unlikely(p->des01.etx.error_summary)) {
|
||||
DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
|
||||
if (unlikely(p->des01.etx.jabber_timeout)) {
|
||||
DBG(KERN_ERR "\tjabber_timeout error\n");
|
||||
x->tx_jabber++;
|
||||
}
|
||||
|
||||
if (unlikely(p->des01.etx.frame_flushed)) {
|
||||
DBG(KERN_ERR "\tframe_flushed error\n");
|
||||
x->tx_frame_flushed++;
|
||||
gmac_flush_tx_fifo(ioaddr);
|
||||
}
|
||||
|
||||
if (unlikely(p->des01.etx.loss_carrier)) {
|
||||
DBG(KERN_ERR "\tloss_carrier error\n");
|
||||
x->tx_losscarrier++;
|
||||
stats->tx_carrier_errors++;
|
||||
}
|
||||
if (unlikely(p->des01.etx.no_carrier)) {
|
||||
DBG(KERN_ERR "\tno_carrier error\n");
|
||||
x->tx_carrier++;
|
||||
stats->tx_carrier_errors++;
|
||||
}
|
||||
if (unlikely(p->des01.etx.late_collision)) {
|
||||
DBG(KERN_ERR "\tlate_collision error\n");
|
||||
stats->collisions += p->des01.etx.collision_count;
|
||||
}
|
||||
if (unlikely(p->des01.etx.excessive_collisions)) {
|
||||
DBG(KERN_ERR "\texcessive_collisions\n");
|
||||
stats->collisions += p->des01.etx.collision_count;
|
||||
}
|
||||
if (unlikely(p->des01.etx.excessive_deferral)) {
|
||||
DBG(KERN_INFO "\texcessive tx_deferral\n");
|
||||
x->tx_deferred++;
|
||||
}
|
||||
|
||||
if (unlikely(p->des01.etx.underflow_error)) {
|
||||
DBG(KERN_ERR "\tunderflow error\n");
|
||||
gmac_flush_tx_fifo(ioaddr);
|
||||
x->tx_underflow++;
|
||||
}
|
||||
|
||||
if (unlikely(p->des01.etx.ip_header_error)) {
|
||||
DBG(KERN_ERR "\tTX IP header csum error\n");
|
||||
x->tx_ip_header_error++;
|
||||
}
|
||||
|
||||
if (unlikely(p->des01.etx.payload_error)) {
|
||||
DBG(KERN_ERR "\tAddr/Payload csum error\n");
|
||||
x->tx_payload_error++;
|
||||
gmac_flush_tx_fifo(ioaddr);
|
||||
}
|
||||
|
||||
ret = -1;
|
||||
}
|
||||
|
||||
if (unlikely(p->des01.etx.deferred)) {
|
||||
DBG(KERN_INFO "GMAC TX status: tx deferred\n");
|
||||
x->tx_deferred++;
|
||||
}
|
||||
#ifdef STMMAC_VLAN_TAG_USED
|
||||
if (p->des01.etx.vlan_frame) {
|
||||
DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
|
||||
x->tx_vlan++;
|
||||
}
|
||||
#endif
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int gmac_get_tx_len(struct dma_desc *p)
|
||||
{
|
||||
return p->des01.etx.buffer1_size;
|
||||
}
|
||||
|
||||
static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
|
||||
{
|
||||
int ret = good_frame;
|
||||
u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
|
||||
|
||||
/* bits 5 7 0 | Frame status
|
||||
* ----------------------------------------------------------
|
||||
* 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
|
||||
* 1 0 0 | IPv4/6 No CSUM errorS.
|
||||
* 1 0 1 | IPv4/6 CSUM PAYLOAD error
|
||||
* 1 1 0 | IPv4/6 CSUM IP HR error
|
||||
* 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
|
||||
* 0 0 1 | IPv4/6 unsupported IP PAYLOAD
|
||||
* 0 1 1 | COE bypassed.. no IPv4/6 frame
|
||||
* 0 1 0 | Reserved.
|
||||
*/
|
||||
if (status == 0x0) {
|
||||
DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
|
||||
ret = good_frame;
|
||||
} else if (status == 0x4) {
|
||||
DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
|
||||
ret = good_frame;
|
||||
} else if (status == 0x5) {
|
||||
DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
|
||||
ret = csum_none;
|
||||
} else if (status == 0x6) {
|
||||
DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
|
||||
ret = csum_none;
|
||||
} else if (status == 0x7) {
|
||||
DBG(KERN_ERR
|
||||
"RX Des0 status: IPv4/6 Header and Payload Error.\n");
|
||||
ret = csum_none;
|
||||
} else if (status == 0x1) {
|
||||
DBG(KERN_ERR
|
||||
"RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
|
||||
ret = discard_frame;
|
||||
} else if (status == 0x3) {
|
||||
DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
|
||||
ret = discard_frame;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers. */
	ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		DBG(KERN_ERR "GMAC RX: dribbling error\n");
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		DBG(KERN_ERR "GMAC RX: length_error error\n");
		x->rx_lenght++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}

static void gmac_irq_status(unsigned long ioaddr)
{
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_TX_INTR));
	if (unlikely(intr_status & mmc_rx_irq))
		DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_RX_INTR));
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
	if (unlikely(intr_status & pmt_irq)) {
		DBG(KERN_DEBUG "GMAC: received Magic frame\n");
		/* clear the PMT bits 5 and 6 by reading the PMT
		 * status register. */
		readl(ioaddr + GMAC_PMT);
	}

	return;
}

static void gmac_core_init(unsigned long ioaddr)
{
	u32 value = readl(ioaddr + GMAC_CONTROL);
	value |= GMAC_CORE_INIT;
	writel(value, ioaddr + GMAC_CONTROL);

	/* STBus Bridge Configuration */
	/*writel(0xc5608, ioaddr + 0x00007000);*/

	/* Freeze MMC counters */
	writel(0x8, ioaddr + GMAC_MMC_CTRL);
	/* Mask GMAC interrupts */
	writel(0x207, ioaddr + GMAC_INT_MASK);

#ifdef STMMAC_VLAN_TAG_USED
	/* Tag detection without filtering */
	writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
	return;
}

static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}

static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}

static void gmac_set_filter(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	unsigned int value = 0;

	DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
	    __func__, dev->mc_count, dev->uc_count);

	if (dev->flags & IFF_PROMISC)
		value = GMAC_FRAME_FILTER_PR;
	else if ((dev->mc_count > HASH_TABLE_SIZE)
		 || (dev->flags & IFF_ALLMULTI)) {
		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
	} else if (dev->mc_count > 0) {
		int i;
		u32 mc_filter[2];
		struct dev_mc_list *mclist;

		/* Hash filter for multicast */
		value = GMAC_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table */
			int bit_nr =
			    bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
		/* Switch to promiscuous mode if more than 16 addrs
		 * are required */
		value |= GMAC_FRAME_FILTER_PR;
	else {
		int i;
		struct dev_addr_list *uc_ptr = dev->uc_list;

		for (i = 0; i < dev->uc_count; i++) {
			gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
					   i + 1);

			DBG(KERN_INFO "\t%d "
			    "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
			    "%02x\n", i + 1,
			    uc_ptr->da_addr[0], uc_ptr->da_addr[1],
			    uc_ptr->da_addr[2], uc_ptr->da_addr[3],
			    uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
			uc_ptr = uc_ptr->next;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= GMAC_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + GMAC_FRAME_FILTER);

	DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
	    "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
	    readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));

	return;
}

static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
			   unsigned int fc, unsigned int pause_time)
{
	unsigned int flow = 0;

	DBG(KERN_DEBUG "GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
		flow |= GMAC_FLOW_CTRL_RFE;
	}
	if (fc & FLOW_TX) {
		DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
		flow |= GMAC_FLOW_CTRL_TFE;
	}

	if (duplex) {
		DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
		flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
	}

	writel(flow, ioaddr + GMAC_FLOW_CTRL);
	return;
}

static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode == WAKE_MAGIC) {
		DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	} else if (mode == WAKE_UCAST) {
		DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
		pmt |= global_unicast;
	}

	writel(pmt, ioaddr + GMAC_PMT);
	return;
}

static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
			      int disable_rx_ic)
{
	int i;
	for (i = 0; i < ring_size; i++) {
		p->des01.erx.own = 1;
		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
		/* To support jumbo frames */
		p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
		if (i == ring_size - 1)
			p->des01.erx.end_ring = 1;
		if (disable_rx_ic)
			p->des01.erx.disable_ic = 1;
		p++;
	}
	return;
}

static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.etx.own = 0;
		if (i == ring_size - 1)
			p->des01.etx.end_ring = 1;
		p++;
	}

	return;
}

static int gmac_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int gmac_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void gmac_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void gmac_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

static int gmac_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

static void gmac_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, sizeof(struct dma_desc));
	p->des01.etx.end_ring = ter;

	return;
}

static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				 int csum_flag)
{
	p->des01.etx.first_segment = is_fs;
	if (unlikely(len > BUF_SIZE_4KiB)) {
		p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
		p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
	} else {
		p->des01.etx.buffer1_size = len;
	}
	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}

static void gmac_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

static void gmac_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}

static int gmac_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}

struct stmmac_ops gmac_driver = {
	.core_init = gmac_core_init,
	.dump_mac_regs = gmac_dump_regs,
	.dma_init = gmac_dma_init,
	.dump_dma_regs = gmac_dump_dma_regs,
	.dma_mode = gmac_dma_operation_mode,
	.dma_diagnostic_fr = gmac_dma_diagnostic_fr,
	.tx_status = gmac_get_tx_frame_status,
	.rx_status = gmac_get_rx_frame_status,
	.get_tx_len = gmac_get_tx_len,
	.set_filter = gmac_set_filter,
	.flow_ctrl = gmac_flow_ctrl,
	.pmt = gmac_pmt,
	.init_rx_desc = gmac_init_rx_desc,
	.init_tx_desc = gmac_init_tx_desc,
	.get_tx_owner = gmac_get_tx_owner,
	.get_rx_owner = gmac_get_rx_owner,
	.release_tx_desc = gmac_release_tx_desc,
	.prepare_tx_desc = gmac_prepare_tx_desc,
	.clear_tx_ic = gmac_clear_tx_ic,
	.close_tx_desc = gmac_close_tx_desc,
	.get_tx_ls = gmac_get_tx_ls,
	.set_tx_owner = gmac_set_tx_owner,
	.set_rx_owner = gmac_set_rx_owner,
	.get_rx_frame_len = gmac_get_rx_frame_len,
	.host_irq_status = gmac_irq_status,
	.set_umac_addr = gmac_set_umac_addr,
	.get_umac_addr = gmac_get_umac_addr,
};

struct mac_device_info *gmac_setup(unsigned long ioaddr)
{
	struct mac_device_info *mac;
	u32 uid = readl(ioaddr + GMAC_VERSION);

	pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
		((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));

	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);

	mac->ops = &gmac_driver;
	mac->hw.pmt = PMT_SUPPORTED;
	mac->hw.link.port = GMAC_CONTROL_PS;
	mac->hw.link.duplex = GMAC_CONTROL_DM;
	mac->hw.link.speed = GMAC_CONTROL_FES;
	mac->hw.mii.addr = GMAC_MII_ADDR;
	mac->hw.mii.data = GMAC_MII_DATA;

	return mac;
}
@ -0,0 +1,204 @@
|
|||
/*******************************************************************************
|
||||
Copyright (C) 2007-2009 STMicroelectronics Ltd
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
#define GMAC_CONTROL 0x00000000 /* Configuration */
|
||||
#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
|
||||
#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
|
||||
#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
|
||||
#define GMAC_MII_ADDR 0x00000010 /* MII Address */
|
||||
#define GMAC_MII_DATA 0x00000014 /* MII Data */
|
||||
#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
|
||||
#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
|
||||
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
|
||||
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
|
||||
|
||||
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
|
||||
enum gmac_irq_status {
|
||||
time_stamp_irq = 0x0200,
|
||||
mmc_rx_csum_offload_irq = 0x0080,
|
||||
mmc_tx_irq = 0x0040,
|
||||
mmc_rx_irq = 0x0020,
|
||||
mmc_irq = 0x0010,
|
||||
pmt_irq = 0x0008,
|
||||
pcs_ane_irq = 0x0004,
|
||||
pcs_link_irq = 0x0002,
|
||||
rgmii_irq = 0x0001,
|
||||
};
|
||||
#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
|
||||
|
||||
/* PMT Control and Status */
|
||||
#define GMAC_PMT 0x0000002c
|
||||
enum power_event {
|
||||
pointer_reset = 0x80000000,
|
||||
global_unicast = 0x00000200,
|
||||
wake_up_rx_frame = 0x00000040,
|
||||
magic_frame = 0x00000020,
|
||||
wake_up_frame_en = 0x00000004,
|
||||
magic_pkt_en = 0x00000002,
|
||||
power_down = 0x00000001,
|
||||
};
|
||||
|
||||
/* GMAC HW ADDR regs */
|
||||
#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8))
|
||||
#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8))
|
||||
#define GMAC_MAX_UNICAST_ADDRESSES 16
|
||||
|
||||
#define GMAC_AN_CTRL 0x000000c0 /* AN control */
|
||||
#define GMAC_AN_STATUS 0x000000c4 /* AN status */
|
||||
#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
|
||||
#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partner ability */
|
||||
#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
|
||||
#define GMAC_TBI 0x000000d4 /* TBI extend status */
|
||||
#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
|
||||
|
||||
/* GMAC Configuration defines */
|
||||
#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
|
||||
#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
|
||||
#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
|
||||
#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
|
||||
#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
|
||||
enum inter_frame_gap {
|
||||
GMAC_CONTROL_IFG_88 = 0x00040000,
|
||||
GMAC_CONTROL_IFG_80 = 0x00020000,
|
||||
GMAC_CONTROL_IFG_40 = 0x000e0000,
|
||||
};
|
||||
#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
|
||||
#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
|
||||
#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
|
||||
#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
|
||||
#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
|
||||
#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
|
||||
#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
|
||||
#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
|
||||
#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
|
||||
#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */
|
||||
#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
|
||||
#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
|
||||
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
|
||||
|
||||
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
|
||||
GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
|
||||
|
||||
/* GMAC Frame Filter defines */
|
||||
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
|
||||
#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
|
||||
#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
|
||||
#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
|
||||
#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
|
||||
#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
|
||||
#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
|
||||
#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
|
||||
#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
|
||||
#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
|
||||
/* GMII ADDR defines */
|
||||
#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
|
||||
#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
|
||||
/* GMAC FLOW CTRL defines */
|
||||
#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
|
||||
#define GMAC_FLOW_CTRL_PT_SHIFT 16
|
||||
#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
|
||||
#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
|
||||
#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
|
||||
|
||||
/*--- DMA BLOCK defines ---*/
|
||||
/* DMA Bus Mode register defines */
|
||||
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
|
||||
#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
|
||||
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
|
||||
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
|
||||
/* Programmable burst length (passed through the platform) */
|
||||
#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
|
||||
#define DMA_BUS_MODE_PBL_SHIFT 8
|
||||
|
||||
enum rx_tx_priority_ratio {
|
||||
double_ratio = 0x00004000, /*2:1 */
|
||||
triple_ratio = 0x00008000, /*3:1 */
|
||||
quadruple_ratio = 0x0000c000, /*4:1 */
|
||||
};
|
||||
|
||||
#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
|
||||
#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
|
||||
#define DMA_BUS_MODE_RPBL_SHIFT 17
|
||||
#define DMA_BUS_MODE_USP 0x00800000
|
||||
#define DMA_BUS_MODE_4PBL 0x01000000
|
||||
#define DMA_BUS_MODE_AAL 0x02000000
|
||||
|
||||
/* DMA CRS Control and Status Register Mapping */
|
||||
#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
|
||||
#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
|
||||
/* DMA Bus Mode register defines */
|
||||
#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
|
||||
#define DMA_BUS_PR_RATIO_SHIFT 14
|
||||
#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
|
||||
|
||||
/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
|
||||
#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */
|
||||
#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
|
||||
#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
|
||||
/* Threshold for Activating the FC */
|
||||
enum rfa {
|
||||
act_full_minus_1 = 0x00800000,
|
||||
act_full_minus_2 = 0x00800200,
|
||||
act_full_minus_3 = 0x00800400,
|
||||
act_full_minus_4 = 0x00800600,
|
||||
};
|
||||
/* Threshold for Deactivating the FC */
|
||||
enum rfd {
|
||||
deac_full_minus_1 = 0x00400000,
|
||||
deac_full_minus_2 = 0x00400800,
|
||||
deac_full_minus_3 = 0x00401000,
|
||||
deac_full_minus_4 = 0x00401800,
|
||||
};
|
||||
#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
|
||||
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
|
||||
|
||||
enum ttc_control {
|
||||
DMA_CONTROL_TTC_64 = 0x00000000,
|
||||
DMA_CONTROL_TTC_128 = 0x00004000,
|
||||
DMA_CONTROL_TTC_192 = 0x00008000,
|
||||
DMA_CONTROL_TTC_256 = 0x0000c000,
|
||||
DMA_CONTROL_TTC_40 = 0x00010000,
|
||||
DMA_CONTROL_TTC_32 = 0x00014000,
|
||||
DMA_CONTROL_TTC_24 = 0x00018000,
|
||||
DMA_CONTROL_TTC_16 = 0x0001c000,
|
||||
};
|
||||
#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
|
||||
|
||||
#define DMA_CONTROL_EFC 0x00000100
|
||||
#define DMA_CONTROL_FEF 0x00000080
|
||||
#define DMA_CONTROL_FUF 0x00000040
|
||||
|
||||
enum rtc_control {
|
||||
DMA_CONTROL_RTC_64 = 0x00000000,
|
||||
DMA_CONTROL_RTC_32 = 0x00000008,
|
||||
DMA_CONTROL_RTC_96 = 0x00000010,
|
||||
DMA_CONTROL_RTC_128 = 0x00000018,
|
||||
};
|
||||
#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
|
||||
|
||||
#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
|
||||
|
||||
/* MMC registers offset */
|
||||
#define GMAC_MMC_CTRL 0x100
|
||||
#define GMAC_MMC_RX_INTR 0x104
|
||||
#define GMAC_MMC_TX_INTR 0x108
|
||||
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
|
|
@ -0,0 +1,517 @@
|
|||
/*******************************************************************************
|
||||
This is the driver for the MAC 10/100 on-chip Ethernet controller
|
||||
currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
|
||||
|
||||
DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
|
||||
this code.
|
||||
|
||||
Copyright (C) 2007-2009 STMicroelectronics Ltd
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/mii.h>
|
||||
#include <linux/phy.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "mac100.h"
|
||||
|
||||
#undef MAC100_DEBUG
|
||||
/*#define MAC100_DEBUG*/
|
||||
#ifdef MAC100_DEBUG
|
||||
#define DBG(fmt, args...) printk(fmt, ## args)
|
||||
#else
|
||||
#define DBG(fmt, args...) do { } while (0)
|
||||
#endif
|
||||
|
||||
static void mac100_core_init(unsigned long ioaddr)
|
||||
{
|
||||
u32 value = readl(ioaddr + MAC_CONTROL);
|
||||
|
||||
writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
|
||||
|
||||
#ifdef STMMAC_VLAN_TAG_USED
|
||||
writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
static void mac100_dump_mac_regs(unsigned long ioaddr)
|
||||
{
|
||||
pr_info("\t----------------------------------------------\n"
|
||||
"\t MAC100 CSR (base addr = 0x%8x)\n"
|
||||
"\t----------------------------------------------\n",
|
||||
(unsigned int)ioaddr);
|
||||
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
|
||||
readl(ioaddr + MAC_CONTROL));
|
||||
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
|
||||
readl(ioaddr + MAC_ADDR_HIGH));
|
||||
pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
|
||||
readl(ioaddr + MAC_ADDR_LOW));
|
||||
pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
|
||||
MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
|
||||
pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
|
||||
MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
|
||||
pr_info("\tflow control (offset 0x%x): 0x%08x\n",
|
||||
MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
|
||||
pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
|
||||
readl(ioaddr + MAC_VLAN1));
|
||||
pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
|
||||
readl(ioaddr + MAC_VLAN2));
|
||||
pr_info("\n\tMAC management counter registers\n");
|
||||
pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
|
||||
MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
|
||||
pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
|
||||
MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
|
||||
pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
|
||||
MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
|
||||
pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
|
||||
MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
|
||||
pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
|
||||
MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
|
||||
return;
|
||||
}
|
||||
|
||||
static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
|
||||
u32 dma_rx)
|
||||
{
|
||||
u32 value = readl(ioaddr + DMA_BUS_MODE);
|
||||
/* DMA SW reset */
|
||||
value |= DMA_BUS_MODE_SFT_RESET;
|
||||
writel(value, ioaddr + DMA_BUS_MODE);
|
||||
do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
|
||||
|
||||
/* Enable Application Access by writing to DMA CSR0 */
|
||||
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
|
||||
ioaddr + DMA_BUS_MODE);
|
||||
|
||||
/* Mask interrupts by writing to CSR7 */
|
||||
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
|
||||
|
||||
/* The base address of the RX/TX descriptor lists must be written into
|
||||
* DMA CSR3 and CSR4, respectively. */
|
||||
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
|
||||
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Store and Forward capability is not used at all.
 * The transmit threshold can be programmed by
 * setting the TTC bits in the DMA control register. */
|
||||
static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
|
||||
int rxmode)
|
||||
{
|
||||
u32 csr6 = readl(ioaddr + DMA_CONTROL);
|
||||
|
||||
if (txmode <= 32)
|
||||
csr6 |= DMA_CONTROL_TTC_32;
|
||||
else if (txmode <= 64)
|
||||
csr6 |= DMA_CONTROL_TTC_64;
|
||||
else
|
||||
csr6 |= DMA_CONTROL_TTC_128;
|
||||
|
||||
writel(csr6, ioaddr + DMA_CONTROL);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void mac100_dump_dma_regs(unsigned long ioaddr)
|
||||
{
|
||||
int i;
|
||||
|
||||
DBG(KERN_DEBUG "MAC100 DMA CSR \n");
|
||||
for (i = 0; i < 9; i++)
|
||||
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
|
||||
(DMA_BUS_MODE + i * 4),
|
||||
readl(ioaddr + DMA_BUS_MODE + i * 4));
|
||||
DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
|
||||
DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
|
||||
DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
|
||||
DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
/* DMA controller has two counters to track the number of
|
||||
the receive missed frames. */
|
||||
static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
|
||||
unsigned long ioaddr)
|
||||
{
|
||||
struct net_device_stats *stats = (struct net_device_stats *)data;
|
||||
u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
|
||||
|
||||
if (unlikely(csr8)) {
|
||||
if (csr8 & DMA_MISSED_FRAME_OVE) {
|
||||
stats->rx_over_errors += 0x800;
|
||||
x->rx_overflow_cntr += 0x800;
|
||||
} else {
|
||||
unsigned int ove_cntr;
|
||||
ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
|
||||
stats->rx_over_errors += ove_cntr;
|
||||
x->rx_overflow_cntr += ove_cntr;
|
||||
}
|
||||
|
||||
if (csr8 & DMA_MISSED_FRAME_OVE_M) {
|
||||
stats->rx_missed_errors += 0xffff;
|
||||
x->rx_missed_cntr += 0xffff;
|
||||
} else {
|
||||
unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
|
||||
stats->rx_missed_errors += miss_f;
|
||||
x->rx_missed_cntr += miss_f;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
|
||||
struct dma_desc *p, unsigned long ioaddr)
|
||||
{
|
||||
int ret = 0;
|
||||
struct net_device_stats *stats = (struct net_device_stats *)data;
|
||||
|
||||
if (unlikely(p->des01.tx.error_summary)) {
|
||||
if (unlikely(p->des01.tx.underflow_error)) {
|
||||
x->tx_underflow++;
|
||||
stats->tx_fifo_errors++;
|
||||
}
|
||||
if (unlikely(p->des01.tx.no_carrier)) {
|
||||
x->tx_carrier++;
|
||||
stats->tx_carrier_errors++;
|
||||
}
|
||||
if (unlikely(p->des01.tx.loss_carrier)) {
|
||||
x->tx_losscarrier++;
|
||||
stats->tx_carrier_errors++;
|
||||
}
|
||||
if (unlikely((p->des01.tx.excessive_deferral) ||
|
||||
(p->des01.tx.excessive_collisions) ||
|
||||
(p->des01.tx.late_collision)))
|
||||
stats->collisions += p->des01.tx.collision_count;
|
||||
ret = -1;
|
||||
}
|
||||
if (unlikely(p->des01.tx.heartbeat_fail)) {
|
||||
x->tx_heartbeat++;
|
||||
stats->tx_heartbeat_errors++;
|
||||
ret = -1;
|
||||
}
|
||||
if (unlikely(p->des01.tx.deferred))
|
||||
x->tx_deferred++;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mac100_get_tx_len(struct dma_desc *p)
|
||||
{
|
||||
return p->des01.tx.buffer1_size;
|
||||
}
|
||||
|
||||
/* This function verifies if each incoming frame has some errors
 * and, if required, updates the multicast statistics.
 * In case of success, it returns csum_none because the device
 * is not able to compute the csum in HW. */
|
||||
static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
|
||||
struct dma_desc *p)
|
||||
{
|
||||
int ret = csum_none;
|
||||
struct net_device_stats *stats = (struct net_device_stats *)data;
|
||||
|
||||
if (unlikely(p->des01.rx.last_descriptor == 0)) {
|
||||
pr_warning("mac100 Error: Oversized Ethernet "
|
||||
"frame spanned multiple buffers\n");
|
||||
stats->rx_length_errors++;
|
||||
return discard_frame;
|
||||
}
|
||||
|
||||
if (unlikely(p->des01.rx.error_summary)) {
|
||||
if (unlikely(p->des01.rx.descriptor_error))
|
||||
x->rx_desc++;
|
||||
if (unlikely(p->des01.rx.partial_frame_error))
|
||||
x->rx_partial++;
|
||||
if (unlikely(p->des01.rx.run_frame))
|
||||
x->rx_runt++;
|
||||
if (unlikely(p->des01.rx.frame_too_long))
|
||||
x->rx_toolong++;
|
||||
if (unlikely(p->des01.rx.collision)) {
|
||||
x->rx_collision++;
|
||||
stats->collisions++;
|
||||
}
|
||||
if (unlikely(p->des01.rx.crc_error)) {
|
||||
x->rx_crc++;
|
||||
stats->rx_crc_errors++;
|
||||
}
|
||||
ret = discard_frame;
|
||||
}
|
||||
if (unlikely(p->des01.rx.dribbling))
|
||||
ret = discard_frame;
|
||||
|
||||
if (unlikely(p->des01.rx.length_error)) {
|
||||
x->rx_lenght++;
|
||||
ret = discard_frame;
|
||||
}
|
||||
if (unlikely(p->des01.rx.mii_error)) {
|
||||
x->rx_mii++;
|
||||
ret = discard_frame;
|
||||
}
|
||||
if (p->des01.rx.multicast_frame) {
|
||||
x->rx_multicast++;
|
||||
stats->multicast++;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mac100_irq_status(unsigned long ioaddr)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
|
||||
unsigned int reg_n)
|
||||
{
|
||||
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
|
||||
}
|
||||
|
||||
static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
|
||||
unsigned int reg_n)
|
||||
{
|
||||
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
|
||||
}
|
||||
|
||||
static void mac100_set_filter(struct net_device *dev)
|
||||
{
|
||||
unsigned long ioaddr = dev->base_addr;
|
||||
u32 value = readl(ioaddr + MAC_CONTROL);
|
||||
|
||||
if (dev->flags & IFF_PROMISC) {
|
||||
value |= MAC_CONTROL_PR;
|
||||
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
|
||||
MAC_CONTROL_HP);
|
||||
} else if ((dev->mc_count > HASH_TABLE_SIZE)
|
||||
|| (dev->flags & IFF_ALLMULTI)) {
|
||||
value |= MAC_CONTROL_PM;
|
||||
value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
|
||||
writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
|
||||
writel(0xffffffff, ioaddr + MAC_HASH_LOW);
|
||||
} else if (dev->mc_count == 0) { /* no multicast */
|
||||
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
|
||||
MAC_CONTROL_HO | MAC_CONTROL_HP);
|
||||
} else {
|
||||
int i;
|
||||
u32 mc_filter[2];
|
||||
struct dev_mc_list *mclist;
|
||||
|
||||
/* Perfect filter mode for physical address and Hash
|
||||
filter for multicast */
|
||||
value |= MAC_CONTROL_HP;
|
||||
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
|
||||
| MAC_CONTROL_HO);
|
||||
|
||||
memset(mc_filter, 0, sizeof(mc_filter));
|
||||
for (i = 0, mclist = dev->mc_list;
|
||||
mclist && i < dev->mc_count; i++, mclist = mclist->next) {
|
||||
/* The upper 6 bits of the calculated CRC are used to
 * index the contents of the hash table */
|
||||
int bit_nr =
|
||||
ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
|
||||
/* The most significant bit determines the register to
|
||||
* use (H/L) while the other 5 bits determine the bit
|
||||
* within the register. */
|
||||
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
|
||||
}
|
||||
writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
|
||||
writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
|
||||
}
|
||||
|
||||
writel(value, ioaddr + MAC_CONTROL);
|
||||
|
||||
DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
|
||||
"HI 0x%08x, LO 0x%08x\n",
|
||||
__func__, readl(ioaddr + MAC_CONTROL),
|
||||
readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
|
||||
return;
|
||||
}
|
||||
|
||||
static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
|
||||
unsigned int fc, unsigned int pause_time)
|
||||
{
|
||||
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
|
||||
|
||||
if (duplex)
|
||||
flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
|
||||
writel(flow, ioaddr + MAC_FLOW_CTRL);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* No PMT module supported in our SoC for the Ethernet Controller. */
|
||||
static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
|
||||
int disable_rx_ic)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < ring_size; i++) {
|
||||
p->des01.rx.own = 1;
|
||||
p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
|
||||
if (i == ring_size - 1)
|
||||
p->des01.rx.end_ring = 1;
|
||||
if (disable_rx_ic)
|
||||
p->des01.rx.disable_ic = 1;
|
||||
p++;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < ring_size; i++) {
|
||||
p->des01.tx.own = 0;
|
||||
if (i == ring_size - 1)
|
||||
p->des01.tx.end_ring = 1;
|
||||
p++;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
static int mac100_get_tx_owner(struct dma_desc *p)
|
||||
{
|
||||
return p->des01.tx.own;
|
||||
}
|
||||
|
||||
static int mac100_get_rx_owner(struct dma_desc *p)
|
||||
{
|
||||
return p->des01.rx.own;
|
||||
}
|
||||
|
||||
static void mac100_set_tx_owner(struct dma_desc *p)
|
||||
{
|
||||
p->des01.tx.own = 1;
|
||||
}
|
||||
|
||||
static void mac100_set_rx_owner(struct dma_desc *p)
|
||||
{
|
||||
p->des01.rx.own = 1;
|
||||
}
|
||||
|
||||
static int mac100_get_tx_ls(struct dma_desc *p)
|
||||
{
|
||||
return p->des01.tx.last_segment;
|
||||
}
|
||||
|
||||
static void mac100_release_tx_desc(struct dma_desc *p)
|
||||
{
|
||||
int ter = p->des01.tx.end_ring;
|
||||
|
||||
/* clean field used within the xmit */
|
||||
p->des01.tx.first_segment = 0;
|
||||
p->des01.tx.last_segment = 0;
|
||||
p->des01.tx.buffer1_size = 0;
|
||||
|
||||
/* clean status reported */
|
||||
p->des01.tx.error_summary = 0;
|
||||
p->des01.tx.underflow_error = 0;
|
||||
p->des01.tx.no_carrier = 0;
|
||||
p->des01.tx.loss_carrier = 0;
|
||||
p->des01.tx.excessive_deferral = 0;
|
||||
p->des01.tx.excessive_collisions = 0;
|
||||
p->des01.tx.late_collision = 0;
|
||||
p->des01.tx.heartbeat_fail = 0;
|
||||
p->des01.tx.deferred = 0;
|
||||
|
||||
/* set termination field */
|
||||
p->des01.tx.end_ring = ter;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
|
||||
int csum_flag)
|
||||
{
|
||||
p->des01.tx.first_segment = is_fs;
|
||||
p->des01.tx.buffer1_size = len;
|
||||
}
|
||||
|
||||
static void mac100_clear_tx_ic(struct dma_desc *p)
|
||||
{
|
||||
p->des01.tx.interrupt = 0;
|
||||
}
|
||||
|
||||
static void mac100_close_tx_desc(struct dma_desc *p)
|
||||
{
|
||||
p->des01.tx.last_segment = 1;
|
||||
p->des01.tx.interrupt = 1;
|
||||
}
|
||||
|
||||
static int mac100_get_rx_frame_len(struct dma_desc *p)
|
||||
{
|
||||
return p->des01.rx.frame_length;
|
||||
}
|
||||
|
||||
struct stmmac_ops mac100_driver = {
|
||||
.core_init = mac100_core_init,
|
||||
.dump_mac_regs = mac100_dump_mac_regs,
|
||||
.dma_init = mac100_dma_init,
|
||||
.dump_dma_regs = mac100_dump_dma_regs,
|
||||
.dma_mode = mac100_dma_operation_mode,
|
||||
.dma_diagnostic_fr = mac100_dma_diagnostic_fr,
|
||||
.tx_status = mac100_get_tx_frame_status,
|
||||
.rx_status = mac100_get_rx_frame_status,
|
||||
.get_tx_len = mac100_get_tx_len,
|
||||
.set_filter = mac100_set_filter,
|
||||
.flow_ctrl = mac100_flow_ctrl,
|
||||
.pmt = mac100_pmt,
|
||||
.init_rx_desc = mac100_init_rx_desc,
|
||||
.init_tx_desc = mac100_init_tx_desc,
|
||||
.get_tx_owner = mac100_get_tx_owner,
|
||||
.get_rx_owner = mac100_get_rx_owner,
|
||||
.release_tx_desc = mac100_release_tx_desc,
|
||||
.prepare_tx_desc = mac100_prepare_tx_desc,
|
||||
.clear_tx_ic = mac100_clear_tx_ic,
|
||||
.close_tx_desc = mac100_close_tx_desc,
|
||||
.get_tx_ls = mac100_get_tx_ls,
|
||||
.set_tx_owner = mac100_set_tx_owner,
|
||||
.set_rx_owner = mac100_set_rx_owner,
|
||||
.get_rx_frame_len = mac100_get_rx_frame_len,
|
||||
.host_irq_status = mac100_irq_status,
|
||||
.set_umac_addr = mac100_set_umac_addr,
|
||||
.get_umac_addr = mac100_get_umac_addr,
|
||||
};
|
||||
|
||||
struct mac_device_info *mac100_setup(unsigned long ioaddr)
|
||||
{
|
||||
struct mac_device_info *mac;
|
||||
|
||||
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
|
||||
|
||||
pr_info("\tMAC 10/100\n");
|
||||
|
||||
mac->ops = &mac100_driver;
|
||||
mac->hw.pmt = PMT_NOT_SUPPORTED;
|
||||
mac->hw.link.port = MAC_CONTROL_PS;
|
||||
mac->hw.link.duplex = MAC_CONTROL_F;
|
||||
mac->hw.link.speed = 0;
|
||||
mac->hw.mii.addr = MAC_MII_ADDR;
|
||||
mac->hw.mii.data = MAC_MII_DATA;
|
||||
|
||||
return mac;
|
||||
}
|
|
@ -0,0 +1,116 @@
|
|||
/*******************************************************************************
|
||||
MAC 10/100 Header File
|
||||
|
||||
Copyright (C) 2007-2009 STMicroelectronics Ltd
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
/*----------------------------------------------------------------------------
|
||||
* MAC BLOCK defines
|
||||
*---------------------------------------------------------------------------*/
|
||||
/* MAC CSR offset */
|
||||
#define MAC_CONTROL 0x00000000 /* MAC Control */
|
||||
#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
|
||||
#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
|
||||
#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
|
||||
#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
|
||||
#define MAC_MII_ADDR 0x00000014 /* MII Address */
|
||||
#define MAC_MII_DATA 0x00000018 /* MII Data */
|
||||
#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
|
||||
#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
|
||||
#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
|
||||
|
||||
/* MAC CTRL defines */
|
||||
#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
|
||||
#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
|
||||
#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
|
||||
#define MAC_CONTROL_PS 0x08000000 /* Port Select */
|
||||
#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
|
||||
#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
|
||||
#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
|
||||
#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
|
||||
#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
|
||||
#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
|
||||
#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
|
||||
#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
|
||||
#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
|
||||
#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
|
||||
#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
|
||||
#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
|
||||
#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
|
||||
#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
|
||||
#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
|
||||
#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
|
||||
#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
|
||||
#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
|
||||
#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
|
||||
#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
|
||||
#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
|
||||
|
||||
#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
|
||||
|
||||
/* MAC FLOW CTRL defines */
|
||||
#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
|
||||
#define MAC_FLOW_CTRL_PT_SHIFT 16
|
||||
#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
|
||||
#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
|
||||
#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
|
||||
|
||||
/* MII ADDR defines */
|
||||
#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
|
||||
#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
|
||||
|
||||
/*----------------------------------------------------------------------------
|
||||
* DMA BLOCK defines
|
||||
*---------------------------------------------------------------------------*/
|
||||
|
||||
/* DMA Bus Mode register defines */
|
||||
#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
|
||||
#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
|
||||
#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
|
||||
#define DMA_BUS_MODE_PBL_SHIFT 8
|
||||
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
|
||||
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
|
||||
#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
|
||||
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
|
||||
#define DMA_BUS_MODE_DEFAULT 0x00000000
|
||||
|
||||
/* DMA Control register defines */
|
||||
#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
|
||||
|
||||
/* Transmit Threshold Control */
|
||||
enum ttc_control {
|
||||
DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
|
||||
DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
|
||||
DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
|
||||
DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
|
||||
DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
|
||||
DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
|
||||
DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
|
||||
DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
|
||||
DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
|
||||
DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
|
||||
};
|
||||
|
||||
/* STMAC110 DMA Missed Frame Counter register defines */
|
||||
#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow */
#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
|
|
@ -0,0 +1,98 @@
|
|||
/*******************************************************************************
|
||||
Copyright (C) 2007-2009 STMicroelectronics Ltd
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
#define DRV_MODULE_VERSION "Oct_09"
|
||||
|
||||
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
|
||||
#define STMMAC_VLAN_TAG_USED
|
||||
#include <linux/if_vlan.h>
|
||||
#endif
|
||||
|
||||
#include "common.h"
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
#include "stmmac_timer.h"
|
||||
#endif
|
||||
|
||||
struct stmmac_priv {
|
||||
/* Frequently used values are kept adjacent for cache effect */
|
||||
struct dma_desc *dma_tx ____cacheline_aligned;
|
||||
dma_addr_t dma_tx_phy;
|
||||
struct sk_buff **tx_skbuff;
|
||||
unsigned int cur_tx;
|
||||
unsigned int dirty_tx;
|
||||
unsigned int dma_tx_size;
|
||||
int tx_coe;
|
||||
int tx_coalesce;
|
||||
|
||||
struct dma_desc *dma_rx;
|
||||
unsigned int cur_rx;
|
||||
unsigned int dirty_rx;
|
||||
struct sk_buff **rx_skbuff;
|
||||
dma_addr_t *rx_skbuff_dma;
|
||||
struct sk_buff_head rx_recycle;
|
||||
|
||||
struct net_device *dev;
|
||||
int is_gmac;
|
||||
dma_addr_t dma_rx_phy;
|
||||
unsigned int dma_rx_size;
|
||||
int rx_csum;
|
||||
unsigned int dma_buf_sz;
|
||||
struct device *device;
|
||||
struct mac_device_info *mac_type;
|
||||
|
||||
struct stmmac_extra_stats xstats;
|
||||
struct napi_struct napi;
|
||||
|
||||
phy_interface_t phy_interface;
|
||||
int pbl;
|
||||
int bus_id;
|
||||
int phy_addr;
|
||||
int phy_mask;
|
||||
int (*phy_reset) (void *priv);
|
||||
void (*fix_mac_speed) (void *priv, unsigned int speed);
|
||||
void *bsp_priv;
|
||||
|
||||
int phy_irq;
|
||||
struct phy_device *phydev;
|
||||
int oldlink;
|
||||
int speed;
|
||||
int oldduplex;
|
||||
unsigned int flow_ctrl;
|
||||
unsigned int pause;
|
||||
struct mii_bus *mii;
|
||||
|
||||
u32 msg_enable;
|
||||
spinlock_t lock;
|
||||
int wolopts;
|
||||
int wolenabled;
|
||||
int shutdown;
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
struct stmmac_timer *tm;
|
||||
#endif
|
||||
#ifdef STMMAC_VLAN_TAG_USED
|
||||
struct vlan_group *vlgrp;
|
||||
#endif
|
||||
};
|
||||
|
||||
extern int stmmac_mdio_unregister(struct net_device *ndev);
|
||||
extern int stmmac_mdio_register(struct net_device *ndev);
|
||||
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
|
|
@ -0,0 +1,395 @@
|
|||
/*******************************************************************************
|
||||
STMMAC Ethtool support
|
||||
|
||||
Copyright (C) 2007-2009 STMicroelectronics Ltd
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it
|
||||
under the terms and conditions of the GNU General Public License,
|
||||
version 2, as published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
The full GNU General Public License is included in this distribution in
|
||||
the file called "COPYING".
|
||||
|
||||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/mii.h>
|
||||
#include <linux/phy.h>
|
||||
|
||||
#include "stmmac.h"
|
||||
|
||||
#define REG_SPACE_SIZE 0x1054
|
||||
#define MAC100_ETHTOOL_NAME "st_mac100"
|
||||
#define GMAC_ETHTOOL_NAME "st_gmac"
|
||||
|
||||
struct stmmac_stats {
|
||||
char stat_string[ETH_GSTRING_LEN];
|
||||
int sizeof_stat;
|
||||
int stat_offset;
|
||||
};
|
||||
|
||||
#define STMMAC_STAT(m) \
|
||||
{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
|
||||
offsetof(struct stmmac_priv, xstats.m)}
|
||||
|
||||
static const struct stmmac_stats stmmac_gstrings_stats[] = {
|
||||
STMMAC_STAT(tx_underflow),
|
||||
STMMAC_STAT(tx_carrier),
|
||||
STMMAC_STAT(tx_losscarrier),
|
||||
STMMAC_STAT(tx_heartbeat),
|
||||
STMMAC_STAT(tx_deferred),
|
||||
STMMAC_STAT(tx_vlan),
|
||||
STMMAC_STAT(rx_vlan),
|
||||
STMMAC_STAT(tx_jabber),
|
||||
STMMAC_STAT(tx_frame_flushed),
|
||||
STMMAC_STAT(tx_payload_error),
|
||||
STMMAC_STAT(tx_ip_header_error),
|
||||
STMMAC_STAT(rx_desc),
|
||||
STMMAC_STAT(rx_partial),
|
||||
STMMAC_STAT(rx_runt),
|
||||
STMMAC_STAT(rx_toolong),
|
||||
STMMAC_STAT(rx_collision),
|
||||
STMMAC_STAT(rx_crc),
|
||||
STMMAC_STAT(rx_lenght),
|
||||
STMMAC_STAT(rx_mii),
|
||||
STMMAC_STAT(rx_multicast),
|
||||
STMMAC_STAT(rx_gmac_overflow),
|
||||
STMMAC_STAT(rx_watchdog),
|
||||
STMMAC_STAT(da_rx_filter_fail),
|
||||
STMMAC_STAT(sa_rx_filter_fail),
|
||||
STMMAC_STAT(rx_missed_cntr),
|
||||
STMMAC_STAT(rx_overflow_cntr),
|
||||
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
STMMAC_STAT(tx_jabber_irq),
STMMAC_STAT(rx_overflow_irq),
STMMAC_STAT(rx_buf_unav_irq),
STMMAC_STAT(rx_process_stopped_irq),
STMMAC_STAT(rx_watchdog_irq),
STMMAC_STAT(tx_early_irq),
STMMAC_STAT(fatal_bus_error_irq),
STMMAC_STAT(threshold),
STMMAC_STAT(tx_pkt_n),
STMMAC_STAT(rx_pkt_n),
STMMAC_STAT(poll_n),
STMMAC_STAT(sched_timer_n),
STMMAC_STAT(normal_irq_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)

void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct stmmac_priv *priv = netdev_priv(dev);

if (!priv->is_gmac)
strcpy(info->driver, MAC100_ETHTOOL_NAME);
else
strcpy(info->driver, GMAC_ETHTOOL_NAME);

strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
info->n_stats = STMMAC_STATS_LEN;
return;
}

int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phy = priv->phydev;
int rc;
if (phy == NULL) {
pr_err("%s: %s: PHY is not registered\n",
__func__, dev->name);
return -ENODEV;
}
if (!netif_running(dev)) {
pr_err("%s: interface is disabled: we cannot track "
"link speed / duplex setting\n", dev->name);
return -EBUSY;
}
cmd->transceiver = XCVR_INTERNAL;
spin_lock_irq(&priv->lock);
rc = phy_ethtool_gset(phy, cmd);
spin_unlock_irq(&priv->lock);
return rc;
}

int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phy = priv->phydev;
int rc;

spin_lock(&priv->lock);
rc = phy_ethtool_sset(phy, cmd);
spin_unlock(&priv->lock);

return rc;
}

u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
return priv->msg_enable;
}

void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
struct stmmac_priv *priv = netdev_priv(dev);
priv->msg_enable = level;

}

int stmmac_check_if_running(struct net_device *dev)
{
if (!netif_running(dev))
return -EBUSY;
return 0;
}

int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
return REG_SPACE_SIZE;
}

void stmmac_ethtool_gregs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
int i;
u32 *reg_space = (u32 *) space;

struct stmmac_priv *priv = netdev_priv(dev);

memset(reg_space, 0x0, REG_SPACE_SIZE);

if (!priv->is_gmac) {
/* MAC registers */
for (i = 0; i < 12; i++)
reg_space[i] = readl(dev->base_addr + (i * 4));
/* DMA registers */
for (i = 0; i < 9; i++)
reg_space[i + 12] =
readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
} else {
/* MAC registers */
for (i = 0; i < 55; i++)
reg_space[i] = readl(dev->base_addr + (i * 4));
/* DMA registers */
for (i = 0; i < 22; i++)
reg_space[i + 55] =
readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
}

return;
}

int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
{
if (data)
netdev->features |= NETIF_F_HW_CSUM;
else
netdev->features &= ~NETIF_F_HW_CSUM;

return 0;
}

u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);

return priv->rx_csum;
}

static void
stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);

spin_lock(&priv->lock);

pause->rx_pause = 0;
pause->tx_pause = 0;
pause->autoneg = priv->phydev->autoneg;

if (priv->flow_ctrl & FLOW_RX)
pause->rx_pause = 1;
if (priv->flow_ctrl & FLOW_TX)
pause->tx_pause = 1;

spin_unlock(&priv->lock);
return;
}

static int
stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
struct phy_device *phy = priv->phydev;
int new_pause = FLOW_OFF;
int ret = 0;

spin_lock(&priv->lock);

if (pause->rx_pause)
new_pause |= FLOW_RX;
if (pause->tx_pause)
new_pause |= FLOW_TX;

priv->flow_ctrl = new_pause;

if (phy->autoneg) {
if (netif_running(netdev)) {
struct ethtool_cmd cmd;
/* auto-negotiation automatically restarted */
cmd.cmd = ETHTOOL_NWAY_RST;
cmd.supported = phy->supported;
cmd.advertising = phy->advertising;
cmd.autoneg = phy->autoneg;
cmd.speed = phy->speed;
cmd.duplex = phy->duplex;
cmd.phy_address = phy->addr;
ret = phy_ethtool_sset(phy, &cmd);
}
} else {
unsigned long ioaddr = netdev->base_addr;
priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
priv->flow_ctrl, priv->pause);
}
spin_unlock(&priv->lock);
return ret;
}

static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
struct stmmac_priv *priv = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr;
int i;

/* Update HW stats if supported */
priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
ioaddr);

for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}

return;
}

static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return STMMAC_STATS_LEN;
default:
return -EOPNOTSUPP;
}
}

static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int i;
u8 *p = data;

switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < STMMAC_STATS_LEN; i++) {
memcpy(p, stmmac_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
default:
WARN_ON(1);
break;
}
return;
}

/* Currently only support WOL through Magic packet. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);

spin_lock_irq(&priv->lock);
if (priv->wolenabled == PMT_SUPPORTED) {
wol->supported = WAKE_MAGIC;
wol->wolopts = priv->wolopts;
}
spin_unlock_irq(&priv->lock);
}

static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 support = WAKE_MAGIC;

if (priv->wolenabled == PMT_NOT_SUPPORTED)
return -EINVAL;

if (wol->wolopts & ~support)
return -EINVAL;

if (wol->wolopts == 0)
device_set_wakeup_enable(priv->device, 0);
else
device_set_wakeup_enable(priv->device, 1);

spin_lock_irq(&priv->lock);
priv->wolopts = wol->wolopts;
spin_unlock_irq(&priv->lock);

return 0;
}

static struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_settings = stmmac_ethtool_getsettings,
.set_settings = stmmac_ethtool_setsettings,
.get_msglevel = stmmac_ethtool_getmsglevel,
.set_msglevel = stmmac_ethtool_setmsglevel,
.get_regs = stmmac_ethtool_gregs,
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
.get_rx_csum = stmmac_ethtool_get_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = stmmac_ethtool_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
.get_ethtool_stats = stmmac_get_ethtool_stats,
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
.set_wol = stmmac_set_wol,
.get_sset_count = stmmac_get_sset_count,
#ifdef NETIF_F_TSO
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
#endif
};

void stmmac_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
}

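The ops table above only takes effect once the driver attaches it to the net_device. The stmmac_main.c diff is suppressed below, so the exact call site is not visible here; the following is a minimal sketch, assuming the probe path attaches the ops before register_netdev() and that the netdev_ops table is named stmmac_netdev_ops (both assumptions, not taken from this diff):

/* Hypothetical excerpt from the driver probe path (not shown in this diff). */
static int stmmac_probe_sketch(struct net_device *ndev)
{
	ndev->netdev_ops = &stmmac_netdev_ops;	/* assumed name */
	stmmac_set_ethtool_ops(ndev);		/* hook up the table above */
	return register_netdev(ndev);
}

Once registered, "ethtool -S ethN" walks get_sset_count/get_strings/get_ethtool_stats, and "ethtool -d ethN" uses get_regs_len/get_regs.
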
File diff suppressed because it is too large
@ -0,0 +1,217 @@
/*******************************************************************************
STMMAC Ethernet Driver -- MDIO bus implementation
Provides Bus interface for MII registers

Copyright (C) 2007-2009 STMicroelectronics Ltd

This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.

This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

The full GNU General Public License is included in this distribution in
the file called "COPYING".

Author: Carl Shaw <carl.shaw@st.com>
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "stmmac.h"

#define MII_BUSY 0x00000001
#define MII_WRITE 0x00000002

/**
* stmmac_mdio_read
* @bus: points to the mii_bus structure
* @phyaddr: MII addr reg bits 15-11
* @phyreg: MII addr reg bits 10-6
* Description: it reads data from the MII register from within the phy device.
* For the 7111 GMAC, bit 0 of the MII address register must be set while
* accessing the PHY registers.
* Fortunately, it seems this has no drawback for the 7109 MAC.
*/
static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->mac_type->hw.mii.addr;
unsigned int mii_data = priv->mac_type->hw.mii.data;

int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
((phyreg << 6) & (0x000007C0)));
regValue |= MII_BUSY; /* in case of GMAC */

do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
writel(regValue, ioaddr + mii_address);
do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);

/* Read the data from the MII data register */
data = (int)readl(ioaddr + mii_data);

return data;
}

/**
* stmmac_mdio_write
* @bus: points to the mii_bus structure
* @phyaddr: MII addr reg bits 15-11
* @phyreg: MII addr reg bits 10-6
* @phydata: phy data
* Description: it writes the data into the MII register from within the device.
*/
static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
u16 phydata)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->mac_type->hw.mii.addr;
unsigned int mii_data = priv->mac_type->hw.mii.data;

u16 value =
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
| MII_WRITE;

value |= MII_BUSY;

/* Wait until any existing MII operation is complete */
do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);

/* Set the MII address register to write */
writel(phydata, ioaddr + mii_data);
writel(value, ioaddr + mii_address);

/* Wait until any existing MII operation is complete */
do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);

return 0;
}

/**
* stmmac_mdio_reset
* @bus: points to the mii_bus structure
* Description: reset the MII bus
*/
static int stmmac_mdio_reset(struct mii_bus *bus)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->mac_type->hw.mii.addr;

if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
priv->phy_reset(priv->bsp_priv);
}

/* This is a workaround for problems with the STE101P PHY.
* It doesn't complete its reset until at least one clock cycle
* on MDC, so perform a dummy mdio read.
*/
writel(0, ioaddr + mii_address);

return 0;
}

/**
* stmmac_mdio_register
* @ndev: net device structure
* Description: it registers the MII bus
*/
int stmmac_mdio_register(struct net_device *ndev)
{
int err = 0;
struct mii_bus *new_bus;
int *irqlist;
struct stmmac_priv *priv = netdev_priv(ndev);
int addr, found;

new_bus = mdiobus_alloc();
if (new_bus == NULL)
return -ENOMEM;

irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (irqlist == NULL) {
err = -ENOMEM;
goto irqlist_alloc_fail;
}

/* Assign IRQ to phy at address phy_addr */
if (priv->phy_addr != -1)
irqlist[priv->phy_addr] = priv->phy_irq;

new_bus->name = "STMMAC MII Bus";
new_bus->read = &stmmac_mdio_read;
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = priv->phy_mask;
new_bus->parent = priv->device;
err = mdiobus_register(new_bus);
if (err != 0) {
pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
goto bus_register_fail;
}

priv->mii = new_bus;

found = 0;
for (addr = 0; addr < 32; addr++) {
struct phy_device *phydev = new_bus->phy_map[addr];
if (phydev) {
if (priv->phy_addr == -1) {
priv->phy_addr = addr;
phydev->irq = priv->phy_irq;
irqlist[addr] = priv->phy_irq;
}
pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
ndev->name, phydev->phy_id, addr,
phydev->irq, dev_name(&phydev->dev),
(addr == priv->phy_addr) ? " active" : "");
found = 1;
}
}

if (!found)
pr_warning("%s: No PHY found\n", ndev->name);

return 0;
bus_register_fail:
kfree(irqlist);
irqlist_alloc_fail:
kfree(new_bus);
return err;
}

/**
* stmmac_mdio_unregister
* @ndev: net device structure
* Description: it unregisters the MII bus
*/
int stmmac_mdio_unregister(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);

mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
kfree(priv->mii);

return 0;
}

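For reference, the address word that stmmac_mdio_read() and stmmac_mdio_write() program into the MII address register packs the PHY address into bits 15-11, the register number into bits 10-6, the write flag into bit 1 and the busy flag into bit 0. A stand-alone sketch of the same packing (user-space C, constants copied from above, register/PHY numbers chosen only for illustration):

#include <stdio.h>
#include <stdint.h>

#define MII_BUSY  0x00000001
#define MII_WRITE 0x00000002

static uint16_t mii_addr_word(int phyaddr, int phyreg, int write)
{
	uint16_t v = ((phyaddr << 11) & 0x0000F800) |
		     ((phyreg << 6) & 0x000007C0);
	if (write)
		v |= MII_WRITE;
	return v | MII_BUSY;	/* bit 0 doubles as the GMAC busy flag */
}

int main(void)
{
	/* PHY at address 1, register 2 (MII_PHYSID1), read access */
	printf("0x%04x\n", mii_addr_word(1, 2, 0));	/* prints 0x0881 */
	return 0;
}
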
@ -0,0 +1,140 @@
/*******************************************************************************
STMMAC external timer support.

Copyright (C) 2007-2009 STMicroelectronics Ltd

This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.

This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

The full GNU General Public License is included in this distribution in
the file called "COPYING".

Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include "stmmac_timer.h"

static void stmmac_timer_handler(void *data)
{
struct net_device *dev = (struct net_device *)data;

stmmac_schedule(dev);

return;
}

#define STMMAC_TIMER_MSG(timer, freq) \
printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);

#if defined(CONFIG_STMMAC_RTC_TIMER)
#include <linux/rtc.h>
static struct rtc_device *stmmac_rtc;
static rtc_task_t stmmac_task;

static void stmmac_rtc_start(unsigned int new_freq)
{
rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
return;
}

static void stmmac_rtc_stop(void)
{
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
return;
}

int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
{
stmmac_task.private_data = dev;
stmmac_task.func = stmmac_timer_handler;

stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
if (stmmac_rtc == NULL) {
pr_err("open rtc device failed\n");
return -ENODEV;
}

rtc_irq_register(stmmac_rtc, &stmmac_task);

/* Periodic mode is not supported */
if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
pr_err("set periodic failed\n");
rtc_irq_unregister(stmmac_rtc, &stmmac_task);
rtc_class_close(stmmac_rtc);
return -1;
}

STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);

tm->timer_start = stmmac_rtc_start;
tm->timer_stop = stmmac_rtc_stop;

return 0;
}

int stmmac_close_ext_timer(void)
{
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
rtc_irq_unregister(stmmac_rtc, &stmmac_task);
rtc_class_close(stmmac_rtc);
return 0;
}

#elif defined(CONFIG_STMMAC_TMU_TIMER)
#include <linux/clk.h>
#define TMU_CHANNEL "tmu2_clk"
static struct clk *timer_clock;

static void stmmac_tmu_start(unsigned int new_freq)
{
clk_set_rate(timer_clock, new_freq);
clk_enable(timer_clock);
return;
}

static void stmmac_tmu_stop(void)
{
clk_disable(timer_clock);
return;
}

int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
{
timer_clock = clk_get(NULL, TMU_CHANNEL);

if (timer_clock == NULL)
return -1;

if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
timer_clock = NULL;
return -1;
}

STMMAC_TIMER_MSG("TMU2", tm->freq);
tm->timer_start = stmmac_tmu_start;
tm->timer_stop = stmmac_tmu_stop;

return 0;
}

int stmmac_close_ext_timer(void)
{
clk_disable(timer_clock);
tmu2_unregister_user();
clk_put(timer_clock);
return 0;
}
#endif

@ -0,0 +1,41 @@
/*******************************************************************************
STMMAC external timer Header File.

Copyright (C) 2007-2009 STMicroelectronics Ltd

This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.

This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

The full GNU General Public License is included in this distribution in
the file called "COPYING".

Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

struct stmmac_timer {
void (*timer_start) (unsigned int new_freq);
void (*timer_stop) (void);
unsigned int freq;
};

/* Open the HW timer device and return 0 in case of success */
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
/* Stop the timer and release it */
int stmmac_close_ext_timer(void);
/* Function used for scheduling task within the stmmac */
void stmmac_schedule(struct net_device *dev);

#if defined(CONFIG_STMMAC_TMU_TIMER)
extern int tmu2_register_user(void *fnt, void *data);
extern void tmu2_unregister_user(void);
#endif

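The header above is the whole contract between the MAC driver and the external timer: the driver fills in freq, calls stmmac_open_ext_timer(), and then drives interrupt mitigation through the two callbacks. A minimal sketch of a caller, with the 256 Hz value, the helper name and the error handling chosen purely for illustration (none of them appear in this diff):

/* Hypothetical caller inside the MAC driver (not part of this diff). */
static struct stmmac_timer tm;

static int example_enable_mitigation(struct net_device *dev)
{
	tm.freq = 256;	/* desired mitigation tick in Hz (assumed value) */
	if (stmmac_open_ext_timer(dev, &tm) < 0)
		return -ENODEV;	/* fall back to per-packet interrupts */
	tm.timer_start(tm.freq);	/* periodic calls into stmmac_schedule() */
	return 0;
}

stmmac_schedule() is expected to kick the driver's poll routine, so the RTC or TMU tick stands in for per-packet RX/TX interrupts while mitigation is active.
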
@ -451,7 +451,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
tot_sgs += skb_vnet_hdr(skb)->num_sg;
kfree_skb(skb);
dev_kfree_skb_any(skb);
}
return tot_sgs;
}

@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
}
rq->uncommitted[ring_idx] += num_allocated;

dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
dev_dbg(&adapter->netdev->dev,
"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
"%u, uncommited %u\n", num_allocated, ring->next2fill,
ring->next2comp, rq->uncommitted[ring_idx]);

@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;

dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
dev_dbg(&adapter->netdev->dev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = dw2 | buf_size;
gdesc->dword[3] = 0;

dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
dev_dbg(&adapter->netdev->dev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, gdesc->txd.addr,
gdesc->dword[2], gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = dw2 | frag->size;
gdesc->dword[3] = 0;

dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
dev_dbg(&adapter->netdev->dev,
"txd[%u]: 0x%llu %u %u\n",
tq->tx_ring.next2fill, gdesc->txd.addr,
gdesc->dword[2], gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
tdd = tq->data_ring.base + tq->tx_ring.next2fill;

memcpy(tdd->data, skb->data, ctx->copy_size);
dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
dev_dbg(&adapter->netdev->dev,
"copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
return 1;

@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,

if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tq->stats.tx_ring_full++;
dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
dev_dbg(&adapter->netdev->dev,
"tx queue stopped on %s, next2comp %u"
" next2fill %u\n", adapter->netdev->name,
tq->tx_ring.next2comp, tq->tx_ring.next2fill);

@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,

/* finally flips the GEN bit of the SOP desc */
gdesc->dword[2] ^= VMXNET3_TXD_GEN;
dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
dev_dbg(&adapter->netdev->dev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
gdesc->dword[3]);

@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
if (unlikely(rcd->len == 0)) {
/* Pretend the rx buffer is skipped. */
BUG_ON(!(rcd->sop && rcd->eop));
dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
dev_dbg(&adapter->netdev->dev,
"rxRing[%u][%u] 0 length\n",
ring_idx, idx);
goto rcd_done;
}

@ -1314,9 +1322,11 @@ vmxnet3_netpoll(struct net_device *netdev)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int irq;

#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX)
irq = adapter->intr.msix_entries[0].vector;
else
#endif
irq = adapter->pdev->irq;

disable_irq(irq);

@ -1330,12 +1340,15 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
int err;

#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
/* we only use 1 MSI-X vector */
err = request_irq(adapter->intr.msix_entries[0].vector,
vmxnet3_intr, 0, adapter->netdev->name,
adapter->netdev);
} else if (adapter->intr.type == VMXNET3_IT_MSI) {
} else
#endif
if (adapter->intr.type == VMXNET3_IT_MSI) {
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
} else {

@ -1376,6 +1389,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
adapter->intr.num_intrs <= 0);

switch (adapter->intr.type) {
#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX:
{
int i;

@ -1385,6 +1399,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
adapter->netdev);
break;
}
#endif
case VMXNET3_IT_MSI:
free_irq(adapter->pdev->irq, adapter->netdev);
break;

@ -1676,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
int err;
u32 ret;

dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
dev_dbg(&adapter->netdev->dev,
"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
adapter->rx_queue.rx_ring[0].size,

@ -2134,6 +2150,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
if (adapter->intr.type == VMXNET3_IT_AUTO) {
int err;

#ifdef CONFIG_PCI_MSI
adapter->intr.msix_entries[0].entry = 0;
err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
VMXNET3_LINUX_MAX_MSIX_VECT);

@ -2142,6 +2159,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
adapter->intr.type = VMXNET3_IT_MSIX;
return;
}
#endif

err = pci_enable_msi(adapter->pdev);
if (!err) {

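The recurring change in the vmxnet3 hunks above is mechanical: the driver-private dprintk(KERN_ERR ...) calls become dev_dbg(&adapter->netdev->dev, ...), tying the messages to the device and keeping them silent unless debugging is enabled, instead of always logging at error level. A before/after sketch of the pattern, with placeholder argument names (idx, addr, dw2, dw3 are not from this diff):

/* before: always printed at KERN_ERR via a private macro */
dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", idx, addr, dw2, dw3);

/* after: routed through the struct device, compiled out or gated by dynamic debug */
dev_dbg(&adapter->netdev->dev,
	"txd[%u]: 0x%Lx 0x%x 0x%x\n", idx, addr, dw2, dw3);
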
@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/ethtool.h>

@ -59,7 +60,6 @@
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/dst.h>

#include "vmxnet3_defs.h"

@ -130,11 +130,11 @@ struct inet_timewait_sock {
__u16 tw_num;
kmemcheck_bitfield_begin(flags);
/* And these are ours. */
__u8 tw_ipv6only:1,
tw_transparent:1;
/* 14 bits hole, try to pack */
unsigned int tw_ipv6only : 1,
tw_transparent : 1,
tw_pad : 14, /* 14 bits hole */
tw_ipv6_offset : 16;
kmemcheck_bitfield_end(flags);
__u16 tw_ipv6_offset;
unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;

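The inet_timewait_sock hunk folds the separate __u16 tw_ipv6_offset and the 14-bit hole into the same kmemcheck-annotated bitfield as the two flag bits, presumably so the whole word is covered by the begin/end annotation rather than leaving an unannotated hole next to it. A stand-alone sketch of just the new packed field (user-space C, surrounding kernel fields omitted):

#include <stdio.h>

struct tw_flags_sketch {
	unsigned int tw_ipv6only : 1,
		     tw_transparent : 1,
		     tw_pad : 14,		/* the former 14-bit hole */
		     tw_ipv6_offset : 16;	/* the folded-in __u16 */
};

int main(void)
{
	/* 1 + 1 + 14 + 16 = 32 bits: flags, hole and offset share one word. */
	printf("%zu byte(s)\n", sizeof(struct tw_flags_sketch));	/* 4 on common ABIs */
	return 0;
}
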
@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)

dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);

dev_set_drvdata(&conn->dev, conn);

if (device_add(&conn->dev) < 0) {
BT_ERR("Failed to register connection device");
return;

@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
conn->dev.class = bt_class;
conn->dev.parent = &hdev->dev;

dev_set_drvdata(&conn->dev, conn);

device_initialize(&conn->dev);

INIT_WORK(&conn->work_add, add_conn);

@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)

conn->feat_mask = 0;

setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);

spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);

setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);

conn->disc_reason = 0x13;

return conn;

@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
/* Default config options */
pi->conf_len = 0;
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
skb_queue_head_init(TX_QUEUE(sk));
skb_queue_head_init(SREJ_QUEUE(sk));
INIT_LIST_HEAD(SREJ_LIST(sk));
}

static struct proto l2cap_proto = {

@ -335,6 +335,7 @@ struct pktgen_dev {
__u32 cur_src_mac_offset;
__be32 cur_saddr;
__be32 cur_daddr;
__u16 ip_id;
__u16 cur_udp_dst;
__u16 cur_udp_src;
__u16 cur_queue_map;

@ -2630,6 +2631,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
iph->protocol = IPPROTO_UDP; /* UDP */
iph->saddr = pkt_dev->cur_saddr;
iph->daddr = pkt_dev->cur_daddr;
iph->id = htons(pkt_dev->ip_id);
pkt_dev->ip_id++;
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);

@ -2641,24 +2644,26 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;

if (pkt_dev->nfrags <= 0)
if (pkt_dev->nfrags <= 0) {
pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
else {
memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
} else {
int frags = pkt_dev->nfrags;
int i;
int i, len;

pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);

if (frags > MAX_SKB_FRAGS)
frags = MAX_SKB_FRAGS;
if (datalen > frags * PAGE_SIZE) {
skb_put(skb, datalen - frags * PAGE_SIZE);
len = datalen - frags * PAGE_SIZE;
memset(skb_put(skb, len), 0, len);
datalen = frags * PAGE_SIZE;
}

i = 0;
while (datalen > 0) {
struct page *page = alloc_pages(GFP_KERNEL, 0);
struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
skb_shinfo(skb)->frags[i].page = page;
skb_shinfo(skb)->frags[i].page_offset = 0;
skb_shinfo(skb)->frags[i].size =

@ -447,6 +447,28 @@ extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
const int max_retries,
const u8 rskq_defer_accept,
int *expire, int *resend)
{
if (!rskq_defer_accept) {
*expire = req->retrans >= thresh;
*resend = 1;
return;
}
*expire = req->retrans >= thresh &&
(!inet_rsk(req)->acked || req->retrans >= max_retries);
/*
* Do not resend while waiting for data after ACK,
* start to resend on end of deferring period to give
* last chance for data or ACK to create established socket.
*/
*resend = !inet_rsk(req)->acked ||
req->retrans >= rskq_defer_accept - 1;
}

void inet_csk_reqsk_queue_prune(struct sock *parent,
const unsigned long interval,
const unsigned long timeout,

@ -502,9 +524,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
reqp=&lopt->syn_table[i];
while ((req = *reqp) != NULL) {
if (time_after_eq(now, req->expires)) {
if ((req->retrans < thresh ||
(inet_rsk(req)->acked && req->retrans < max_retries))
&& !req->rsk_ops->rtx_syn_ack(parent, req)) {
int expire = 0, resend = 0;

syn_ack_recalc(req, thresh, max_retries,
queue->rskq_defer_accept,
&expire, &resend);
if (!expire &&
(!resend ||
!req->rsk_ops->rtx_syn_ack(parent, req) ||
inet_rsk(req)->acked)) {
unsigned long timeo;

if (req->retrans++ == 0)

@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
break;
}
dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
if (dev) {
if (dev)
mreq.imr_ifindex = dev->ifindex;
dev_put(dev);
}
} else
dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);

err = -EADDRNOTAVAIL;
if (!dev)
break;
dev_put(dev);

err = -EINVAL;
if (sk->sk_bound_dev_if &&

@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
u8 res = 0;

if (seconds > 0) {
int period = timeout;

res = 1;
while (seconds > period && res < 255) {
res++;
timeout <<= 1;
if (timeout > rto_max)
timeout = rto_max;
period += timeout;
}
}
return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
int period = 0;

if (retrans > 0) {
period = timeout;
while (--retrans) {
timeout <<= 1;
if (timeout > rto_max)
timeout = rto_max;
period += timeout;
}
}
return period;
}

/*
* Wait for a TCP event.
*

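To see what the two conversion helpers above actually compute, here is a throwaway user-space harness with the function bodies copied verbatim; the 3 s / 120 s inputs assume the stock TCP_TIMEOUT_INIT and TCP_RTO_MAX values of 3*HZ and 120*HZ used by the TCP_DEFER_ACCEPT paths further down.

#include <stdio.h>

typedef unsigned char u8;

/* Copies of the helpers above, lifted out for a quick check. */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

int main(void)
{
	/* 3 s initial timeout, 120 s cap: the stock TCP_TIMEOUT_INIT/HZ
	 * and TCP_RTO_MAX/HZ values. */
	u8 r = secs_to_retrans(30, 3, 120);
	printf("30 s -> %u retransmits -> %d s\n", r, retrans_to_secs(r, 3, 120));
	/* prints: 30 s -> 4 retransmits -> 45 s */
	return 0;
}

A TCP_DEFER_ACCEPT request of 30 seconds is therefore stored as 4 retransmissions and reported back as 45 seconds, the span actually covered by those retransmissions.
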
@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto found_ok_skb;
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
WARN_ON(!(flags & MSG_PEEK));
WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
"copied %X seq %X\n", *seq,
TCP_SKB_CB(skb)->seq);
}

/* Well, if we have backlog, try to process it now yet. */

@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
break;

case TCP_DEFER_ACCEPT:
icsk->icsk_accept_queue.rskq_defer_accept = 0;
if (val > 0) {
/* Translate value in seconds to number of
* retransmits */
while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
val > ((TCP_TIMEOUT_INIT / HZ) <<
icsk->icsk_accept_queue.rskq_defer_accept))
icsk->icsk_accept_queue.rskq_defer_accept++;
icsk->icsk_accept_queue.rskq_defer_accept++;
}
/* Translate value in seconds to number of retransmits */
icsk->icsk_accept_queue.rskq_defer_accept =
secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
TCP_RTO_MAX / HZ);
break;

case TCP_WINDOW_CLAMP:

@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
val = (val ? : sysctl_tcp_fin_timeout) / HZ;
break;
case TCP_DEFER_ACCEPT:
val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;

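With the set and get paths now symmetrical around secs_to_retrans()/retrans_to_secs(), a user-space caller sees the rounding directly. A minimal sketch (the printed 45 assumes the stock 3 s / 120 s timeouts discussed above):

#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int secs = 30;
	socklen_t len = sizeof(secs);

	/* Ask for 30 s of deferred accept, then read back what was stored. */
	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
	getsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, &len);
	printf("kernel reports %d s\n", secs);	/* 45 with the stock timeouts */
	return 0;
}
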
@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!(flg & TCP_FLAG_ACK))
return NULL;

/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
inet_rsk(req)->acked = 1;
return NULL;
}

@ -497,13 +497,17 @@ done:
goto e_inval;

if (val) {
struct net_device *dev;

if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
goto e_inval;

if (__dev_get_by_index(net, val) == NULL) {
dev = dev_get_by_index(net, val);
if (!dev) {
retv = -ENODEV;
break;
}
dev_put(dev);
}
np->mcast_oif = val;
retv = 0;

@ -1074,6 +1074,8 @@ restart:
err = -ECONNREFUSED;
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;

if (unix_recvq_full(other)) {
err = -EAGAIN;