Merge branch 'netcp-fixes'

Murali Karicheri says:

====================
net: netcp: a set of bug fixes

This patch series fixes a set of issues in the netcp driver found during internal
testing. While at it, it also does some cleanup.

The fixes were tested on K2HK, K2L and K2E EVMs; the boot-up logs can be
seen at

 http://pastebin.ubuntu.com/12533100/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 2015-09-23 14:37:38 -07:00
commit 3c6cb3acee
2 changed files with 55 additions and 66 deletions

drivers/net/ethernet/ti/netcp_core.c

@@ -291,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
interface_list) {
struct netcp_intf_modpriv *intf_modpriv;
/* If interface not registered then register now */
if (!netcp_intf->netdev_registered)
ret = netcp_register_interface(netcp_intf);
if (ret)
return -ENODEV;
intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
GFP_KERNEL);
if (!intf_modpriv)
@@ -306,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
interface = of_parse_phandle(netcp_intf->node_interface,
module->name, 0);
if (!interface) {
devm_kfree(dev, intf_modpriv);
continue;
}
intf_modpriv->netcp_priv = netcp_intf;
intf_modpriv->netcp_module = module;
list_add_tail(&intf_modpriv->intf_list,
@@ -323,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
continue;
}
}
/* Now register the interface with netdev */
list_for_each_entry(netcp_intf,
&netcp_device->interface_head,
interface_list) {
/* If interface not registered then register now */
if (!netcp_intf->netdev_registered) {
ret = netcp_register_interface(netcp_intf);
if (ret)
return -ENODEV;
}
}
return 0;
}
@@ -357,7 +367,6 @@ int netcp_register_module(struct netcp_module *module)
if (ret < 0)
goto fail;
}
mutex_unlock(&netcp_modules_lock);
return 0;
@@ -796,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp)
netcp->rx_pool = NULL;
}
static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
struct knav_dma_desc *hwdesc;
unsigned int buf_len, dma_sz;
@@ -810,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
hwdesc = knav_pool_desc_get(netcp->rx_pool);
if (IS_ERR_OR_NULL(hwdesc)) {
dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
return;
return -ENOMEM;
}
if (likely(fdq == 0)) {
@@ -862,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
&dma_sz);
knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
return;
return 0;
fail:
knav_pool_desc_put(netcp->rx_pool, hwdesc);
return -ENOMEM;
}
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
int i;
int i, ret = 0;
/* Calculate the FDQ deficit and refill */
for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
fdq_deficit[i] = netcp->rx_queue_depths[i] -
knav_queue_get_count(netcp->rx_fdq[i]);
while (fdq_deficit[i]--)
netcp_allocate_rx_buf(netcp, i);
while (fdq_deficit[i]-- && !ret)
ret = netcp_allocate_rx_buf(netcp, i);
} /* end for fdqs */
}
@@ -893,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
packets = netcp_process_rx_packets(netcp, budget);
netcp_rxpool_refill(netcp);
if (packets < budget) {
napi_complete(&netcp->rx_napi);
knav_queue_enable_notify(netcp->rx_queue);
}
netcp_rxpool_refill(netcp);
return packets;
}
@@ -1384,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
continue;
dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
naddr->addr, naddr->type);
mutex_lock(&netcp_modules_lock);
for_each_module(netcp, priv) {
module = priv->netcp_module;
if (!module->del_addr)
@@ -1393,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
naddr);
WARN_ON(error);
}
mutex_unlock(&netcp_modules_lock);
netcp_addr_del(netcp, naddr);
}
}
@@ -1410,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
continue;
dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
naddr->addr, naddr->type);
mutex_lock(&netcp_modules_lock);
for_each_module(netcp, priv) {
module = priv->netcp_module;
if (!module->add_addr)
@@ -1418,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
error = module->add_addr(priv->module_priv, naddr);
WARN_ON(error);
}
mutex_unlock(&netcp_modules_lock);
}
}
@@ -1432,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
ndev->flags & IFF_ALLMULTI ||
netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
spin_lock(&netcp->lock);
/* first clear all marks */
netcp_addr_clear_mark(netcp);
@@ -1450,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
/* finally sweep and callout into modules */
netcp_addr_sweep_del(netcp);
netcp_addr_sweep_add(netcp);
spin_unlock(&netcp->lock);
}
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
@@ -1614,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev)
goto fail;
}
mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->open) {
@@ -1625,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev)
}
}
}
mutex_unlock(&netcp_modules_lock);
napi_enable(&netcp->rx_napi);
napi_enable(&netcp->tx_napi);
@@ -1642,7 +1649,6 @@ fail_open:
if (module->close)
module->close(intf_modpriv->module_priv, ndev);
}
mutex_unlock(&netcp_modules_lock);
fail:
netcp_free_navigator_resources(netcp);
@@ -1666,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
napi_disable(&netcp->rx_napi);
napi_disable(&netcp->tx_napi);
mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->close) {
@@ -1675,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
dev_err(netcp->ndev_dev, "Close failed\n");
}
}
mutex_unlock(&netcp_modules_lock);
/* Recycle Rx descriptors from completion queue */
netcp_empty_rx_queue(netcp);
@@ -1703,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (!module->ioctl)
@@ -1719,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
}
out:
mutex_unlock(&netcp_modules_lock);
return (ret == 0) ? 0 : err;
}
@@ -1754,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_intf_modpriv *intf_modpriv;
struct netcp_module *module;
unsigned long flags;
int err = 0;
dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
mutex_lock(&netcp_modules_lock);
spin_lock_irqsave(&netcp->lock, flags);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if ((module->add_vid) && (vid != 0)) {
@@ -1770,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
}
}
}
mutex_unlock(&netcp_modules_lock);
spin_unlock_irqrestore(&netcp->lock, flags);
return err;
}
@@ -1779,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_intf_modpriv *intf_modpriv;
struct netcp_module *module;
unsigned long flags;
int err = 0;
dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
mutex_lock(&netcp_modules_lock);
spin_lock_irqsave(&netcp->lock, flags);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->del_vid) {
@@ -1795,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
}
}
}
mutex_unlock(&netcp_modules_lock);
spin_unlock_irqrestore(&netcp->lock, flags);
return err;
}
@@ -2040,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev)
struct device_node *child, *interfaces;
struct netcp_device *netcp_device;
struct device *dev = &pdev->dev;
struct netcp_module *module;
int ret;
if (!node) {
@@ -2087,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev)
/* Add the device instance to the list */
list_add_tail(&netcp_device->device_list, &netcp_devices);
/* Probe & attach any modules already registered */
mutex_lock(&netcp_modules_lock);
for_each_netcp_module(module) {
ret = netcp_module_probe(netcp_device, module);
if (ret < 0)
dev_err(dev, "module(%s) probe failed\n", module->name);
}
mutex_unlock(&netcp_modules_lock);
return 0;
probe_quit_interface:

drivers/net/ethernet/ti/netcp_ethss.c

@@ -77,6 +77,7 @@
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_NUM_ALE_ENTRIES 1024
#define GBENU_SGMII_MODULE_SIZE 0x100
/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
@@ -149,8 +150,8 @@
#define XGBE_STATS2_MODULE 2
/* s: 0-based slave_port */
#define SGMII_BASE(s) \
(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
#define SGMII_BASE(d, s) \
(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
#define GBE_TX_QUEUE 648
#define GBE_TXHOOK_ORDER 0
@@ -1997,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
return;
if (!SLAVE_LINK_IS_XGMII(slave)) {
if (gbe_dev->ss_version == GBE_SS_VERSION_14)
sgmii_link_state =
netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
else
sgmii_link_state =
netcp_sgmii_get_port_link(
gbe_dev->sgmii_port_regs, sp);
sgmii_link_state =
netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
}
phy_link_state = gbe_phy_link_status(slave);
@@ -2100,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
static void gbe_sgmii_rtreset(struct gbe_priv *priv,
struct gbe_slave *slave, bool set)
{
void __iomem *sgmii_port_regs;
if (SLAVE_LINK_IS_XGMII(slave))
return;
if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
sgmii_port_regs = priv->sgmii_port34_regs;
else
sgmii_port_regs = priv->sgmii_port_regs;
netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
slave->slave_num, set);
}
static void gbe_slave_stop(struct gbe_intf *intf)
@@ -2136,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf)
static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
void __iomem *sgmii_port_regs;
if (SLAVE_LINK_IS_XGMII(slave))
return;
sgmii_port_regs = priv->sgmii_port_regs;
if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
sgmii_port_regs = priv->sgmii_port34_regs;
if (!SLAVE_LINK_IS_XGMII(slave)) {
netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
slave->link_interface);
}
netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
slave->link_interface);
}
static int gbe_slave_open(struct gbe_intf *gbe_intf)
@@ -2997,6 +2982,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->switch_regs = regs;
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
/* Although sgmii modules are mem mapped to one contiguous
* region on GBENU devices, setting sgmii_port34_regs allows
* consistent code when accessing sgmii api
*/
gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
(2 * GBENU_SGMII_MODULE_SIZE);
gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
for (i = 0; i < (gbe_dev->max_num_ports); i++)