net: mvneta: Make the default queue related for each port

Instead of using the same default queue for all the ports, move it into the
port struct. This will allow each port to have a different default queue.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Gregory CLEMENT 2015-12-09 18:23:48 +01:00 committed by David S. Miller
parent 6e71b29908
commit 90b74c013d
1 changed file with 18 additions and 15 deletions

View File

@ -356,6 +356,7 @@ struct mvneta_port {
struct mvneta_tx_queue *txqs; struct mvneta_tx_queue *txqs;
struct net_device *dev; struct net_device *dev;
struct notifier_block cpu_notifier; struct notifier_block cpu_notifier;
int rxq_def;
/* Core clock */ /* Core clock */
struct clk *clk; struct clk *clk;
@ -819,7 +820,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_TXQ_CMD, q_map); mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
/* Enable all initialized RXQs. */ /* Enable all initialized RXQs. */
mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def)); mvreg_write(pp, MVNETA_RXQ_CMD, BIT(pp->rxq_def));
} }
/* Stop the Ethernet port activity */ /* Stop the Ethernet port activity */
@ -1067,7 +1068,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_ACC_MODE, val); mvreg_write(pp, MVNETA_ACC_MODE, val);
/* Update val of portCfg register accordingly with all RxQueue types */ /* Update val of portCfg register accordingly with all RxQueue types */
val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def); val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
mvreg_write(pp, MVNETA_PORT_CONFIG, val); mvreg_write(pp, MVNETA_PORT_CONFIG, val);
val = 0; val = 0;
@ -2101,19 +2102,19 @@ static void mvneta_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { if (dev->flags & IFF_PROMISC) {
/* Accept all: Multicast + Unicast */ /* Accept all: Multicast + Unicast */
mvneta_rx_unicast_promisc_set(pp, 1); mvneta_rx_unicast_promisc_set(pp, 1);
mvneta_set_ucast_table(pp, rxq_def); mvneta_set_ucast_table(pp, pp->rxq_def);
mvneta_set_special_mcast_table(pp, rxq_def); mvneta_set_special_mcast_table(pp, pp->rxq_def);
mvneta_set_other_mcast_table(pp, rxq_def); mvneta_set_other_mcast_table(pp, pp->rxq_def);
} else { } else {
/* Accept single Unicast */ /* Accept single Unicast */
mvneta_rx_unicast_promisc_set(pp, 0); mvneta_rx_unicast_promisc_set(pp, 0);
mvneta_set_ucast_table(pp, -1); mvneta_set_ucast_table(pp, -1);
mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
if (dev->flags & IFF_ALLMULTI) { if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast */ /* Accept all multicast */
mvneta_set_special_mcast_table(pp, rxq_def); mvneta_set_special_mcast_table(pp, pp->rxq_def);
mvneta_set_other_mcast_table(pp, rxq_def); mvneta_set_other_mcast_table(pp, pp->rxq_def);
} else { } else {
/* Accept only initialized multicast */ /* Accept only initialized multicast */
mvneta_set_special_mcast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1);
@ -2122,7 +2123,7 @@ static void mvneta_set_rx_mode(struct net_device *dev)
if (!netdev_mc_empty(dev)) { if (!netdev_mc_empty(dev)) {
netdev_for_each_mc_addr(ha, dev) { netdev_for_each_mc_addr(ha, dev) {
mvneta_mcast_addr_set(pp, ha->addr, mvneta_mcast_addr_set(pp, ha->addr,
rxq_def); pp->rxq_def);
} }
} }
} }
@ -2205,7 +2206,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
* RX packets * RX packets
*/ */
cause_rx_tx |= port->cause_rx_tx; cause_rx_tx |= port->cause_rx_tx;
rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); rx_done = mvneta_rx(pp, budget, &pp->rxqs[pp->rxq_def]);
budget -= rx_done; budget -= rx_done;
if (budget > 0) { if (budget > 0) {
@ -2418,17 +2419,17 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp)
/* Cleanup all Rx queues */ /* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp) static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{ {
mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]); mvneta_rxq_deinit(pp, &pp->rxqs[pp->rxq_def]);
} }
/* Init all Rx queues */ /* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp) static int mvneta_setup_rxqs(struct mvneta_port *pp)
{ {
int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]); int err = mvneta_rxq_init(pp, &pp->rxqs[pp->rxq_def]);
if (err) { if (err) {
netdev_err(pp->dev, "%s: can't create rxq=%d\n", netdev_err(pp->dev, "%s: can't create rxq=%d\n",
__func__, rxq_def); __func__, pp->rxq_def);
mvneta_cleanup_rxqs(pp); mvneta_cleanup_rxqs(pp);
return err; return err;
} }
@ -2634,7 +2635,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
mvneta_mac_addr_set(pp, dev->dev_addr, -1); mvneta_mac_addr_set(pp, dev->dev_addr, -1);
/* Set new addr in hw */ /* Set new addr in hw */
mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
eth_commit_mac_addr_change(dev, addr); eth_commit_mac_addr_change(dev, addr);
return 0; return 0;
@ -2753,7 +2754,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
{ {
int online_cpu_idx, cpu, i = 0; int online_cpu_idx, cpu, i = 0;
online_cpu_idx = rxq_def % num_online_cpus(); online_cpu_idx = pp->rxq_def % num_online_cpus();
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
if (i == online_cpu_idx) if (i == online_cpu_idx)
@ -3363,6 +3364,8 @@ static int mvneta_probe(struct platform_device *pdev)
strcmp(managed, "in-band-status") == 0); strcmp(managed, "in-band-status") == 0);
pp->cpu_notifier.notifier_call = mvneta_percpu_notifier; pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
pp->rxq_def = rxq_def;
pp->clk = devm_clk_get(&pdev->dev, NULL); pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) { if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk); err = PTR_ERR(pp->clk);