net: remove dev_txq_stats_fold()
After recent changes (percpu stats on vlan/tunnels...), we no longer need
the per struct netdev_queue tx_bytes/tx_packets/tx_dropped counters.
Only remaining users are ixgbe, sch_teql, gianfar & macvlan :
1) ixgbe can be converted to use existing tx_ring counters.
2) macvlan incremented txq->tx_dropped; it can use the
dev->stats.tx_dropped counter instead.
3) sch_teql : almost revert ab35cd4b8f
(Use net_device internal stats)
Now we have ndo_get_stats64(), use it, even for "unsigned long"
fields (No need to bring back a struct net_device_stats)
4) gianfar adds a stats structure per tx queue to hold
tx_bytes/tx_packets
This removes a lockdep warning (and possible lockup) in the rndis gadget,
which calls dev_get_stats() from hard IRQ context.
Ref: http://www.spinics.net/lists/netdev/msg149202.html
Reported-by: Neil Jones <neiljay@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Jarek Poplawski <jarkao2@gmail.com>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Sandeep Gopalpet <sandeep.kumar@freescale.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
1949e084bf
commit
1ac9ad1394
|
@ -433,7 +433,6 @@ static void gfar_init_mac(struct net_device *ndev)
|
||||||
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
|
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
|
||||||
{
|
{
|
||||||
struct gfar_private *priv = netdev_priv(dev);
|
struct gfar_private *priv = netdev_priv(dev);
|
||||||
struct netdev_queue *txq;
|
|
||||||
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
|
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
|
||||||
unsigned long tx_packets = 0, tx_bytes = 0;
|
unsigned long tx_packets = 0, tx_bytes = 0;
|
||||||
int i = 0;
|
int i = 0;
|
||||||
|
@ -449,9 +448,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
|
||||||
dev->stats.rx_dropped = rx_dropped;
|
dev->stats.rx_dropped = rx_dropped;
|
||||||
|
|
||||||
for (i = 0; i < priv->num_tx_queues; i++) {
|
for (i = 0; i < priv->num_tx_queues; i++) {
|
||||||
txq = netdev_get_tx_queue(dev, i);
|
tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
|
||||||
tx_bytes += txq->tx_bytes;
|
tx_packets += priv->tx_queue[i]->stats.tx_packets;
|
||||||
tx_packets += txq->tx_packets;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dev->stats.tx_bytes = tx_bytes;
|
dev->stats.tx_bytes = tx_bytes;
|
||||||
|
@ -2108,8 +2106,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Update transmit stats */
|
/* Update transmit stats */
|
||||||
txq->tx_bytes += skb->len;
|
tx_queue->stats.tx_bytes += skb->len;
|
||||||
txq->tx_packets ++;
|
tx_queue->stats.tx_packets++;
|
||||||
|
|
||||||
txbdp = txbdp_start = tx_queue->cur_tx;
|
txbdp = txbdp_start = tx_queue->cur_tx;
|
||||||
lstatus = txbdp->lstatus;
|
lstatus = txbdp->lstatus;
|
||||||
|
|
|
@ -907,12 +907,21 @@ enum {
|
||||||
MQ_MG_MODE
|
MQ_MG_MODE
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Per TX queue stats
|
||||||
|
*/
|
||||||
|
struct tx_q_stats {
|
||||||
|
unsigned long tx_packets;
|
||||||
|
unsigned long tx_bytes;
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct gfar_priv_tx_q - per tx queue structure
|
* struct gfar_priv_tx_q - per tx queue structure
|
||||||
* @txlock: per queue tx spin lock
|
* @txlock: per queue tx spin lock
|
||||||
* @tx_skbuff:skb pointers
|
* @tx_skbuff:skb pointers
|
||||||
* @skb_curtx: to be used skb pointer
|
* @skb_curtx: to be used skb pointer
|
||||||
* @skb_dirtytx:the last used skb pointer
|
* @skb_dirtytx:the last used skb pointer
|
||||||
|
* @stats: bytes/packets stats
|
||||||
* @qindex: index of this queue
|
* @qindex: index of this queue
|
||||||
* @dev: back pointer to the dev structure
|
* @dev: back pointer to the dev structure
|
||||||
* @grp: back pointer to the group to which this queue belongs
|
* @grp: back pointer to the group to which this queue belongs
|
||||||
|
@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
|
||||||
struct txbd8 *tx_bd_base;
|
struct txbd8 *tx_bd_base;
|
||||||
struct txbd8 *cur_tx;
|
struct txbd8 *cur_tx;
|
||||||
struct txbd8 *dirty_tx;
|
struct txbd8 *dirty_tx;
|
||||||
|
struct tx_q_stats stats;
|
||||||
struct net_device *dev;
|
struct net_device *dev;
|
||||||
struct gfar_priv_grp *grp;
|
struct gfar_priv_grp *grp;
|
||||||
u16 skb_curtx;
|
u16 skb_curtx;
|
||||||
|
|
|
@ -6667,8 +6667,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
|
||||||
struct ixgbe_adapter *adapter,
|
struct ixgbe_adapter *adapter,
|
||||||
struct ixgbe_ring *tx_ring)
|
struct ixgbe_ring *tx_ring)
|
||||||
{
|
{
|
||||||
struct net_device *netdev = tx_ring->netdev;
|
|
||||||
struct netdev_queue *txq;
|
|
||||||
unsigned int first;
|
unsigned int first;
|
||||||
unsigned int tx_flags = 0;
|
unsigned int tx_flags = 0;
|
||||||
u8 hdr_len = 0;
|
u8 hdr_len = 0;
|
||||||
|
@ -6765,9 +6763,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
|
||||||
/* add the ATR filter if ATR is on */
|
/* add the ATR filter if ATR is on */
|
||||||
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
|
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
|
||||||
ixgbe_atr(tx_ring, skb, tx_flags, protocol);
|
ixgbe_atr(tx_ring, skb, tx_flags, protocol);
|
||||||
txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
|
|
||||||
txq->tx_bytes += skb->len;
|
|
||||||
txq->tx_packets++;
|
|
||||||
ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
|
ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
|
||||||
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
|
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
|
||||||
|
|
||||||
|
@ -6925,8 +6920,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
|
||||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
/* accurate rx/tx bytes/packets stats */
|
|
||||||
dev_txq_stats_fold(netdev, stats);
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||||
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
|
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
|
||||||
|
@ -6943,6 +6936,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
|
||||||
stats->rx_bytes += bytes;
|
stats->rx_bytes += bytes;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < adapter->num_tx_queues; i++) {
|
||||||
|
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
|
||||||
|
u64 bytes, packets;
|
||||||
|
unsigned int start;
|
||||||
|
|
||||||
|
if (ring) {
|
||||||
|
do {
|
||||||
|
start = u64_stats_fetch_begin_bh(&ring->syncp);
|
||||||
|
packets = ring->stats.packets;
|
||||||
|
bytes = ring->stats.bytes;
|
||||||
|
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
|
||||||
|
stats->tx_packets += packets;
|
||||||
|
stats->tx_bytes += bytes;
|
||||||
|
}
|
||||||
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
/* following stats updated by ixgbe_watchdog_task() */
|
/* following stats updated by ixgbe_watchdog_task() */
|
||||||
stats->multicast = netdev->stats.multicast;
|
stats->multicast = netdev->stats.multicast;
|
||||||
|
|
|
@ -585,7 +585,7 @@ err:
|
||||||
rcu_read_lock_bh();
|
rcu_read_lock_bh();
|
||||||
vlan = rcu_dereference(q->vlan);
|
vlan = rcu_dereference(q->vlan);
|
||||||
if (vlan)
|
if (vlan)
|
||||||
netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
|
vlan->dev->stats.tx_dropped++;
|
||||||
rcu_read_unlock_bh();
|
rcu_read_unlock_bh();
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
|
|
|
@ -520,9 +520,6 @@ struct netdev_queue {
|
||||||
* please use this field instead of dev->trans_start
|
* please use this field instead of dev->trans_start
|
||||||
*/
|
*/
|
||||||
unsigned long trans_start;
|
unsigned long trans_start;
|
||||||
u64 tx_bytes;
|
|
||||||
u64 tx_packets;
|
|
||||||
u64 tx_dropped;
|
|
||||||
} ____cacheline_aligned_in_smp;
|
} ____cacheline_aligned_in_smp;
|
||||||
|
|
||||||
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
|
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
|
||||||
|
@ -2265,8 +2262,6 @@ extern void dev_load(struct net *net, const char *name);
|
||||||
extern void dev_mcast_init(void);
|
extern void dev_mcast_init(void);
|
||||||
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
|
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
|
||||||
struct rtnl_link_stats64 *storage);
|
struct rtnl_link_stats64 *storage);
|
||||||
extern void dev_txq_stats_fold(const struct net_device *dev,
|
|
||||||
struct rtnl_link_stats64 *stats);
|
|
||||||
|
|
||||||
extern int netdev_max_backlog;
|
extern int netdev_max_backlog;
|
||||||
extern int netdev_tstamp_prequeue;
|
extern int netdev_tstamp_prequeue;
|
||||||
|
|
|
@ -5523,34 +5523,6 @@ void netdev_run_todo(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* dev_txq_stats_fold - fold tx_queues stats
|
|
||||||
* @dev: device to get statistics from
|
|
||||||
* @stats: struct rtnl_link_stats64 to hold results
|
|
||||||
*/
|
|
||||||
void dev_txq_stats_fold(const struct net_device *dev,
|
|
||||||
struct rtnl_link_stats64 *stats)
|
|
||||||
{
|
|
||||||
u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
|
|
||||||
unsigned int i;
|
|
||||||
struct netdev_queue *txq;
|
|
||||||
|
|
||||||
for (i = 0; i < dev->num_tx_queues; i++) {
|
|
||||||
txq = netdev_get_tx_queue(dev, i);
|
|
||||||
spin_lock_bh(&txq->_xmit_lock);
|
|
||||||
tx_bytes += txq->tx_bytes;
|
|
||||||
tx_packets += txq->tx_packets;
|
|
||||||
tx_dropped += txq->tx_dropped;
|
|
||||||
spin_unlock_bh(&txq->_xmit_lock);
|
|
||||||
}
|
|
||||||
if (tx_bytes || tx_packets || tx_dropped) {
|
|
||||||
stats->tx_bytes = tx_bytes;
|
|
||||||
stats->tx_packets = tx_packets;
|
|
||||||
stats->tx_dropped = tx_dropped;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(dev_txq_stats_fold);
|
|
||||||
|
|
||||||
/* Convert net_device_stats to rtnl_link_stats64. They have the same
|
/* Convert net_device_stats to rtnl_link_stats64. They have the same
|
||||||
* fields in the same order, with only the type differing.
|
* fields in the same order, with only the type differing.
|
||||||
*/
|
*/
|
||||||
|
@ -5594,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
|
||||||
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
|
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
|
||||||
} else {
|
} else {
|
||||||
netdev_stats_to_stats64(storage, &dev->stats);
|
netdev_stats_to_stats64(storage, &dev->stats);
|
||||||
dev_txq_stats_fold(dev, storage);
|
|
||||||
}
|
}
|
||||||
storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
|
storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
|
||||||
return storage;
|
return storage;
|
||||||
|
|
|
@ -59,6 +59,10 @@ struct teql_master
|
||||||
struct net_device *dev;
|
struct net_device *dev;
|
||||||
struct Qdisc *slaves;
|
struct Qdisc *slaves;
|
||||||
struct list_head master_list;
|
struct list_head master_list;
|
||||||
|
unsigned long tx_bytes;
|
||||||
|
unsigned long tx_packets;
|
||||||
|
unsigned long tx_errors;
|
||||||
|
unsigned long tx_dropped;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct teql_sched_data
|
struct teql_sched_data
|
||||||
|
@ -274,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb,
|
||||||
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
|
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
{
|
{
|
||||||
struct teql_master *master = netdev_priv(dev);
|
struct teql_master *master = netdev_priv(dev);
|
||||||
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
|
|
||||||
struct Qdisc *start, *q;
|
struct Qdisc *start, *q;
|
||||||
int busy;
|
int busy;
|
||||||
int nores;
|
int nores;
|
||||||
|
@ -314,8 +317,8 @@ restart:
|
||||||
__netif_tx_unlock(slave_txq);
|
__netif_tx_unlock(slave_txq);
|
||||||
master->slaves = NEXT_SLAVE(q);
|
master->slaves = NEXT_SLAVE(q);
|
||||||
netif_wake_queue(dev);
|
netif_wake_queue(dev);
|
||||||
txq->tx_packets++;
|
master->tx_packets++;
|
||||||
txq->tx_bytes += length;
|
master->tx_bytes += length;
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
__netif_tx_unlock(slave_txq);
|
__netif_tx_unlock(slave_txq);
|
||||||
|
@ -342,10 +345,10 @@ restart:
|
||||||
netif_stop_queue(dev);
|
netif_stop_queue(dev);
|
||||||
return NETDEV_TX_BUSY;
|
return NETDEV_TX_BUSY;
|
||||||
}
|
}
|
||||||
dev->stats.tx_errors++;
|
master->tx_errors++;
|
||||||
|
|
||||||
drop:
|
drop:
|
||||||
txq->tx_dropped++;
|
master->tx_dropped++;
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
|
@ -398,6 +401,18 @@ static int teql_master_close(struct net_device *dev)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
|
||||||
|
struct rtnl_link_stats64 *stats)
|
||||||
|
{
|
||||||
|
struct teql_master *m = netdev_priv(dev);
|
||||||
|
|
||||||
|
stats->tx_packets = m->tx_packets;
|
||||||
|
stats->tx_bytes = m->tx_bytes;
|
||||||
|
stats->tx_errors = m->tx_errors;
|
||||||
|
stats->tx_dropped = m->tx_dropped;
|
||||||
|
return stats;
|
||||||
|
}
|
||||||
|
|
||||||
static int teql_master_mtu(struct net_device *dev, int new_mtu)
|
static int teql_master_mtu(struct net_device *dev, int new_mtu)
|
||||||
{
|
{
|
||||||
struct teql_master *m = netdev_priv(dev);
|
struct teql_master *m = netdev_priv(dev);
|
||||||
|
@ -422,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = {
|
||||||
.ndo_open = teql_master_open,
|
.ndo_open = teql_master_open,
|
||||||
.ndo_stop = teql_master_close,
|
.ndo_stop = teql_master_close,
|
||||||
.ndo_start_xmit = teql_master_xmit,
|
.ndo_start_xmit = teql_master_xmit,
|
||||||
|
.ndo_get_stats64 = teql_master_stats64,
|
||||||
.ndo_change_mtu = teql_master_mtu,
|
.ndo_change_mtu = teql_master_mtu,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue