Merge branch 'net-more-bulk-free-users'

Jesper Dangaard Brouer says:

====================
net: bulk free adjustment and two driver use-cases

I've split out the bulk free adjustments from the bulk alloc patches,
as I want the adjustment to napi_consume_skb to land in the same kernel
cycle the API was introduced in.

Adjustments based on discussion:
 Subj: "mlx4: use napi_consume_skb API to get bulk free operations"
 http://thread.gmane.org/gmane.linux.network/402503/focus=403386

Patchset based on net-next at commit 3ebeac1d02

V4: more nitpicks from Sergei
V3: spelling fixes from Sergei
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5d6084142e
Committed by David S. Miller, 2016-03-13 22:35:36 -04:00

5 changed files with 15 additions and 12 deletions
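For orientation before the per-file diffs: all three conversions follow the
same pattern. The NAPI poll callback receives a budget and threads it down to
the TX completion handler, which hands it to napi_consume_skb(); a positive
budget means NAPI context (skbs may be deferred and freed in bulk), while zero
flags a non-NAPI caller. A minimal sketch of that calling convention, with
hypothetical my_* names standing in for driver-specific types, not code from
this patchset:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver state; real drivers hang this off their channel. */
struct my_txq {
	struct sk_buff **skbs;		/* completed, not-yet-freed skbs */
	unsigned int cons, prod, mask;
};

struct my_channel {
	struct napi_struct napi;
	struct my_txq txq;
};

/* TX completion: the budget is only forwarded, never decremented here. */
static bool my_poll_tx_cq(struct my_txq *q, int napi_budget)
{
	while (q->cons != q->prod) {
		struct sk_buff *skb = q->skbs[q->cons++ & q->mask];

		/* budget > 0: NAPI context, skb eligible for bulk free;
		 * budget == 0: non-NAPI caller, freed via dev_consume_skb_any()
		 */
		napi_consume_skb(skb, napi_budget);
	}
	return false;	/* no more TX work pending */
}

/* NAPI poll callback: pass the budget straight through. */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_channel *c = container_of(napi, struct my_channel, napi);

	if (my_poll_tx_cq(&c->txq, budget))
		return budget;	/* still busy, keep polling */

	napi_complete(napi);
	return 0;
}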

drivers/net/ethernet/mellanox/mlx4/en_tx.c

@@ -276,7 +276,8 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
 
 static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 				struct mlx4_en_tx_ring *ring,
-				int index, u8 owner, u64 timestamp)
+				int index, u8 owner, u64 timestamp,
+				int napi_mode)
 {
 	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
 	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
@@ -347,7 +348,8 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 			}
 		}
 	}
-	dev_consume_skb_any(skb);
+	napi_consume_skb(skb, napi_mode);
 	return tx_info->nr_txbb;
 }
 
@@ -371,7 +373,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	while (ring->cons != ring->prod) {
 		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
 						ring->cons & ring->size_mask,
-						!!(ring->cons & ring->size), 0);
+						!!(ring->cons & ring->size), 0,
+						0 /* Non-NAPI caller */);
 		ring->cons += ring->last_nr_txbb;
 		cnt++;
 	}
@@ -385,7 +388,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 }
 
 static bool mlx4_en_process_tx_cq(struct net_device *dev,
-				  struct mlx4_en_cq *cq)
+				  struct mlx4_en_cq *cq, int napi_budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
@@ -451,7 +454,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 			last_nr_txbb = mlx4_en_free_tx_desc(
 					priv, ring, ring_index,
 					!!((ring_cons + txbbs_skipped) &
-					ring->size), timestamp);
+					ring->size), timestamp, napi_budget);
 
 			mlx4_en_stamp_wqe(priv, ring, stamp_index,
 					  !!((ring_cons + txbbs_stamp) &
@@ -511,7 +514,7 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int clean_complete;
 
-	clean_complete = mlx4_en_process_tx_cq(dev, cq);
+	clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
 
 	if (!clean_complete)
 		return budget;
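One detail in the mlx4 hunks above deserves a callout: mlx4_en_free_tx_buf()
runs at ring teardown, outside any NAPI poll, which is why it passes the
literal 0 ("Non-NAPI caller") as napi_mode. Combined with the skbuff.c change
at the end of this merge, that zero steers the skb to dev_consume_skb_any()
instead of the bulk-free path. Condensed into the same hypothetical my_*
sketch as above:

/* Teardown sketch: not in NAPI context, so the budget must be 0 and
 * napi_consume_skb() degrades to a plain dev_consume_skb_any().
 */
static void my_free_tx_ring(struct my_txq *q)
{
	while (q->cons != q->prod)
		napi_consume_skb(q->skbs[q->cons++ & q->mask],
				 0 /* non-NAPI caller */);
}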

drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -629,7 +629,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);

drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

@@ -339,7 +339,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	return mlx5e_sq_xmit(sq, skb);
 }
 
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
 	struct mlx5e_sq *sq;
 	u32 dma_fifo_cc;
@@ -411,7 +411,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 			npkts++;
 			nbytes += wi->num_bytes;
 			sqcc += wi->num_wqebbs;
-			dev_kfree_skb(skb);
+			napi_consume_skb(skb, napi_budget);
 		} while (!last_wqe);
 	}

drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c

@@ -60,7 +60,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
 
 	for (i = 0; i < c->num_tc; i++)
-		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
 
 	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
 	busy |= work_done == budget;

net/core/skbuff.c

@@ -801,9 +801,9 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
 	if (unlikely(!skb))
 		return;
 
-	/* if budget is 0 assume netpoll w/ IRQs disabled */
+	/* Zero budget indicate non-NAPI context called us, like netpoll */
 	if (unlikely(!budget)) {
-		dev_consume_skb_irq(skb);
+		dev_consume_skb_any(skb);
 		return;
 	}
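This last hunk is what makes the zero-budget convention safe. Previously the
only zero-budget caller was netpoll, with IRQs disabled, so
dev_consume_skb_irq() was a valid fallback; now mlx4_en_free_tx_buf() calls in
from ordinary process context, where unconditionally deferring to the IRQ
completion queue is the wrong tool. dev_consume_skb_any() checks the execution
context itself. Roughly, as a paraphrase of the fallback's behavior rather
than the verbatim kernel source:

/* Sketch of why dev_consume_skb_any() suits any zero-budget caller:
 * it selects the free path based on the current execution context.
 */
static inline void consume_skb_any_sketch(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_consume_skb_irq(skb);	/* unsafe to free here; defer */
	else
		consume_skb(skb);		/* process context: free directly */
}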