net: Enable Tx queue selection based on Rx queues

This patch adds support for selecting the Tx queue based on the receive
queue(s) map configured by the administrator through the per-Tx-queue
sysfs attribute. If the receive-queue map does not yield a valid Tx
queue, selection falls back to the CPU(s) map based selection, and
finally to hashing.

Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Amritha Nambiar, 2018-06-29 21:27:02 -07:00; committed by David S. Miller
parent c6345ce7d3
commit fc9bab24e9
2 changed files with 55 additions and 17 deletions

View File

@ -1730,6 +1730,16 @@ static inline void sk_rx_queue_clear(struct sock *sk)
#endif #endif
} }
#ifdef CONFIG_XPS
/* Return the Rx queue recorded on @sk, or -1 if none has been set. */
static inline int sk_rx_queue_get(const struct sock *sk)
{
	if (!sk || sk->sk_rx_queue_mapping == NO_QUEUE_MAPPING)
		return -1;
	return sk->sk_rx_queue_mapping;
}
#endif
static inline void sk_set_socket(struct sock *sk, struct socket *sock) static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{ {
sk_tx_queue_clear(sk); sk_tx_queue_clear(sk);

View File

@ -3459,21 +3459,13 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
} }
#endif /* CONFIG_NET_EGRESS */ #endif /* CONFIG_NET_EGRESS */
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS #ifdef CONFIG_XPS
struct xps_dev_maps *dev_maps; static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
struct xps_dev_maps *dev_maps, unsigned int tci)
{
struct xps_map *map; struct xps_map *map;
int queue_index = -1; int queue_index = -1;
if (!static_key_false(&xps_needed))
return -1;
rcu_read_lock();
dev_maps = rcu_dereference(dev->xps_cpus_map);
if (dev_maps) {
unsigned int tci = skb->sender_cpu - 1;
if (dev->num_tc) { if (dev->num_tc) {
tci *= dev->num_tc; tci *= dev->num_tc;
tci += netdev_get_prio_tc_map(dev, skb->priority); tci += netdev_get_prio_tc_map(dev, skb->priority);
@ -3484,11 +3476,47 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
if (map->len == 1) if (map->len == 1)
queue_index = map->queues[0]; queue_index = map->queues[0];
else else
queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), queue_index = map->queues[reciprocal_scale(
map->len)]; skb_get_hash(skb), map->len)];
if (unlikely(queue_index >= dev->real_num_tx_queues)) if (unlikely(queue_index >= dev->real_num_tx_queues))
queue_index = -1; queue_index = -1;
} }
return queue_index;
}
#endif
static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
struct xps_dev_maps *dev_maps;
struct sock *sk = skb->sk;
int queue_index = -1;
if (!static_key_false(&xps_needed))
return -1;
rcu_read_lock();
if (!static_key_false(&xps_rxqs_needed))
goto get_cpus_map;
dev_maps = rcu_dereference(dev->xps_rxqs_map);
if (dev_maps) {
int tci = sk_rx_queue_get(sk);
if (tci >= 0 && tci < dev->num_rx_queues)
queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
tci);
}
get_cpus_map:
if (queue_index < 0) {
dev_maps = rcu_dereference(dev->xps_cpus_map);
if (dev_maps) {
unsigned int tci = skb->sender_cpu - 1;
queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
tci);
}
} }
rcu_read_unlock(); rcu_read_unlock();