net: tun: remove useless codes of tun_automq_select_queue

Because __skb_get_hash_symmetric() always returns a non-zero hash, the
zero-hash fallback paths (the rx-queue-based queue selection in
tun_automq_select_queue() and the hash check in tun_automq_xmit()) are
dead code and can be removed.

Signed-off-by: Zhang Yu <zhangyu31@baidu.com>
Signed-off-by: Wang Li <wangli39@baidu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Wang Li 2018-10-09 10:32:04 +08:00 committed by David S. Miller
parent 0c465be183
commit 4b035271fe
1 changed file with 13 additions and 22 deletions

View File

@ -562,12 +562,11 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
e->rps_rxhash = hash; e->rps_rxhash = hash;
} }
/* We try to identify a flow through its rxhash first. The reason that /* We try to identify a flow through its rxhash. The reason that
* we do not check rxq no. is because some cards(e.g 82599), chooses * we do not check rxq no. is because some cards(e.g 82599), chooses
* the rxq based on the txq where the last packet of the flow comes. As * the rxq based on the txq where the last packet of the flow comes. As
* the userspace application move between processors, we may get a * the userspace application move between processors, we may get a
* different rxq no. here. If we could not get rxhash, then we would * different rxq no. here.
* hope the rxq no. may help here.
*/ */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{ {
@ -578,18 +577,13 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
numqueues = READ_ONCE(tun->numqueues); numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb); txq = __skb_get_hash_symmetric(skb);
if (txq) { e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); if (e) {
if (e) { tun_flow_save_rps_rxhash(e, txq);
tun_flow_save_rps_rxhash(e, txq); txq = e->queue_index;
txq = e->queue_index; } else {
} else /* use multiply and shift instead of expensive divide */
/* use multiply and shift instead of expensive divide */ txq = ((u64)txq * numqueues) >> 32;
txq = ((u64)txq * numqueues) >> 32;
} else if (likely(skb_rx_queue_recorded(skb))) {
txq = skb_get_rx_queue(skb);
while (unlikely(txq >= numqueues))
txq -= numqueues;
} }
return txq; return txq;
@ -1047,16 +1041,13 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
/* Select queue was not called for the skbuff, so we extract the /* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here. * RPS hash and save it into the flow_table here.
*/ */
struct tun_flow_entry *e;
__u32 rxhash; __u32 rxhash;
rxhash = __skb_get_hash_symmetric(skb); rxhash = __skb_get_hash_symmetric(skb);
if (rxhash) { e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
struct tun_flow_entry *e; if (e)
e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], tun_flow_save_rps_rxhash(e, rxhash);
rxhash);
if (e)
tun_flow_save_rps_rxhash(e, rxhash);
}
} }
#endif #endif
} }