Merge branch 'rfs-lockless-annotate'
Eric Dumazet says:

====================
rfs: annotate lockless accesses

rfs runs without locks held, so we should annotate reads and writes
to shared variables.

It should prevent compilers forcing writes in the following situation:

	if (var != val)
		var = val;

A compiler could indeed simply avoid the conditional:

	var = val;

This matters if var is shared between many cpus.

v2: aligns one closing bracket (Simon)
    adds Fixes: tags (Jakub)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
e3144ff52f
|
@ -768,8 +768,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
|
|||
/* We only give a hint, preemption can change CPU under us */
|
||||
val |= raw_smp_processor_id();
|
||||
|
||||
if (table->ents[index] != val)
|
||||
table->ents[index] = val;
|
||||
/* The following WRITE_ONCE() is paired with the READ_ONCE()
|
||||
* here, and another one in get_rps_cpu().
|
||||
*/
|
||||
if (READ_ONCE(table->ents[index]) != val)
|
||||
WRITE_ONCE(table->ents[index], val);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1152,8 +1152,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
|
|||
* OR an additional socket flag
|
||||
* [1] : sk_state and sk_prot are in the same cache line.
|
||||
*/
|
||||
if (sk->sk_state == TCP_ESTABLISHED)
|
||||
sock_rps_record_flow_hash(sk->sk_rxhash);
|
||||
if (sk->sk_state == TCP_ESTABLISHED) {
|
||||
/* This READ_ONCE() is paired with the WRITE_ONCE()
|
||||
* from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
|
||||
*/
|
||||
sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@ -1162,15 +1166,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
|
|||
const struct sk_buff *skb)
|
||||
{
|
||||
#ifdef CONFIG_RPS
|
||||
if (unlikely(sk->sk_rxhash != skb->hash))
|
||||
sk->sk_rxhash = skb->hash;
|
||||
/* The following WRITE_ONCE() is paired with the READ_ONCE()
|
||||
* here, and another one in sock_rps_record_flow().
|
||||
*/
|
||||
if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
|
||||
WRITE_ONCE(sk->sk_rxhash, skb->hash);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void sock_rps_reset_rxhash(struct sock *sk)
|
||||
{
|
||||
#ifdef CONFIG_RPS
|
||||
sk->sk_rxhash = 0;
|
||||
/* Paired with READ_ONCE() in sock_rps_record_flow() */
|
||||
WRITE_ONCE(sk->sk_rxhash, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -4471,8 +4471,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
|
|||
u32 next_cpu;
|
||||
u32 ident;
|
||||
|
||||
/* First check into global flow table if there is a match */
|
||||
ident = sock_flow_table->ents[hash & sock_flow_table->mask];
|
||||
/* First check into global flow table if there is a match.
|
||||
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
|
||||
*/
|
||||
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
|
||||
if ((ident ^ hash) & ~rps_cpu_mask)
|
||||
goto try_rps;
|
||||
|
||||
|
|
Loading…
Reference in New Issue