[NET]: Memory barrier cleanups
I believe all the memory barriers below only matter on SMP, so the smp_* variant of each barrier should be used.

I'm wondering if the barrier in net/ipv4/inet_timewait_sock.c should be dropped entirely: schedule_work()'s implementation currently implies a memory barrier, and I think sane semantics of schedule_work() should imply a memory barrier as needed, so the caller shouldn't have to worry about it.

It's not quite obvious why the barrier in net/packet/af_packet.c is needed; maybe it should be implied through flush_dcache_page()?

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e16aa207cc
parent 26db167702
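Most of the sites touched below implement the classic publish/consume pattern: a writer fills in data, issues a write barrier, then sets a flag; the reader tests the flag, issues a read barrier, then reads the data. That ordering only matters when another CPU can observe the stores, which is why the smp_* variants suffice — on uniprocessor builds they shrink to a compiler barrier. A minimal userspace sketch of the pairing, using C11 fences as stand-ins for smp_wmb()/smp_rmb() (the payload/ready names are illustrative, not kernel code):

/* Minimal userspace analogue of the smp_wmb()/smp_rmb() pairing.
 * The release fence plays the role of smp_wmb(); the acquire fence
 * plays the role of smp_rmb(). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* data published by the writer */
static atomic_int ready;	/* flag the reader polls        */

static void *writer(void *arg)
{
	(void)arg;
	payload = 42;					/* 1: fill in the data  */
	atomic_thread_fence(memory_order_release);	/* 2: ~ smp_wmb()       */
	atomic_store_explicit(&ready, 1,		/* 3: publish the flag  */
			      memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;					/* 1: wait for the flag */
	atomic_thread_fence(memory_order_acquire);	/* 2: ~ smp_rmb()       */
	printf("payload = %d\n", payload);		/* 3: guaranteed 42     */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}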
@@ -2130,7 +2130,7 @@ int iw_handler_set_spy(struct net_device * dev,
 	 * The rtnl_lock() make sure we don't race with the other iw_handlers.
 	 * This make sure wireless_spy_update() "see" that the spy list
 	 * is temporarily disabled. */
-	wmb();
+	smp_wmb();
 
 	/* Are there are addresses to copy? */
 	if(wrqu->data.length > 0) {
@@ -2159,7 +2159,7 @@ int iw_handler_set_spy(struct net_device * dev,
 	}
 
 	/* Make sure above is updated before re-enabling */
-	wmb();
+	smp_wmb();
 
 	/* Enable addresses */
 	spydata->spy_number = wrqu->data.length;
@@ -178,7 +178,7 @@ void inet_twdr_hangman(unsigned long data)
 		need_timer = 0;
 		if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
 			twdr->thread_slots |= (1 << twdr->slot);
-			mb();
+			smp_mb();
 			schedule_work(&twdr->twkill_work);
 			need_timer = 1;
 		} else {
@@ -4235,7 +4235,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * Change state from SYN-SENT only after copied_seq
 		 * is initialized. */
 		tp->copied_seq = tp->rcv_nxt;
-		mb();
+		smp_mb();
 		tcp_set_state(sk, TCP_ESTABLISHED);
 
 		security_inet_conn_established(sk, skb);
@@ -4483,7 +4483,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	case TCP_SYN_RECV:
 		if (acceptable) {
 			tp->copied_seq = tp->rcv_nxt;
-			mb();
+			smp_mb();
 			tcp_set_state(sk, TCP_ESTABLISHED);
 			sk->sk_state_change(sk);
 
@@ -660,7 +660,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
 	sll->sll_ifindex = dev->ifindex;
 
 	h->tp_status = status;
-	mb();
+	smp_mb();
 
 	{
 		struct page *p_start, *p_end;
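The af_packet barrier is the one the changelog is least sure about, and it is the odd one out: its reader is not another kernel path but userspace polling tp_status in the mmap()ed RX ring, so the smp_mb() keeps the frame contents ordered before the status word that publishes them. A hedged sketch of that consuming side, assuming the classic TPACKET_V1 layout (hypothetical example, not part of this commit; error handling omitted, requires CAP_NET_RAW):

/* Sketch of a TPACKET_V1 RX-ring consumer: the loop below is the
 * reader that the smp_mb() in tpacket_rcv() pairs with. */
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <poll.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>

#define FRAME_SIZE 2048u
#define FRAME_NR   64u

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct tpacket_req req = {
		.tp_block_size = FRAME_SIZE * FRAME_NR,	/* one big block */
		.tp_block_nr   = 1,
		.tp_frame_size = FRAME_SIZE,
		.tp_frame_nr   = FRAME_NR,
	};
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));

	char *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (unsigned i = 0; ; i = (i + 1) % FRAME_NR) {
		struct tpacket_hdr *h = (void *)(ring + i * FRAME_SIZE);

		/* tp_status is the flag the kernel sets after its smp_mb() */
		while (!(h->tp_status & TP_STATUS_USER)) {
			struct pollfd pfd = { .fd = fd, .events = POLLIN };
			poll(&pfd, 1, -1);
		}
		__sync_synchronize();	/* read barrier before frame data */

		printf("frame %u: %u bytes\n", i, (unsigned)h->tp_len);

		h->tp_status = TP_STATUS_KERNEL;	/* hand back to kernel */
		__sync_synchronize();
	}
	/* not reached */
	close(fd);
	return 0;
}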