net: use skb_queue_empty_lockless() in busy poll contexts

Busy polling usually runs without locks.
Let's use skb_queue_empty_lockless() instead of skb_queue_empty().

Also use READ_ONCE() in __skb_try_recv_datagram() to address
a similar potential problem.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3f926af3f4 (parent 3ef7cf57c7)
Author:    Eric Dumazet <edumazet@google.com>
Date:      2019-10-23 22:44:51 -07:00
Committer: David S. Miller

 6 files changed, 6 insertions(+), 6 deletions(-)
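For context, skb_queue_empty_lockless() was introduced by the parent commit
(3ef7cf57c7). It differs from skb_queue_empty() only in wrapping the
head-pointer load in READ_ONCE(), so the emptiness check stays sound when the
queue is inspected without holding its lock. From include/linux/skbuff.h,
lightly abridged:

static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *)list;
}

/* Safe to call without list->lock held: READ_ONCE() guarantees a single,
 * untorn load of list->next that the compiler cannot cache or re-fetch.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *)list;
}

Every call site below tests sk->sk_receive_queue from a busy-polling path that
holds no queue lock, which is why the plain helper is swapped out throughout.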

drivers/crypto/chelsio/chtls/chtls_io.c

@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		return peekmsg(sk, msg, len, nonblock, flags);
 
 	if (sk_can_busy_loop(sk) &&
-	    skb_queue_empty(&sk->sk_receive_queue) &&
+	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
 	    sk->sk_state == TCP_ESTABLISHED)
 		sk_busy_loop(sk, nonblock);

drivers/nvme/host/tcp.c

@@ -2219,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
 	struct nvme_tcp_queue *queue = hctx->driver_data;
 	struct sock *sk = queue->sock->sk;
 
-	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
 		sk_busy_loop(sk, true);
 	nvme_tcp_try_recv(queue);
 	return queue->nr_cqe;

net/core/datagram.c

@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
 			break;
 
 		sk_busy_loop(sk, flags & MSG_DONTWAIT);
-	} while (sk->sk_receive_queue.prev != *last);
+	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
 	error = -EAGAIN;
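The loop above re-checks sk_receive_queue.prev with no lock held. Without the
annotation, the compiler may legally hoist that load out of the loop or tear
it; READ_ONCE() forces a fresh, single load per iteration. For scalar types it
is essentially a volatile access; a simplified sketch (the real macro in
include/linux/compiler.h also handles non-scalar sizes and adds KCSAN
instrumentation) is:

/* Simplified: evaluate to a single, untorn load of x every time. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))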

net/core/sock.c

@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
 	struct sock *sk = p;
 
-	return !skb_queue_empty(&sk->sk_receive_queue) ||
+	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 	       sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
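sk_busy_loop_end() is the termination callback that sk_busy_loop() passes to
napi_busy_loop(), and it is invoked from inside the NAPI polling loop with no
receive-queue lock held, so the lockless test is the correct one here.
Roughly, from include/net/busy_poll.h in this era of the tree:

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
#endif
}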

net/ipv4/tcp.c

@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 	if (unlikely(flags & MSG_ERRQUEUE))
 		return inet_recv_error(sk, msg, len, addr_len);
 
-	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
 	    (sk->sk_state == TCP_ESTABLISHED))
 		sk_busy_loop(sk, nonblock);

net/sctp/socket.c

@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 		if (sk_can_busy_loop(sk)) {
 			sk_busy_loop(sk, noblock);
 
-			if (!skb_queue_empty(&sk->sk_receive_queue))
+			if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 				continue;
 		}
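To see the same bug class outside the kernel, here is a hypothetical userspace
analogy (all names invented for illustration; C11 atomics stand in for
READ_ONCE()/WRITE_ONCE()). A consumer busy-polls a pointer published by a
producer thread; the explicit atomic load is the counterpart of the lockless
queue check, whereas a plain load of a non-atomic pointer would be a data race
the compiler may hoist out of the loop:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the queue head: non-NULL means "queue not empty". */
static _Atomic(int *) queue_head;
static int item = 42;

static void *producer(void *arg)
{
	(void)arg;
	/* Publish the item; plays the role of the locked enqueue. */
	atomic_store_explicit(&queue_head, &item, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	/* Busy-poll without a lock, as sk_busy_loop() does. The explicit
	 * atomic load forces a fresh, untorn read on every iteration. */
	while (atomic_load_explicit(&queue_head, memory_order_acquire) == NULL)
		;	/* spin */

	printf("consumed %d\n", *queue_head);
	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread. Dropping the _Atomic qualifier and spinning on a plain
pointer would compile, but the loop could then legally spin forever.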