Revert "net: simplify sock_poll_wait"
This reverts commit dd979b4df8.
This broke tcp_poll for SMC fallback: An AF_SMC socket establishes an
internal TCP socket for the initial handshake with the remote peer.
Whenever the SMC connection cannot be established, this TCP socket is
used as a fallback.
forwarded to the TCP socket. In case of poll, the file->private_data
pointer references the SMC socket because the TCP socket has no file
assigned. This causes tcp_poll to wait on the wrong socket.
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
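For context, a minimal sketch of the fallback poll path the message describes; it is simplified from net/smc/af_smc.c and only illustrative (names and structure assumed), not the exact upstream code:

/* Illustrative sketch: when the SMC handshake fails, poll is forwarded
 * to the internal TCP socket (clcsock). 'file' still belongs to the SMC
 * socket, so file->private_data points at the SMC socket, not the TCP one.
 */
static __poll_t smc_poll_sketch(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct smc_sock *smc = smc_sk(sock->sk);

	if (smc->use_fallback)
		/* tcp_poll() runs here; it must wait on the 'sock' argument
		 * (the TCP socket) rather than on file->private_data, which
		 * is why sock_poll_wait() needs an explicit sock parameter.
		 */
		return smc->clcsock->ops->poll(file, smc->clcsock, wait);

	/* ... native SMC poll handling elided ... */
	return 0;
}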
commit 89ab066d42 (parent 6b7a02f708)
@@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock,
 	struct af_alg_ctx *ctx = ask->private;
 	__poll_t mask;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	mask = 0;
 
 	if (!ctx->more || ctx->used)
@@ -2059,14 +2059,20 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
 /**
  * sock_poll_wait - place memory barrier behind the poll_wait call.
  * @filp: file
+ * @sock: socket to wait on
  * @p: poll_table
  *
  * See the comments in the wq_has_sleeper function.
+ *
+ * Do not derive sock from filp->private_data here. An SMC socket establishes
+ * an internal TCP socket that is used in the fallback case. All socket
+ * operations on the SMC socket are then forwarded to the TCP socket. In case of
+ * poll, the filp->private_data pointer references the SMC socket because the
+ * TCP socket has no file assigned.
  */
-static inline void sock_poll_wait(struct file *filp, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, struct socket *sock,
+				  poll_table *p)
 {
-	struct socket *sock = filp->private_data;
-
 	if (!poll_does_not_wait(p)) {
 		poll_wait(filp, &sock->wq->wait, p);
 		/* We need to be sure we are in sync with the
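Pieced together from the hunk above, the restored helper reads roughly as follows; the tail of the barrier comment is abbreviated, so treat this as a sketch rather than a verbatim copy of include/net/sock.h:

static inline void sock_poll_wait(struct file *filp, struct socket *sock,
				  poll_table *p)
{
	if (!poll_does_not_wait(p)) {
		/* Wait on the socket passed in by the caller, never on
		 * filp->private_data (see the kernel-doc comment above).
		 */
		poll_wait(filp, &sock->wq->wait, p);
		/* Memory barrier paired with the one in wq_has_sleeper()
		 * (upstream comment abbreviated here).
		 */
		smp_mb();
	}
}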
@@ -653,7 +653,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct atm_vcc *vcc;
 	__poll_t mask;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	mask = 0;
 
 	vcc = ATM_SD(sock);
@@ -941,7 +941,7 @@ static __poll_t caif_poll(struct file *file,
 	__poll_t mask;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -837,7 +837,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	__poll_t mask;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -325,7 +325,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
 	__poll_t mask;
 	struct sock *sk = sock->sk;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	if (sk->sk_state == DCCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
@@ -507,7 +507,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	int state;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
@@ -1504,7 +1504,7 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	__poll_t mask = 0;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 
 	if (sk->sk_state == IUCV_LISTEN)
 		return iucv_accept_poll(sk);
@@ -556,7 +556,7 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
 
 	pr_debug("%p\n", sk);
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 
 	if (sk->sk_state == LLCP_LISTEN)
 		return llcp_accept_poll(sk);
@@ -756,7 +756,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
 	struct rxrpc_sock *rx = rxrpc_sk(sk);
 	__poll_t mask;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	mask = 0;
 
 	/* the socket is readable if there are any messages waiting on the Rx
@@ -1543,7 +1543,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 		mask |= EPOLLERR;
 	} else {
 		if (sk->sk_state != SMC_CLOSED)
-			sock_poll_wait(file, wait);
+			sock_poll_wait(file, sock, wait);
 		if (sk->sk_err)
 			mask |= EPOLLERR;
 		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -717,7 +717,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 	struct tipc_sock *tsk = tipc_sk(sk);
 	__poll_t revents = 0;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
@@ -2642,7 +2642,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
 	struct sock *sk = sock->sk;
 	__poll_t mask;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -2679,7 +2679,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 	unsigned int writable;
 	__poll_t mask;
 
-	sock_poll_wait(file, wait);
+	sock_poll_wait(file, sock, wait);
 	mask = 0;
 
 	/* exceptional events? */