tipc: check tsk->group in tipc_wait_for_cond()
tipc_wait_for_cond() drops the socket lock before going to sleep, but
tsk->group could be freed right after that release_sock(). So we have
to re-check and reload tsk->group after the task wakes up.

After this patch, tipc_wait_for_cond() returns -ERESTARTSYS when
tsk->group is NULL, instead of continuing with the assumption of a
non-NULL tsk->group.

(It looks like 'dsts' should be re-checked and reloaded too, but that
is a different bug.)

The same applies to tipc_send_group_unicast() and tipc_send_group_anycast().

Reported-by: syzbot+10a9db47c3a0e13eb31c@syzkaller.appspotmail.com
Fixes: b7d4263551 ("tipc: introduce flow control for group broadcast messages")
Fixes: ee106d7f94 ("tipc: introduce group anycast messaging")
Fixes: 27bd9ec027 ("tipc: introduce group unicast messaging")
Cc: Ying Xue <ying.xue@windriver.com>
Cc: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 143ece654f (parent 65cab850f0)
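To make the race easier to see outside the kernel, here is a minimal userspace sketch of the same pattern (pthreads, not the actual tipc_wait_for_cond() macro; struct sockish, wait_for_cond() and leaver() are invented names for illustration). The buggy pattern caches the group pointer in a local before the lock is dropped for the sleep; the fixed pattern, like the condition added by this patch, re-tests the live field after every wakeup and returns an error once the group has been torn down.

/* Userspace analog of the race fixed by this patch: a waiter that cached
 * the group pointer (like the old 'grp = tsk->group') could use freed
 * memory after sleeping; re-testing the live field inside the wait
 * condition avoids that.  All names here are invented for illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct group { int members; };

struct sockish {                     /* stand-in for struct tipc_sock */
	pthread_mutex_t lock;
	pthread_cond_t  wake;
	struct group   *group;       /* may be freed/cleared concurrently */
	bool            congested;
};

/* Wait until the send condition holds; fail if the group went away. */
static int wait_for_cond(struct sockish *s)
{
	pthread_mutex_lock(&s->lock);
	/* The condition dereferences the live s->group on every pass;
	 * nothing is cached across the sleep. */
	while (s->group && s->congested)
		pthread_cond_wait(&s->wake, &s->lock);  /* drops s->lock while asleep */
	if (!s->group) {
		pthread_mutex_unlock(&s->lock);
		return -1;           /* bail out instead of touching a stale pointer */
	}
	/* ... would proceed to use s->group under the lock here ... */
	pthread_mutex_unlock(&s->lock);
	return 0;
}

/* Analog of the path that deletes the group while the sender sleeps. */
static void *leaver(void *arg)
{
	struct sockish *s = arg;

	pthread_mutex_lock(&s->lock);
	free(s->group);
	s->group = NULL;
	pthread_cond_broadcast(&s->wake);
	pthread_mutex_unlock(&s->lock);
	return NULL;
}

int main(void)
{
	struct sockish s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
		.group = calloc(1, sizeof(struct group)),
		.congested = true,
	};
	pthread_t t;

	pthread_create(&t, NULL, leaver, &s);
	printf("wait_for_cond() -> %d\n", wait_for_cond(&s));
	pthread_join(t, NULL);
	return 0;
}

Built with -pthread, the sketch prints wait_for_cond() -> -1 once the leaver thread has freed and cleared the group. The detail mirrored by the patch is that the wait condition evaluates s->group itself on every wakeup rather than a copy taken before the lock was released.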
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -880,7 +880,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
 	struct net *net = sock_net(sk);
 	struct tipc_member *mb = NULL;
 	u32 node, port;
@@ -894,7 +893,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port, blks, &mb));
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port, blks,
+						 &mb));
 	if (unlikely(rc))
 		return rc;
 
@@ -924,7 +925,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct list_head *cong_links = &tsk->cong_links;
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_member *first = NULL;
 	struct tipc_member *mbr = NULL;
@@ -941,9 +941,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	type = msg_nametype(hdr);
 	inst = dest->addr.name.name.instance;
 	scope = msg_lookup_scope(hdr);
-	exclude = tipc_group_exclude(grp);
 
 	while (++lookups < 4) {
+		exclude = tipc_group_exclude(tsk->group);
+
 		first = NULL;
 
 		/* Look for a non-congested destination member, if any */
@@ -952,7 +953,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 					&dstcnt, exclude, false))
 			return -EHOSTUNREACH;
 		tipc_dest_pop(&dsts, &node, &port);
-		cong = tipc_group_cong(grp, node, port, blks, &mbr);
+		cong = tipc_group_cong(tsk->group, node, port, blks,
+				       &mbr);
 		if (!cong)
 			break;
 		if (mbr == first)
@@ -971,7 +973,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port,
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port,
 						 blks, &mbr));
 	if (unlikely(rc))
 		return rc;
@@ -1006,8 +1009,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	struct sock *sk = sock->sk;
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
-	struct tipc_nlist *dsts = tipc_group_dests(grp);
+	struct tipc_nlist *dsts = tipc_group_dests(tsk->group);
 	struct tipc_mc_method *method = &tsk->mc_method;
 	bool ack = method->mandatory && method->rcast;
 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1020,8 +1022,9 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 		return -EHOSTUNREACH;
 
 	/* Block or return if any destination link or member is congested */
-	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
-				!tipc_group_bc_cong(grp, blks));
+	rc = tipc_wait_for_cond(sock, &timeout,
+				!tsk->cong_link_cnt && tsk->group &&
+				!tipc_group_bc_cong(tsk->group, blks));
 	if (unlikely(rc))
 		return rc;
 
@@ -1036,7 +1039,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
 	msg_set_destport(hdr, 0);
 	msg_set_destnode(hdr, 0);
-	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
 
 	/* Avoid getting stuck with repeated forced replicasts */
 	msg_set_grp_bc_ack_req(hdr, ack);