inet{6}_request_sock: Init ->opt and ->pktopts in the constructor
Wei Yongjun noticed that we may call reqsk_free() on request sock objects whose ->opt field was never initialized. Fix this by introducing inet_reqsk_alloc(), which initializes ->opt to NULL, and by also setting ->pktopts to NULL in inet6_reqsk_alloc().

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 45d465bc23
commit ce4a7d0d48
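For context on the bug: reqsk_free() invokes the protocol's request_sock destructor, and on the early-error paths touched below that destructor could run before ->opt / ->pktopts had ever been assigned. A rough sketch of the relevant callbacks, paraphrased from the TCP code of this era for illustration only (not part of this diff):

/* Paraphrased sketch, not part of this diff: reqsk_free() calls the
 * per-protocol destructor, which frees fields the caller may not have
 * initialized yet. */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);	/* garbage pointer if ->opt was never set */
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)	/* same hazard for ->pktopts */
		kfree_skb(inet6_rsk(req)->pktopts);
}

Initializing these fields in the inet{6}_reqsk_alloc() constructors means every error path can safely call reqsk_free().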
@@ -396,8 +396,10 @@ static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *op
 {
 	struct request_sock *req = reqsk_alloc(ops);
 
-	if (req != NULL)
+	if (req != NULL) {
 		inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
+		inet6_rsk(req)->pktopts = NULL;
+	}
 
 	return req;
 }
@@ -197,4 +197,14 @@ static inline int inet_iif(const struct sk_buff *skb)
 	return skb->rtable->rt_iif;
 }
 
+static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
+{
+	struct request_sock *req = reqsk_alloc(ops);
+
+	if (req != NULL)
+		inet_rsk(req)->opt = NULL;
+
+	return req;
+}
+
 #endif /* _INET_SOCK_H */
@@ -589,7 +589,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
-	req = reqsk_alloc(&dccp_request_sock_ops);
+	req = inet_reqsk_alloc(&dccp_request_sock_ops);
 	if (req == NULL)
 		goto drop;
 
@@ -605,7 +605,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq = inet_rsk(req);
 	ireq->loc_addr = ip_hdr(skb)->daddr;
 	ireq->rmt_addr = ip_hdr(skb)->saddr;
-	ireq->opt = NULL;
 
 	/*
 	 * Step 3: Process LISTEN state
@@ -421,7 +421,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq6 = inet6_rsk(req);
 	ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
 	ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
-	ireq6->pktopts = NULL;
 
 	if (ipv6_opt_accepted(sk, skb) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -285,7 +285,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	cookie_check_timestamp(&tcp_opt);
 
 	ret = NULL;
-	req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */
+	req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
 	if (!req)
 		goto out;
 
@@ -301,7 +301,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	ireq->rmt_port = th->source;
 	ireq->loc_addr = ip_hdr(skb)->daddr;
 	ireq->rmt_addr = ip_hdr(skb)->saddr;
-	ireq->opt = NULL;
 	ireq->snd_wscale = tcp_opt.snd_wscale;
 	ireq->rcv_wscale = tcp_opt.rcv_wscale;
 	ireq->sack_ok = tcp_opt.sack_ok;
@@ -1285,7 +1285,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
-	req = reqsk_alloc(&tcp_request_sock_ops);
+	req = inet_reqsk_alloc(&tcp_request_sock_ops);
 	if (!req)
 		goto drop;
 
@@ -198,7 +198,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq = inet_rsk(req);
 	ireq6 = inet6_rsk(req);
 	treq = tcp_rsk(req);
-	ireq6->pktopts = NULL;
 
 	if (security_inet_conn_request(sk, skb, req)) {
 		reqsk_free(req);
@@ -1299,7 +1299,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	treq = inet6_rsk(req);
 	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
 	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
-	treq->pktopts = NULL;
 	if (!want_cookie)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
 