Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree,
they are:

1) Fix OOM that syzkaller triggers with ipt_replace.size = -1 and the
   IPT_SO_SET_REPLACE socket option, from Dmitry Vyukov.

2) Check for too long extension names in xt_request_find_{match|target},
   which otherwise result in out-of-bounds reads, from Eric Dumazet.

3) Fix memory exhaustion bug in ipset hash:*net* types when adding ranges
   that look like x.x.x.x-255.255.255.255, from Jozsef Kadlecsik.

4) Fix pointer leaks to userspace in x_tables, from Dmitry Vyukov.

5) Fix insufficient sanity checks in clusterip_tg_check(), also from
   Dmitry.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b9a40729e7
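Fix #3 is the pattern that recurs throughout the ipset hunks below: each
"while (ip <= ip_to)" loop that stepped with "ip = last + 1" becomes a
"do { ... } while (ip++ < ip_to)" loop, because for a range ending at
255.255.255.255 the "last + 1" step wraps a u32 around to 0 and the old
condition never becomes false. A minimal standalone sketch of that
wrap-around and of the patched iteration pattern (plain C, not kernel
code; the values and names are illustrative only):

	/* Standalone illustration only -- not from the kernel tree. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint32_t ip_to = 0xffffffff;	/* 255.255.255.255 */
		uint32_t ip = 0xfffffffe;		/* 255.255.255.254 */
		unsigned int visited = 0;

		/*
		 * Old pattern: "while (ip <= ip_to) { ...; ip = last + 1; }"
		 * never terminates for this range, since last + 1 wraps to 0
		 * and 0 <= 0xffffffff stays true forever.
		 *
		 * Patched pattern: increment inside the condition, so the body
		 * runs once for the final address and then stops at the wrap.
		 */
		do {
			visited++;	/* stand-in for adding one block to the set */
		} while (ip++ < ip_to);

		printf("visited %u addresses\n", visited);	/* prints 2 */
		return 0;
	}

The same reasoning applies to the inner ip2 loops in the hash:ip,port,net,
hash:net,net and hash:net,port,net hunks.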
@@ -1255,11 +1255,8 @@ int ip_setsockopt(struct sock *sk, int level,
 	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
 			optname != IP_IPSEC_POLICY &&
 			optname != IP_XFRM_POLICY &&
-			!ip_mroute_opt(optname)) {
-		lock_sock(sk);
+			!ip_mroute_opt(optname))
 		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
-		release_sock(sk);
-	}
 #endif
 	return err;
 }
@@ -1284,12 +1281,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
 	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
 			optname != IP_IPSEC_POLICY &&
 			optname != IP_XFRM_POLICY &&
-			!ip_mroute_opt(optname)) {
-		lock_sock(sk);
-		err = compat_nf_setsockopt(sk, PF_INET, optname,
-					   optval, optlen);
-		release_sock(sk);
-	}
+			!ip_mroute_opt(optname))
+		err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
+					   optlen);
 #endif
 	return err;
 }
@@ -431,7 +431,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 	struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
 	const struct ipt_entry *e = par->entryinfo;
 	struct clusterip_config *config;
-	int ret;
+	int ret, i;
 
 	if (par->nft_compat) {
 		pr_err("cannot use CLUSTERIP target from nftables compat\n");
@@ -450,8 +450,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 		pr_info("Please specify destination IP\n");
 		return -EINVAL;
 	}
-
-	/* FIXME: further sanity checks */
+	if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
+		pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
+		return -EINVAL;
+	}
+	for (i = 0; i < cipinfo->num_local_nodes; i++) {
+		if (cipinfo->local_nodes[i] - 1 >=
+		    sizeof(config->local_nodes) * 8) {
+			pr_info("bad local_nodes[%d] %u\n",
+				i, cipinfo->local_nodes[i]);
+			return -EINVAL;
+		}
+	}
 
 	config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
 	if (!config) {
@@ -213,15 +213,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 	struct nf_conntrack_tuple tuple;
 
 	memset(&tuple, 0, sizeof(tuple));
+
+	lock_sock(sk);
 	tuple.src.u3.ip = inet->inet_rcv_saddr;
 	tuple.src.u.tcp.port = inet->inet_sport;
 	tuple.dst.u3.ip = inet->inet_daddr;
 	tuple.dst.u.tcp.port = inet->inet_dport;
 	tuple.src.l3num = PF_INET;
 	tuple.dst.protonum = sk->sk_protocol;
+	release_sock(sk);
 
 	/* We only do TCP and SCTP at the moment: is there a better way? */
-	if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
+	if (tuple.dst.protonum != IPPROTO_TCP &&
+	    tuple.dst.protonum != IPPROTO_SCTP) {
 		pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
 		return -ENOPROTOOPT;
 	}
@@ -923,12 +923,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
 #ifdef CONFIG_NETFILTER
 	/* we need to exclude all possible ENOPROTOOPTs except default case */
 	if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
-			optname != IPV6_XFRM_POLICY) {
-		lock_sock(sk);
-		err = nf_setsockopt(sk, PF_INET6, optname, optval,
-				optlen);
-		release_sock(sk);
-	}
+			optname != IPV6_XFRM_POLICY)
+		err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
 #endif
 	return err;
 }
@@ -958,12 +954,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
 #ifdef CONFIG_NETFILTER
 	/* we need to exclude all possible ENOPROTOOPTs except default case */
 	if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
-			optname != IPV6_XFRM_POLICY) {
-		lock_sock(sk);
-		err = compat_nf_setsockopt(sk, PF_INET6, optname,
-					   optval, optlen);
-		release_sock(sk);
-	}
+			optname != IPV6_XFRM_POLICY)
+		err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
+					   optlen);
 #endif
 	return err;
 }
@@ -221,20 +221,27 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
 static int
 ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 {
-	const struct inet_sock *inet = inet_sk(sk);
+	struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
 	const struct ipv6_pinfo *inet6 = inet6_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 	const struct nf_conntrack_tuple_hash *h;
 	struct sockaddr_in6 sin6;
-	struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
 	struct nf_conn *ct;
+	__be32 flow_label;
+	int bound_dev_if;
 
+	lock_sock(sk);
 	tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
 	tuple.src.u.tcp.port = inet->inet_sport;
 	tuple.dst.u3.in6 = sk->sk_v6_daddr;
 	tuple.dst.u.tcp.port = inet->inet_dport;
 	tuple.dst.protonum = sk->sk_protocol;
+	bound_dev_if = sk->sk_bound_dev_if;
+	flow_label = inet6->flow_label;
+	release_sock(sk);
 
-	if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
+	if (tuple.dst.protonum != IPPROTO_TCP &&
+	    tuple.dst.protonum != IPPROTO_SCTP)
 		return -ENOPROTOOPT;
 
 	if (*len < 0 || (unsigned int) *len < sizeof(sin6))
@@ -252,14 +259,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 
 	sin6.sin6_family = AF_INET6;
 	sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
-	sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
+	sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
 	memcpy(&sin6.sin6_addr,
 		&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
 		sizeof(sin6.sin6_addr));
 
 	nf_ct_put(ct);
-	sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
-						 sk->sk_bound_dev_if);
+	sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
 	return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
 }
 
@@ -168,7 +168,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+	u32 ip2_from = 0, ip2_to = 0, ip2;
 	bool with_ports = false;
 	u8 cidr;
 	int ret;
@@ -269,22 +269,21 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip);
+		p = ntohs(h->next.port);
+		ip2 = ntohl(h->next.ip2);
+	} else {
+		p = port;
+		ip2 = ip2_from;
+	}
 	for (; ip <= ip_to; ip++) {
 		e.ip = htonl(ip);
-		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
-						       : port;
 		for (; p <= port_to; p++) {
 			e.port = htons(p);
-			ip2 = retried &&
-			      ip == ntohl(h->next.ip) &&
-			      p == ntohs(h->next.port)
-				? ntohl(h->next.ip2) : ip2_from;
-			while (ip2 <= ip2_to) {
+			do {
 				e.ip2 = htonl(ip2);
-				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
-								&cidr);
+				ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
 				e.cidr = cidr - 1;
 				ret = adtfn(set, &e, &ext, &ext, flags);
 
@@ -292,9 +291,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 					return ret;
 
 				ret = 0;
-				ip2 = ip2_last + 1;
-			}
+			} while (ip2++ < ip2_to);
+			ip2 = ip2_from;
 		}
+		p = port;
 	}
 	return ret;
 }
@@ -143,7 +143,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = { .cidr = HOST_MASK };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, last;
+	u32 ip = 0, ip_to = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -193,16 +193,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	}
 	if (retried)
 		ip = ntohl(h->next.ip);
-	while (ip <= ip_to) {
+	do {
 		e.ip = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
 		ret = adtfn(set, &e, &ext, &ext, flags);
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
 
 		ret = 0;
-		ip = last + 1;
-	}
+	} while (ip++ < ip_to);
 
 	return ret;
 }
@@ -200,7 +200,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, last;
+	u32 ip = 0, ip_to = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -255,17 +255,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	while (ip <= ip_to) {
+	do {
 		e.ip = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
 		ret = adtfn(set, &e, &ext, &ext, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
 
 		ret = 0;
-		ip = last + 1;
-	}
+	} while (ip++ < ip_to);
 
 	return ret;
 }
@@ -169,8 +169,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, last;
-	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
+	u32 ip = 0, ip_to = 0;
+	u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -247,27 +247,27 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip[0]);
+		ip2 = ntohl(h->next.ip[1]);
+	} else {
+		ip2 = ip2_from;
+	}
 
-	while (ip <= ip_to) {
+	do {
 		e.ip[0] = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
-		ip2 = (retried &&
-		       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
-						   : ip2_from;
-		while (ip2 <= ip2_to) {
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+		do {
 			e.ip[1] = htonl(ip2);
-			last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
+			ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
 			ret = adtfn(set, &e, &ext, &ext, flags);
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 
 			ret = 0;
-			ip2 = last2 + 1;
-		}
-		ip = last + 1;
-	}
+		} while (ip2++ < ip2_to);
+		ip2 = ip2_from;
+	} while (ip++ < ip_to);
 
 	return ret;
 }
@@ -161,7 +161,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
+	u32 port, port_to, p = 0, ip = 0, ip_to = 0;
 	bool with_ports = false;
 	u8 cidr;
 	int ret;
@@ -239,25 +239,26 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip);
-	while (ip <= ip_to) {
+		p = ntohs(h->next.port);
+	} else {
+		p = port;
+	}
+	do {
 		e.ip = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+		ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
 		e.cidr = cidr - 1;
-		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
-						       : port;
 		for (; p <= port_to; p++) {
 			e.port = htons(p);
 			ret = adtfn(set, &e, &ext, &ext, flags);
-
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 
 			ret = 0;
 		}
-		ip = last + 1;
-	}
+		p = port;
+	} while (ip++ < ip_to);
 	return ret;
 }
 
@@ -184,8 +184,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netportnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
-	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+	u32 ip2_from = 0, ip2_to = 0, ip2;
 	bool with_ports = false;
 	int ret;
 
@@ -288,33 +288,34 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip[0]);
+		p = ntohs(h->next.port);
+		ip2 = ntohl(h->next.ip[1]);
+	} else {
+		p = port;
+		ip2 = ip2_from;
+	}
 
-	while (ip <= ip_to) {
+	do {
 		e.ip[0] = htonl(ip);
-		ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
-		p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
-							  : port;
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
 		for (; p <= port_to; p++) {
 			e.port = htons(p);
-			ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
-			       p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
-							 : ip2_from;
-			while (ip2 <= ip2_to) {
+			do {
 				e.ip[1] = htonl(ip2);
-				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+				ip2 = ip_set_range_to_cidr(ip2, ip2_to,
 							   &e.cidr[1]);
 				ret = adtfn(set, &e, &ext, &ext, flags);
 				if (ret && !ip_set_eexist(ret, flags))
 					return ret;
 
 				ret = 0;
-				ip2 = ip2_last + 1;
-			}
+			} while (ip2++ < ip2_to);
+			ip2 = ip2_from;
 		}
-		ip = ip_last + 1;
-	}
+		p = port;
+	} while (ip++ < ip_to);
 	return ret;
 }
 
@@ -39,7 +39,6 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
 
-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
 #define XT_PCPU_BLOCK_SIZE 4096
 
 struct compat_delta {
@@ -210,6 +209,9 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 {
 	struct xt_match *match;
 
+	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
+		return ERR_PTR(-EINVAL);
+
 	match = xt_find_match(nfproto, name, revision);
 	if (IS_ERR(match)) {
 		request_module("%st_%s", xt_prefix[nfproto], name);
@@ -252,6 +254,9 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 {
 	struct xt_target *target;
 
+	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
+		return ERR_PTR(-EINVAL);
+
 	target = xt_find_target(af, name, revision);
 	if (IS_ERR(target)) {
 		request_module("%st_%s", xt_prefix[af], name);
@@ -1000,7 +1005,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 		return NULL;
 
 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
+	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
 		return NULL;
 
 	info = kvmalloc(sz, GFP_KERNEL);
@@ -252,6 +252,7 @@ static struct xt_target idletimer_tg __read_mostly = {
 	.family		= NFPROTO_UNSPEC,
 	.target		= idletimer_tg_target,
 	.targetsize	= sizeof(struct idletimer_tg_info),
+	.usersize	= offsetof(struct idletimer_tg_info, timer),
 	.checkentry	= idletimer_tg_checkentry,
 	.destroy	= idletimer_tg_destroy,
 	.me		= THIS_MODULE,
@@ -198,6 +198,7 @@ static struct xt_target led_tg_reg __read_mostly = {
 	.family		= NFPROTO_UNSPEC,
 	.target		= led_tg,
 	.targetsize	= sizeof(struct xt_led_info),
+	.usersize	= offsetof(struct xt_led_info, internal_data),
 	.checkentry	= led_tg_check,
 	.destroy	= led_tg_destroy,
 	.me		= THIS_MODULE,
@@ -193,9 +193,8 @@ static struct xt_match limit_mt_reg __read_mostly = {
 	.compatsize       = sizeof(struct compat_xt_rateinfo),
 	.compat_from_user = limit_mt_compat_from_user,
 	.compat_to_user   = limit_mt_compat_to_user,
-#else
-	.usersize         = offsetof(struct xt_rateinfo, prev),
 #endif
+	.usersize         = offsetof(struct xt_rateinfo, prev),
 	.me               = THIS_MODULE,
 };
 
@@ -62,6 +62,7 @@ static struct xt_match nfacct_mt_reg __read_mostly = {
 	.match      = nfacct_mt,
 	.destroy    = nfacct_mt_destroy,
 	.matchsize  = sizeof(struct xt_nfacct_match_info),
+	.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
 	.me         = THIS_MODULE,
 };
 
@@ -84,6 +84,7 @@ static struct xt_match xt_statistic_mt_reg __read_mostly = {
 	.checkentry = statistic_mt_check,
 	.destroy    = statistic_mt_destroy,
 	.matchsize  = sizeof(struct xt_statistic_info),
+	.usersize   = offsetof(struct xt_statistic_info, master),
 	.me         = THIS_MODULE,
 };
 