Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree,
they are:

1) Fix SIP conntrack with phones sending session descriptions for
   different media types but the same port numbers, from Florian Westphal.

2) Fix incorrect rtnl_lock mutex logic in the IPVS sync thread, from
   Julian Anastasov.

3) Skip the compat array allocation in ebtables if there are no entries,
   also from Florian.

4) Do not lose bits when shifting marks left/right in xt_connmark, from
   Jack Ma.

5) Silence a false positive memleak report in conntrack extensions, from
   Cong Wang.

6) Fix CONFIG_NF_REJECT_IPV6=m link problems, from Arnd Bergmann.

7) Cannot kfree a rule that is already in the list in nf_tables, switch
   the order so this error handling is not required, from Florian Westphal.

8) Release the set name in the error path, from Florian.

9) Include kmemleak.h in nf_conntrack_extend.c, from Stephen Rothwell.

10) NAT chain and extensions depend on NF_TABLES.

11) Fix out-of-bound access when renaming chains, from Taehee Yoo.

12) Incorrect casting in xt_connmark leads to wrong bit-shifting.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 77621f024d
@@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info,
{
    unsigned int size = info->entries_size;
    const void *entries = info->entries;
    int ret;

    newinfo->entries_size = size;

    ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
    if (ret)
        return ret;
    if (info->nentries) {
        int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
                                         info->nentries);
        if (ret)
            return ret;
    }

    return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
                             entries, newinfo);

@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
      fields such as the source, destination, flowlabel, hop-limit and
      the packet mark.

if NF_NAT_IPV6

config NFT_CHAIN_NAT_IPV6
    tristate "IPv6 nf_tables nat chain support"
    help
      This option enables the "nat" chain for IPv6 in nf_tables. This
      chain type is used to perform Network Address Translation (NAT)
      packet transformations such as the source, destination address and
      source and destination ports.

config NFT_MASQ_IPV6
    tristate "IPv6 masquerade support for nf_tables"
    depends on NFT_MASQ
    select NF_NAT_MASQUERADE_IPV6
    help
      This is the expression that provides IPv4 masquerading support for
      nf_tables.

config NFT_REDIR_IPV6
    tristate "IPv6 redirect support for nf_tables"
    depends on NFT_REDIR
    select NF_NAT_REDIRECT
    help
      This is the expression that provides IPv4 redirect support for
      nf_tables.

endif # NF_NAT_IPV6

config NFT_REJECT_IPV6
    select NF_REJECT_IPV6
    default NFT_REJECT

@@ -107,39 +135,12 @@ config NF_NAT_IPV6

if NF_NAT_IPV6

config NFT_CHAIN_NAT_IPV6
    depends on NF_TABLES_IPV6
    tristate "IPv6 nf_tables nat chain support"
    help
      This option enables the "nat" chain for IPv6 in nf_tables. This
      chain type is used to perform Network Address Translation (NAT)
      packet transformations such as the source, destination address and
      source and destination ports.

config NF_NAT_MASQUERADE_IPV6
    tristate "IPv6 masquerade support"
    help
      This is the kernel functionality to provide NAT in the masquerade
      flavour (automatic source address selection) for IPv6.

config NFT_MASQ_IPV6
    tristate "IPv6 masquerade support for nf_tables"
    depends on NF_TABLES_IPV6
    depends on NFT_MASQ
    select NF_NAT_MASQUERADE_IPV6
    help
      This is the expression that provides IPv4 masquerading support for
      nf_tables.

config NFT_REDIR_IPV6
    tristate "IPv6 redirect support for nf_tables"
    depends on NF_TABLES_IPV6
    depends on NFT_REDIR
    select NF_NAT_REDIRECT
    help
      This is the expression that provides IPv4 redirect support for
      nf_tables.

endif # NF_NAT_IPV6

config IP6_NF_IPTABLES

@@ -594,6 +594,7 @@ config NFT_QUOTA
config NFT_REJECT
    default m if NETFILTER_ADVANCED=n
    tristate "Netfilter nf_tables reject support"
    depends on !NF_TABLES_INET || (IPV6!=m || m)
    help
      This option adds the "reject" expression that you can use to
      explicitly deny and notify via TCP reset/ICMP informational errors

@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
        strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
                sizeof(cfg.mcast_ifn));
        cfg.syncid = dm->syncid;
        rtnl_lock();
        mutex_lock(&ipvs->sync_mutex);
        ret = start_sync_thread(ipvs, &cfg, dm->state);
        mutex_unlock(&ipvs->sync_mutex);
        rtnl_unlock();
    } else {
        mutex_lock(&ipvs->sync_mutex);
        ret = stop_sync_thread(ipvs, dm->state);

@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
    if (ipvs->mixed_address_family_dests > 0)
        return -EINVAL;

    rtnl_lock();
    mutex_lock(&ipvs->sync_mutex);
    ret = start_sync_thread(ipvs, &c,
                            nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
    mutex_unlock(&ipvs->sync_mutex);
    rtnl_unlock();
    return ret;
}

@@ -49,6 +49,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>

#include <asm/unaligned.h>      /* Used for ntoh_seq and hton_seq */

@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
/*
 *      Specifiy default interface for outgoing multicasts
 */
static int set_mcast_if(struct sock *sk, char *ifname)
static int set_mcast_if(struct sock *sk, struct net_device *dev)
{
    struct net_device *dev;
    struct inet_sock *inet = inet_sk(sk);
    struct net *net = sock_net(sk);

    dev = __dev_get_by_name(net, ifname);
    if (!dev)
        return -ENODEV;

    if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
        return -EINVAL;

@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
 * in the in_addr structure passed in as a parameter.
 */
static int
join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
{
    struct net *net = sock_net(sk);
    struct ip_mreqn mreq;
    struct net_device *dev;
    int ret;

    memset(&mreq, 0, sizeof(mreq));
    memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));

    dev = __dev_get_by_name(net, ifname);
    if (!dev)
        return -ENODEV;
    if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
        return -EINVAL;

@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)

#ifdef CONFIG_IP_VS_IPV6
static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
                             char *ifname)
                             struct net_device *dev)
{
    struct net *net = sock_net(sk);
    struct net_device *dev;
    int ret;

    dev = __dev_get_by_name(net, ifname);
    if (!dev)
        return -ENODEV;
    if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
        return -EINVAL;

@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
}
#endif

static int bind_mcastif_addr(struct socket *sock, char *ifname)
static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
{
    struct net *net = sock_net(sock->sk);
    struct net_device *dev;
    __be32 addr;
    struct sockaddr_in sin;

    dev = __dev_get_by_name(net, ifname);
    if (!dev)
        return -ENODEV;

    addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
    if (!addr)
        pr_err("You probably need to specify IP address on "
               "multicast interface.\n");

    IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
              ifname, &addr);
              dev->name, &addr);

    /* Now bind the socket with the address of multicast interface */
    sin.sin_family = AF_INET;

@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
/*
 *      Set up sending multicast socket over UDP
 */
static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
static int make_send_sock(struct netns_ipvs *ipvs, int id,
                          struct net_device *dev, struct socket **sock_ret)
{
    /* multicast addr */
    union ipvs_sockaddr mcast_addr;

@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
                             IPPROTO_UDP, &sock);
    if (result < 0) {
        pr_err("Error during creation of socket; terminating\n");
        return ERR_PTR(result);
        goto error;
    }
    result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
    *sock_ret = sock;
    result = set_mcast_if(sock->sk, dev);
    if (result < 0) {
        pr_err("Error setting outbound mcast interface\n");
        goto error;

@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
        set_sock_size(sock->sk, 1, result);

    if (AF_INET == ipvs->mcfg.mcast_af)
        result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
        result = bind_mcastif_addr(sock, dev);
    else
        result = 0;
    if (result < 0) {

@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
        goto error;
    }

    return sock;
    return 0;

error:
    sock_release(sock);
    return ERR_PTR(result);
    return result;
}


/*
 *      Set up receiving multicast socket over UDP
 */
static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
                                        int ifindex)
static int make_receive_sock(struct netns_ipvs *ipvs, int id,
                             struct net_device *dev, struct socket **sock_ret)
{
    /* multicast addr */
    union ipvs_sockaddr mcast_addr;

@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
                             IPPROTO_UDP, &sock);
    if (result < 0) {
        pr_err("Error during creation of socket; terminating\n");
        return ERR_PTR(result);
        goto error;
    }
    *sock_ret = sock;
    /* it is equivalent to the REUSEADDR option in user-space */
    sock->sk->sk_reuse = SK_CAN_REUSE;
    result = sysctl_sync_sock_size(ipvs);

@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
        set_sock_size(sock->sk, 0, result);

    get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
    sock->sk->sk_bound_dev_if = ifindex;
    sock->sk->sk_bound_dev_if = dev->ifindex;
    result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
    if (result < 0) {
        pr_err("Error binding to the multicast addr\n");

@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
#ifdef CONFIG_IP_VS_IPV6
    if (ipvs->bcfg.mcast_af == AF_INET6)
        result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
                                   ipvs->bcfg.mcast_ifn);
                                   dev);
    else
#endif
        result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
                                  ipvs->bcfg.mcast_ifn);
                                  dev);
    if (result < 0) {
        pr_err("Error joining to the multicast group\n");
        goto error;
    }

    return sock;
    return 0;

error:
    sock_release(sock);
    return ERR_PTR(result);
    return result;
}

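
The make_send_sock()/make_receive_sock() hunks above switch from returning a socket pointer (or ERR_PTR) to returning an int and handing the socket back through an out parameter that is filled in as soon as the socket exists. A minimal user-space sketch of that calling convention, with invented names and a plain file descriptor standing in for the socket (illustration only, not the kernel code):

#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

struct conn { int fd; };

/* Report success/failure as an int, but publish whatever was created
 * through *out as soon as it exists, so a single error path in the
 * caller can release a partially constructed object.
 */
static int make_conn(struct conn **out)
{
    struct conn *c = calloc(1, sizeof(*c));

    if (!c)
        return -ENOMEM;
    *out = c;                       /* visible to the caller from now on */
    c->fd = open("/dev/null", O_RDONLY);
    if (c->fd < 0)
        return -errno;              /* caller cleans up via *out */
    return 0;
}

int main(void)
{
    struct conn *c = NULL;

    if (make_conn(&c) < 0 && c) {   /* single unwind path */
        if (c->fd >= 0)
            close(c->fd);
        free(c);
    }
    return 0;
}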
@@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                      int state)
{
    struct ip_vs_sync_thread_data *tinfo;
    struct ip_vs_sync_thread_data *tinfo = NULL;
    struct task_struct **array = NULL, *task;
    struct socket *sock;
    struct net_device *dev;
    char *name;
    int (*threadfn)(void *data);
    int id, count, hlen;
    int id = 0, count, hlen;
    int result = -ENOMEM;
    u16 mtu, min_mtu;

@@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
    IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
              sizeof(struct ip_vs_sync_conn_v0));

    /* Do not hold one mutex and then to block on another */
    for (;;) {
        rtnl_lock();
        if (mutex_trylock(&ipvs->sync_mutex))
            break;
        rtnl_unlock();
        mutex_lock(&ipvs->sync_mutex);
        if (rtnl_trylock())
            break;
        mutex_unlock(&ipvs->sync_mutex);
    }

    if (!ipvs->sync_state) {
        count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
        ipvs->threads_mask = count - 1;

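
The trylock loop in the hunk above takes rtnl_lock and sync_mutex without committing to a fixed ordering: hold one lock, only try the other, and back off and retry from the opposite side on failure, so the code never blocks on the second lock while holding the first. The same idea in a stand-alone pthreads sketch (illustrative only, not kernel code; names are invented):

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Acquire both mutexes without a global lock order: never block on the
 * second lock while holding the first, so two callers approaching from
 * opposite directions cannot deadlock against each other.
 */
static void lock_both(void)
{
    for (;;) {
        pthread_mutex_lock(&lock_a);
        if (pthread_mutex_trylock(&lock_b) == 0)
            return;                 /* got both */
        pthread_mutex_unlock(&lock_a);

        pthread_mutex_lock(&lock_b);
        if (pthread_mutex_trylock(&lock_a) == 0)
            return;                 /* got both */
        pthread_mutex_unlock(&lock_b);
    }
}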
@@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
    dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
    if (!dev) {
        pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
        return -ENODEV;
        result = -ENODEV;
        goto out_early;
    }
    hlen = (AF_INET6 == c->mcast_af) ?
           sizeof(struct ipv6hdr) + sizeof(struct udphdr) :

@@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
    c->sync_maxlen = mtu - hlen;

    if (state == IP_VS_STATE_MASTER) {
        result = -EEXIST;
        if (ipvs->ms)
            return -EEXIST;
            goto out_early;

        ipvs->mcfg = *c;
        name = "ipvs-m:%d:%d";
        threadfn = sync_thread_master;
    } else if (state == IP_VS_STATE_BACKUP) {
        result = -EEXIST;
        if (ipvs->backup_threads)
            return -EEXIST;
            goto out_early;

        ipvs->bcfg = *c;
        name = "ipvs-b:%d:%d";
        threadfn = sync_thread_backup;
    } else {
        return -EINVAL;
        result = -EINVAL;
        goto out_early;
    }

    if (state == IP_VS_STATE_MASTER) {
        struct ipvs_master_sync_state *ms;

        result = -ENOMEM;
        ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
        if (!ipvs->ms)
            goto out;

@@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
    } else {
        array = kcalloc(count, sizeof(struct task_struct *),
                        GFP_KERNEL);
        result = -ENOMEM;
        if (!array)
            goto out;
    }

    tinfo = NULL;
    for (id = 0; id < count; id++) {
        if (state == IP_VS_STATE_MASTER)
            sock = make_send_sock(ipvs, id);
        else
            sock = make_receive_sock(ipvs, id, dev->ifindex);
        if (IS_ERR(sock)) {
            result = PTR_ERR(sock);
            goto outtinfo;
        }
        result = -ENOMEM;
        tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
        if (!tinfo)
            goto outsocket;
            goto out;
        tinfo->ipvs = ipvs;
        tinfo->sock = sock;
        tinfo->sock = NULL;
        if (state == IP_VS_STATE_BACKUP) {
            tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
                                 GFP_KERNEL);
            if (!tinfo->buf)
                goto outtinfo;
                goto out;
        } else {
            tinfo->buf = NULL;
        }
        tinfo->id = id;
        if (state == IP_VS_STATE_MASTER)
            result = make_send_sock(ipvs, id, dev, &tinfo->sock);
        else
            result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
        if (result < 0)
            goto out;

        task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
        if (IS_ERR(task)) {
            result = PTR_ERR(task);
            goto outtinfo;
            goto out;
        }
        tinfo = NULL;
        if (state == IP_VS_STATE_MASTER)

@@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
    ipvs->sync_state |= state;
    spin_unlock_bh(&ipvs->sync_buff_lock);

    mutex_unlock(&ipvs->sync_mutex);
    rtnl_unlock();

    /* increase the module use count */
    ip_vs_use_count_inc();

    return 0;

outsocket:
    sock_release(sock);

outtinfo:
    if (tinfo) {
        sock_release(tinfo->sock);
        kfree(tinfo->buf);
        kfree(tinfo);
    }
out:
    /* We do not need RTNL lock anymore, release it here so that
     * sock_release below and in the kthreads can use rtnl_lock
     * to leave the mcast group.
     */
    rtnl_unlock();
    count = id;
    while (count-- > 0) {
        if (state == IP_VS_STATE_MASTER)

@@ -1932,13 +1927,23 @@ outtinfo:
        else
            kthread_stop(array[count]);
    }
    kfree(array);

out:
    if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
        kfree(ipvs->ms);
        ipvs->ms = NULL;
    }
    mutex_unlock(&ipvs->sync_mutex);
    if (tinfo) {
        if (tinfo->sock)
            sock_release(tinfo->sock);
        kfree(tinfo->buf);
        kfree(tinfo);
    }
    kfree(array);
    return result;

out_early:
    mutex_unlock(&ipvs->sync_mutex);
    rtnl_unlock();
    return result;
}

@@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
    return a->master == b->master && a->class == b->class &&
    return a->master == b->master &&
           nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
           nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
           net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&

@@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
    h = nf_ct_expect_dst_hash(net, &expect->tuple);
    hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
        if (expect_matches(i, expect)) {
            if (i->class != expect->class)
                return -EALREADY;

            if (nf_ct_remove_expect(i))
                break;
        } else if (expect_clash(i, expect)) {

@@ -9,6 +9,7 @@
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

@@ -71,6 +72,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
    rcu_read_unlock();

    alloc = max(newlen, NF_CT_EXT_PREALLOC);
    kmemleak_not_leak(old);
    new = __krealloc(old, alloc, gfp);
    if (!new)
        return NULL;

@@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
                                   datalen, rtp_exp, rtcp_exp,
                                   mediaoff, medialen, daddr);
    else {
        if (nf_ct_expect_related(rtp_exp) == 0) {
            if (nf_ct_expect_related(rtcp_exp) != 0)
                nf_ct_unexpect_related(rtp_exp);
            else
        /* -EALREADY handling works around end-points that send
         * SDP messages with identical port but different media type,
         * we pretend expectation was set up.
         */
        int errp = nf_ct_expect_related(rtp_exp);

        if (errp == 0 || errp == -EALREADY) {
            int errcp = nf_ct_expect_related(rtcp_exp);

            if (errcp == 0 || errcp == -EALREADY)
                ret = NF_ACCEPT;
            else if (errp == 0)
                nf_ct_unexpect_related(rtp_exp);
        }
    }
    nf_ct_expect_put(rtcp_exp);

@@ -2361,41 +2361,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
    }

    if (nlh->nlmsg_flags & NLM_F_REPLACE) {
        if (nft_is_active_next(net, old_rule)) {
            trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
                                       old_rule);
            if (trans == NULL) {
                err = -ENOMEM;
                goto err2;
            }
            nft_deactivate_next(net, old_rule);
            chain->use--;
            list_add_tail_rcu(&rule->list, &old_rule->list);
        } else {
        if (!nft_is_active_next(net, old_rule)) {
            err = -ENOENT;
            goto err2;
        }
    } else if (nlh->nlmsg_flags & NLM_F_APPEND)
        if (old_rule)
            list_add_rcu(&rule->list, &old_rule->list);
        else
            list_add_tail_rcu(&rule->list, &chain->rules);
    else {
        if (old_rule)
            list_add_tail_rcu(&rule->list, &old_rule->list);
        else
            list_add_rcu(&rule->list, &chain->rules);
    }
        trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
                                   old_rule);
        if (trans == NULL) {
            err = -ENOMEM;
            goto err2;
        }
        nft_deactivate_next(net, old_rule);
        chain->use--;

    if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
        err = -ENOMEM;
        goto err3;
        if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
            err = -ENOMEM;
            goto err2;
        }

        list_add_tail_rcu(&rule->list, &old_rule->list);
    } else {
        if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
            err = -ENOMEM;
            goto err2;
        }

        if (nlh->nlmsg_flags & NLM_F_APPEND) {
            if (old_rule)
                list_add_rcu(&rule->list, &old_rule->list);
            else
                list_add_tail_rcu(&rule->list, &chain->rules);
        } else {
            if (old_rule)
                list_add_tail_rcu(&rule->list, &old_rule->list);
            else
                list_add_rcu(&rule->list, &chain->rules);
        }
    }
    chain->use++;
    return 0;

err3:
    list_del_rcu(&rule->list);
err2:
    nf_tables_rule_destroy(&ctx, rule);
err1:

@@ -3207,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,

    err = ops->init(set, &desc, nla);
    if (err < 0)
        goto err2;
        goto err3;

    err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
    if (err < 0)
        goto err3;
        goto err4;

    list_add_tail_rcu(&set->list, &table->sets);
    table->use++;
    return 0;

err3:
err4:
    ops->destroy(set);
err3:
    kfree(set->name);
err2:
    kvfree(set);
err1:

@@ -5738,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
    struct nft_base_chain *basechain;

    if (nft_trans_chain_name(trans))
        strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
    swap(trans->ctx.chain->name, nft_trans_chain_name(trans));

    if (!nft_is_base_chain(trans->ctx.chain))
        return;

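
The strcpy() to swap() change above is fix 11 from the series: the chain's name buffer is only as large as the old name, so copying a longer replacement name into it writes out of bounds, while exchanging the two heap pointers avoids the copy entirely. A hedged user-space illustration of the same idea (names and sizes invented):

#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *current = strdup("in");                       /* sized for the old name */
    char *pending = strdup("a-much-longer-chain-name"); /* requested new name */

    if (!current || !pending)
        return 1;

    /* Buggy pattern: strcpy(current, pending) would write past the
     * 3-byte allocation behind "current".
     *
     * Fixed pattern: swap the pointers instead of copying the bytes.
     */
    char *tmp = current;
    current = pending;
    pending = tmp;

    free(pending);          /* the old name, no longer referenced */
    free(current);
    return 0;
}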
@@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark");
MODULE_ALIAS("ip6t_connmark");

static unsigned int
connmark_tg_shift(struct sk_buff *skb,
                  const struct xt_connmark_tginfo1 *info,
                  u8 shift_bits, u8 shift_dir)
connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
{
    enum ip_conntrack_info ctinfo;
    u_int32_t new_targetmark;
    struct nf_conn *ct;
    u_int32_t newmark;

@@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb,
    switch (info->mode) {
    case XT_CONNMARK_SET:
        newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
        if (shift_dir == D_SHIFT_RIGHT)
            newmark >>= shift_bits;
        if (info->shift_dir == D_SHIFT_RIGHT)
            newmark >>= info->shift_bits;
        else
            newmark <<= shift_bits;
            newmark <<= info->shift_bits;

        if (ct->mark != newmark) {
            ct->mark = newmark;
            nf_conntrack_event_cache(IPCT_MARK, ct);
        }
        break;
    case XT_CONNMARK_SAVE:
        newmark = (ct->mark & ~info->ctmask) ^
                  (skb->mark & info->nfmask);
        if (shift_dir == D_SHIFT_RIGHT)
            newmark >>= shift_bits;
        new_targetmark = (skb->mark & info->nfmask);
        if (info->shift_dir == D_SHIFT_RIGHT)
            new_targetmark >>= info->shift_bits;
        else
            newmark <<= shift_bits;
            new_targetmark <<= info->shift_bits;

        newmark = (ct->mark & ~info->ctmask) ^
                  new_targetmark;
        if (ct->mark != newmark) {
            ct->mark = newmark;
            nf_conntrack_event_cache(IPCT_MARK, ct);
        }
        break;
    case XT_CONNMARK_RESTORE:
        newmark = (skb->mark & ~info->nfmask) ^
                  (ct->mark & info->ctmask);
        if (shift_dir == D_SHIFT_RIGHT)
            newmark >>= shift_bits;
        new_targetmark = (ct->mark & info->ctmask);
        if (info->shift_dir == D_SHIFT_RIGHT)
            new_targetmark >>= info->shift_bits;
        else
            newmark <<= shift_bits;
            new_targetmark <<= info->shift_bits;

        newmark = (skb->mark & ~info->nfmask) ^
                  new_targetmark;
        skb->mark = newmark;
        break;
    }

@@ -89,8 +93,14 @@ static unsigned int
connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
    const struct xt_connmark_tginfo1 *info = par->targinfo;
    const struct xt_connmark_tginfo2 info2 = {
        .ctmark = info->ctmark,
        .ctmask = info->ctmask,
        .nfmask = info->nfmask,
        .mode   = info->mode,
    };

    return connmark_tg_shift(skb, info, 0, 0);
    return connmark_tg_shift(skb, &info2);
}

static unsigned int

@@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
    const struct xt_connmark_tginfo2 *info = par->targinfo;

    return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info,
                             info->shift_bits, info->shift_dir);
    return connmark_tg_shift(skb, info);
}

static int connmark_tg_check(const struct xt_tgchk_param *par)

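
Fixes 4 and 12 both concern the mark arithmetic in the xt_connmark hunks above: the earlier code applied the shift to the already-combined mark, so bits that were meant to be preserved from ct->mark (or skb->mark) could be shifted away, and the later hunks also drop the cast between the two target-info revisions. A small user-space sketch of the XT_CONNMARK_SAVE arithmetic, with all mask, mark and shift values invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ctmark  = 0xff000000;  /* ct->mark bits that must survive */
    uint32_t skbmark = 0x000000f0;  /* skb->mark bits being saved */
    uint32_t ctmask  = 0x000000ff;  /* part of ct->mark to replace */
    uint32_t nfmask  = 0x000000ff;  /* part of skb->mark to copy */
    unsigned int shift = 4;

    /* old behaviour: shift the combined result, dragging the preserved
     * high ct->mark bits along with it
     */
    uint32_t old = ((ctmark & ~ctmask) ^ (skbmark & nfmask)) >> shift;

    /* fixed behaviour: shift only the value taken from skb->mark, then
     * fold it into the preserved part of ct->mark
     */
    uint32_t tgt = (skbmark & nfmask) >> shift;
    uint32_t new = (ctmark & ~ctmask) ^ tgt;

    printf("old = 0x%08x, fixed = 0x%08x\n", old, new);
    /* prints: old = 0x0ff0000f, fixed = 0xff00000f */
    return 0;
}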