Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says: ==================== Netfilter/IPVS updates for net-next The following patchset contains Netfilter/IPVS updates for your net-next tree: 1) Missing NFTA_RULE_POSITION_ID netlink attribute validation, from Phil Sutter. 2) Restrict matching on tunnel metadata to rx/tx path, from wenxu. 3) Avoid indirect calls for IPV6=y, from Florian Westphal. 4) Add two indirections to prepare merger of IPV4 and IPV6 nat modules, from Florian Westphal. 5) Broken indentation in ctnetlink, from Colin Ian King. 6) Patches to use struct_size() from netfilter and IPVS, from Gustavo A. R. Silva. 7) Display kernel splat only once in case of racing to confirm conntrack from bridge plus nfqueue setups, from Chieh-Min Wang. 8) Skip checksum validation for layer 4 protocols that don't need it, patch from Alin Nastac. 9) Sparse warning due to symbol that should be static in CLUSTERIP, from Wei Yongjun. 10) Add new toggle to disable SDP payload translation when media endpoint is reachable through the same interface as the signalling peer, from Alin Nastac. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
8bbed40f10
|
@ -25,23 +25,24 @@ struct nf_queue_entry;
|
|||
* if IPv6 is a module.
|
||||
*/
|
||||
struct nf_ipv6_ops {
|
||||
#if IS_MODULE(CONFIG_IPV6)
|
||||
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
|
||||
const struct net_device *dev, int strict);
|
||||
int (*route_me_harder)(struct net *net, struct sk_buff *skb);
|
||||
int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
|
||||
const struct in6_addr *daddr, unsigned int srcprefs,
|
||||
struct in6_addr *saddr);
|
||||
int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
|
||||
bool strict);
|
||||
#endif
|
||||
void (*route_input)(struct sk_buff *skb);
|
||||
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
|
||||
int (*output)(struct net *, struct sock *, struct sk_buff *));
|
||||
int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
|
||||
bool strict);
|
||||
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
|
||||
};
|
||||
|
||||
#ifdef CONFIG_NETFILTER
|
||||
int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
|
||||
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
|
||||
unsigned int dataoff, u_int8_t protocol);
|
||||
|
||||
int ipv6_netfilter_init(void);
|
||||
void ipv6_netfilter_fini(void);
|
||||
#include <net/addrconf.h>
|
||||
|
||||
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
|
||||
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
|
||||
|
@ -49,6 +50,49 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
|
|||
return rcu_dereference(nf_ipv6_ops);
|
||||
}
|
||||
|
||||
static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
|
||||
const struct net_device *dev, int strict)
|
||||
{
|
||||
#if IS_MODULE(CONFIG_IPV6)
|
||||
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
|
||||
|
||||
if (!v6_ops)
|
||||
return 1;
|
||||
|
||||
return v6_ops->chk_addr(net, addr, dev, strict);
|
||||
#else
|
||||
return ipv6_chk_addr(net, addr, dev, strict);
|
||||
#endif
|
||||
}
|
||||
|
||||
int __nf_ip6_route(struct net *net, struct dst_entry **dst,
|
||||
struct flowi *fl, bool strict);
|
||||
|
||||
static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
|
||||
struct flowi *fl, bool strict)
|
||||
{
|
||||
#if IS_MODULE(CONFIG_IPV6)
|
||||
const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
|
||||
|
||||
if (v6ops)
|
||||
return v6ops->route(net, dst, fl, strict);
|
||||
|
||||
return -EHOSTUNREACH;
|
||||
#endif
|
||||
#if IS_BUILTIN(CONFIG_IPV6)
|
||||
return __nf_ip6_route(net, dst, fl, strict);
|
||||
#else
|
||||
return -EHOSTUNREACH;
|
||||
#endif
|
||||
}
|
||||
|
||||
int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
|
||||
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
|
||||
unsigned int dataoff, u_int8_t protocol);
|
||||
|
||||
int ipv6_netfilter_init(void);
|
||||
void ipv6_netfilter_fini(void);
|
||||
|
||||
#else /* CONFIG_NETFILTER */
|
||||
static inline int ipv6_netfilter_init(void) { return 0; }
|
||||
static inline void ipv6_netfilter_fini(void) { return; }
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#include <linux/skbuff.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/icmp.h>
|
||||
#include <net/netfilter/nf_reject.h>
|
||||
|
||||
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
|
||||
void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#define _IPV6_NF_REJECT_H
|
||||
|
||||
#include <linux/icmpv6.h>
|
||||
#include <net/netfilter/nf_reject.h>
|
||||
|
||||
void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
|
||||
unsigned int hooknum);
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _NF_REJECT_H
|
||||
#define _NF_REJECT_H
|
||||
|
||||
static inline bool nf_reject_verify_csum(__u8 proto)
|
||||
{
|
||||
/* Skip protocols that don't use 16-bit one's complement checksum
|
||||
* of the entire payload.
|
||||
*/
|
||||
switch (proto) {
|
||||
/* Protocols with other integrity checks. */
|
||||
case IPPROTO_AH:
|
||||
case IPPROTO_ESP:
|
||||
case IPPROTO_SCTP:
|
||||
|
||||
/* Protocols with partial checksums. */
|
||||
case IPPROTO_UDPLITE:
|
||||
case IPPROTO_DCCP:
|
||||
|
||||
/* Protocols with optional checksums. */
|
||||
case IPPROTO_GRE:
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* _NF_REJECT_H */
|
|
@ -1727,10 +1727,19 @@ enum nft_tunnel_keys {
|
|||
};
|
||||
#define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1)
|
||||
|
||||
enum nft_tunnel_mode {
|
||||
NFT_TUNNEL_MODE_NONE,
|
||||
NFT_TUNNEL_MODE_RX,
|
||||
NFT_TUNNEL_MODE_TX,
|
||||
__NFT_TUNNEL_MODE_MAX
|
||||
};
|
||||
#define NFT_TUNNEL_MODE_MAX (__NFT_TUNNEL_MODE_MAX - 1)
|
||||
|
||||
enum nft_tunnel_attributes {
|
||||
NFTA_TUNNEL_UNSPEC,
|
||||
NFTA_TUNNEL_KEY,
|
||||
NFTA_TUNNEL_DREG,
|
||||
NFTA_TUNNEL_MODE,
|
||||
__NFTA_TUNNEL_MAX
|
||||
};
|
||||
#define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1)
|
||||
|
|
|
@ -125,13 +125,10 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
|
|||
if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
|
||||
return;
|
||||
|
||||
if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
|
||||
ip_hdr(oldskb)->protocol == IPPROTO_UDP)
|
||||
proto = ip_hdr(oldskb)->protocol;
|
||||
else
|
||||
proto = 0;
|
||||
proto = ip_hdr(oldskb)->protocol;
|
||||
|
||||
if (!skb_csum_unnecessary(oldskb) &&
|
||||
nf_reject_verify_csum(proto) &&
|
||||
nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
|
||||
return;
|
||||
|
||||
|
@ -234,6 +231,9 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
|
|||
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
|
||||
return false;
|
||||
|
||||
if (!nf_reject_verify_csum(proto))
|
||||
return true;
|
||||
|
||||
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -864,7 +864,7 @@ static struct pernet_operations clusterip_net_ops = {
|
|||
.size = sizeof(struct clusterip_net),
|
||||
};
|
||||
|
||||
struct notifier_block cip_netdev_notifier = {
|
||||
static struct notifier_block cip_netdev_notifier = {
|
||||
.notifier_call = clusterip_netdev_event
|
||||
};
|
||||
|
||||
|
|
|
@ -173,21 +173,16 @@ EXPORT_SYMBOL_GPL(nf_send_reset);
|
|||
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
|
||||
{
|
||||
struct iphdr *iph = ip_hdr(skb_in);
|
||||
u8 proto;
|
||||
u8 proto = iph->protocol;
|
||||
|
||||
if (iph->frag_off & htons(IP_OFFSET))
|
||||
return;
|
||||
|
||||
if (skb_csum_unnecessary(skb_in)) {
|
||||
if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
|
||||
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)
|
||||
proto = iph->protocol;
|
||||
else
|
||||
proto = 0;
|
||||
|
||||
if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
|
||||
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
|
||||
}
|
||||
|
|
|
@ -86,8 +86,8 @@ static int nf_ip6_reroute(struct sk_buff *skb,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int nf_ip6_route(struct net *net, struct dst_entry **dst,
|
||||
struct flowi *fl, bool strict)
|
||||
int __nf_ip6_route(struct net *net, struct dst_entry **dst,
|
||||
struct flowi *fl, bool strict)
|
||||
{
|
||||
static const struct ipv6_pinfo fake_pinfo;
|
||||
static const struct inet_sock fake_sk = {
|
||||
|
@ -107,12 +107,17 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
|
|||
*dst = result;
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__nf_ip6_route);
|
||||
|
||||
static const struct nf_ipv6_ops ipv6ops = {
|
||||
#if IS_MODULE(CONFIG_IPV6)
|
||||
.chk_addr = ipv6_chk_addr,
|
||||
.route_input = ip6_route_input,
|
||||
.route_me_harder = ip6_route_me_harder,
|
||||
.dev_get_saddr = ipv6_dev_get_saddr,
|
||||
.route = __nf_ip6_route,
|
||||
#endif
|
||||
.route_input = ip6_route_input,
|
||||
.fragment = ip6_fragment,
|
||||
.route = nf_ip6_route,
|
||||
.reroute = nf_ip6_reroute,
|
||||
};
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <net/checksum.h>
|
||||
#include <net/ip6_checksum.h>
|
||||
#include <net/ip6_route.h>
|
||||
#include <net/xfrm.h>
|
||||
#include <net/ipv6.h>
|
||||
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
|
@ -318,6 +319,20 @@ nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int nat_route_me_harder(struct net *net, struct sk_buff *skb)
|
||||
{
|
||||
#ifdef CONFIG_IPV6_MODULE
|
||||
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
|
||||
|
||||
if (!v6_ops)
|
||||
return -EHOSTUNREACH;
|
||||
|
||||
return v6_ops->route_me_harder(net, skb);
|
||||
#else
|
||||
return ip6_route_me_harder(net, skb);
|
||||
#endif
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
|
||||
const struct nf_hook_state *state)
|
||||
|
@ -334,7 +349,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
|
|||
|
||||
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
|
||||
&ct->tuplehash[!dir].tuple.src.u3)) {
|
||||
err = ip6_route_me_harder(state->net, skb);
|
||||
err = nat_route_me_harder(state->net, skb);
|
||||
if (err < 0)
|
||||
ret = NF_DROP_ERR(err);
|
||||
}
|
||||
|
|
|
@ -24,6 +24,23 @@
|
|||
|
||||
static atomic_t v6_worker_count;
|
||||
|
||||
static int
|
||||
nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
|
||||
const struct in6_addr *daddr, unsigned int srcprefs,
|
||||
struct in6_addr *saddr)
|
||||
{
|
||||
#ifdef CONFIG_IPV6_MODULE
|
||||
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
|
||||
|
||||
if (!v6_ops)
|
||||
return -EHOSTUNREACH;
|
||||
|
||||
return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr);
|
||||
#else
|
||||
return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr);
|
||||
#endif
|
||||
}
|
||||
|
||||
unsigned int
|
||||
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
|
||||
const struct net_device *out)
|
||||
|
@ -38,8 +55,8 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
|
|||
WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
|
||||
ctinfo == IP_CT_RELATED_REPLY)));
|
||||
|
||||
if (ipv6_dev_get_saddr(nf_ct_net(ct), out,
|
||||
&ipv6_hdr(skb)->daddr, 0, &src) < 0)
|
||||
if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out,
|
||||
&ipv6_hdr(skb)->daddr, 0, &src) < 0)
|
||||
return NF_DROP;
|
||||
|
||||
nat = nf_ct_nat_ext_add(ct);
|
||||
|
|
|
@ -233,6 +233,9 @@ static bool reject6_csum_ok(struct sk_buff *skb, int hook)
|
|||
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
|
||||
return false;
|
||||
|
||||
if (!nf_reject_verify_csum(proto))
|
||||
return true;
|
||||
|
||||
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -59,7 +59,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
|
|||
struct ipv6hdr *iph)
|
||||
{
|
||||
const struct net_device *dev = NULL;
|
||||
const struct nf_ipv6_ops *v6ops;
|
||||
int route_err, addrtype;
|
||||
struct rt6_info *rt;
|
||||
struct flowi6 fl6 = {
|
||||
|
@ -68,10 +67,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
|
|||
};
|
||||
u32 ret = 0;
|
||||
|
||||
v6ops = nf_get_ipv6_ops();
|
||||
if (!v6ops)
|
||||
return RTN_UNREACHABLE;
|
||||
|
||||
if (priv->flags & NFTA_FIB_F_IIF)
|
||||
dev = nft_in(pkt);
|
||||
else if (priv->flags & NFTA_FIB_F_OIF)
|
||||
|
@ -79,10 +74,10 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
|
|||
|
||||
nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
|
||||
|
||||
if (dev && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
|
||||
if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
|
||||
ret = RTN_LOCAL;
|
||||
|
||||
route_err = v6ops->route(nft_net(pkt), (struct dst_entry **)&rt,
|
||||
route_err = nf_ip6_route(nft_net(pkt), (struct dst_entry **)&rt,
|
||||
flowi6_to_flowi(&fl6), false);
|
||||
if (route_err)
|
||||
goto err;
|
||||
|
|
|
@ -2744,8 +2744,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
|
|||
int size;
|
||||
|
||||
get = (struct ip_vs_get_services *)arg;
|
||||
size = sizeof(*get) +
|
||||
sizeof(struct ip_vs_service_entry) * get->num_services;
|
||||
size = struct_size(get, entrytable, get->num_services);
|
||||
if (*len != size) {
|
||||
pr_err("length: %u != %u\n", *len, size);
|
||||
ret = -EINVAL;
|
||||
|
@ -2786,8 +2785,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
|
|||
int size;
|
||||
|
||||
get = (struct ip_vs_get_dests *)arg;
|
||||
size = sizeof(*get) +
|
||||
sizeof(struct ip_vs_dest_entry) * get->num_dests;
|
||||
size = struct_size(get, entrytable, get->num_dests);
|
||||
if (*len != size) {
|
||||
pr_err("length: %u != %u\n", *len, size);
|
||||
ret = -EINVAL;
|
||||
|
|
|
@ -936,10 +936,18 @@ __nf_conntrack_confirm(struct sk_buff *skb)
|
|||
* REJECT will give spurious warnings here.
|
||||
*/
|
||||
|
||||
/* No external references means no one else could have
|
||||
* confirmed us.
|
||||
/* Another skb with the same unconfirmed conntrack may
|
||||
* win the race. This may happen for bridge(br_flood)
|
||||
* or broadcast/multicast packets do skb_clone with
|
||||
* unconfirmed conntrack.
|
||||
*/
|
||||
WARN_ON(nf_ct_is_confirmed(ct));
|
||||
if (unlikely(nf_ct_is_confirmed(ct))) {
|
||||
WARN_ON_ONCE(1);
|
||||
nf_conntrack_double_unlock(hash, reply_hash);
|
||||
local_bh_enable();
|
||||
return NF_DROP;
|
||||
}
|
||||
|
||||
pr_debug("Confirming conntrack %p\n", ct);
|
||||
/* We have to check the DYING flag after unlink to prevent
|
||||
* a race against nf_ct_get_next_corpse() possibly called from
|
||||
|
|
|
@ -2675,7 +2675,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
|
|||
ret = ctnetlink_dump_tuples_ip(skb, &m);
|
||||
if (ret >= 0) {
|
||||
l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
|
||||
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
|
||||
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
|
|
|
@ -21,6 +21,8 @@
|
|||
#include <linux/tcp.h>
|
||||
#include <linux/netfilter.h>
|
||||
|
||||
#include <net/route.h>
|
||||
#include <net/ip6_route.h>
|
||||
#include <net/netfilter/nf_conntrack.h>
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
#include <net/netfilter/nf_conntrack_expect.h>
|
||||
|
@ -54,6 +56,11 @@ module_param(sip_direct_media, int, 0600);
|
|||
MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
|
||||
"endpoints only (default 1)");
|
||||
|
||||
static int sip_external_media __read_mostly = 0;
|
||||
module_param(sip_external_media, int, 0600);
|
||||
MODULE_PARM_DESC(sip_external_media, "Expect Media streams between external "
|
||||
"endpoints (default 0)");
|
||||
|
||||
const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
|
||||
EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
|
||||
|
||||
|
@ -861,6 +868,41 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
|
|||
if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3))
|
||||
return NF_ACCEPT;
|
||||
saddr = &ct->tuplehash[!dir].tuple.src.u3;
|
||||
} else if (sip_external_media) {
|
||||
struct net_device *dev = skb_dst(skb)->dev;
|
||||
struct net *net = dev_net(dev);
|
||||
struct rtable *rt;
|
||||
struct flowi4 fl4 = {};
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
struct flowi6 fl6 = {};
|
||||
#endif
|
||||
struct dst_entry *dst = NULL;
|
||||
|
||||
switch (nf_ct_l3num(ct)) {
|
||||
case NFPROTO_IPV4:
|
||||
fl4.daddr = daddr->ip;
|
||||
rt = ip_route_output_key(net, &fl4);
|
||||
if (!IS_ERR(rt))
|
||||
dst = &rt->dst;
|
||||
break;
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
case NFPROTO_IPV6:
|
||||
fl6.daddr = daddr->in6;
|
||||
dst = ip6_route_output(net, NULL, &fl6);
|
||||
if (dst->error) {
|
||||
dst_release(dst);
|
||||
dst = NULL;
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Don't predict any conntracks when media endpoint is reachable
|
||||
* through the same interface as the signalling peer.
|
||||
*/
|
||||
if (dst && dst->dev == dev)
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
||||
/* We need to check whether the registration exists before attempting
|
||||
|
|
|
@ -2238,6 +2238,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
|
|||
[NFTA_RULE_USERDATA] = { .type = NLA_BINARY,
|
||||
.len = NFT_USERDATA_MAXLEN },
|
||||
[NFTA_RULE_ID] = { .type = NLA_U32 },
|
||||
[NFTA_RULE_POSITION_ID] = { .type = NLA_U32 },
|
||||
};
|
||||
|
||||
static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
struct nft_tunnel {
|
||||
enum nft_tunnel_keys key:8;
|
||||
enum nft_registers dreg:8;
|
||||
enum nft_tunnel_mode mode:8;
|
||||
};
|
||||
|
||||
static void nft_tunnel_get_eval(const struct nft_expr *expr,
|
||||
|
@ -29,14 +30,32 @@ static void nft_tunnel_get_eval(const struct nft_expr *expr,
|
|||
|
||||
switch (priv->key) {
|
||||
case NFT_TUNNEL_PATH:
|
||||
nft_reg_store8(dest, !!tun_info);
|
||||
if (!tun_info) {
|
||||
nft_reg_store8(dest, false);
|
||||
return;
|
||||
}
|
||||
if (priv->mode == NFT_TUNNEL_MODE_NONE ||
|
||||
(priv->mode == NFT_TUNNEL_MODE_RX &&
|
||||
!(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
|
||||
(priv->mode == NFT_TUNNEL_MODE_TX &&
|
||||
(tun_info->mode & IP_TUNNEL_INFO_TX)))
|
||||
nft_reg_store8(dest, true);
|
||||
else
|
||||
nft_reg_store8(dest, false);
|
||||
break;
|
||||
case NFT_TUNNEL_ID:
|
||||
if (!tun_info) {
|
||||
regs->verdict.code = NFT_BREAK;
|
||||
return;
|
||||
}
|
||||
*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
|
||||
if (priv->mode == NFT_TUNNEL_MODE_NONE ||
|
||||
(priv->mode == NFT_TUNNEL_MODE_RX &&
|
||||
!(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
|
||||
(priv->mode == NFT_TUNNEL_MODE_TX &&
|
||||
(tun_info->mode & IP_TUNNEL_INFO_TX)))
|
||||
*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
|
||||
else
|
||||
regs->verdict.code = NFT_BREAK;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
|
@ -47,6 +66,7 @@ static void nft_tunnel_get_eval(const struct nft_expr *expr,
|
|||
static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
|
||||
[NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
|
||||
[NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
|
||||
[NFTA_TUNNEL_MODE] = { .type = NLA_U32 },
|
||||
};
|
||||
|
||||
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
|
||||
|
@ -74,6 +94,14 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
|
|||
|
||||
priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);
|
||||
|
||||
if (tb[NFTA_TUNNEL_MODE]) {
|
||||
priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
|
||||
if (priv->mode > NFT_TUNNEL_MODE_MAX)
|
||||
return -EOPNOTSUPP;
|
||||
} else {
|
||||
priv->mode = NFT_TUNNEL_MODE_NONE;
|
||||
}
|
||||
|
||||
return nft_validate_register_store(ctx, priv->dreg, NULL,
|
||||
NFT_DATA_VALUE, len);
|
||||
}
|
||||
|
@ -87,6 +115,8 @@ static int nft_tunnel_get_dump(struct sk_buff *skb,
|
|||
goto nla_put_failure;
|
||||
if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
|
||||
goto nla_put_failure;
|
||||
if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
|
||||
goto nla_put_failure;
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
|
|
|
@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(nf_checksum_partial);
|
|||
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
|
||||
bool strict, unsigned short family)
|
||||
{
|
||||
const struct nf_ipv6_ops *v6ops;
|
||||
const struct nf_ipv6_ops *v6ops __maybe_unused;
|
||||
int ret = 0;
|
||||
|
||||
switch (family) {
|
||||
|
@ -170,9 +170,7 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
|
|||
ret = nf_ip_route(net, dst, fl, strict);
|
||||
break;
|
||||
case AF_INET6:
|
||||
v6ops = rcu_dereference(nf_ipv6_ops);
|
||||
if (v6ops)
|
||||
ret = v6ops->route(net, dst, fl, strict);
|
||||
ret = nf_ip6_route(net, dst, fl, strict);
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -36,7 +36,6 @@ MODULE_ALIAS("ip6t_addrtype");
|
|||
static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
|
||||
const struct in6_addr *addr, u16 mask)
|
||||
{
|
||||
const struct nf_ipv6_ops *v6ops;
|
||||
struct flowi6 flow;
|
||||
struct rt6_info *rt;
|
||||
u32 ret = 0;
|
||||
|
@ -47,18 +46,13 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
|
|||
if (dev)
|
||||
flow.flowi6_oif = dev->ifindex;
|
||||
|
||||
v6ops = nf_get_ipv6_ops();
|
||||
if (v6ops) {
|
||||
if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
|
||||
if (v6ops->chk_addr(net, addr, dev, true))
|
||||
ret = XT_ADDRTYPE_LOCAL;
|
||||
}
|
||||
route_err = v6ops->route(net, (struct dst_entry **)&rt,
|
||||
flowi6_to_flowi(&flow), false);
|
||||
} else {
|
||||
route_err = 1;
|
||||
if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
|
||||
if (nf_ipv6_chk_addr(net, addr, dev, true))
|
||||
ret = XT_ADDRTYPE_LOCAL;
|
||||
}
|
||||
|
||||
route_err = nf_ip6_route(net, (struct dst_entry **)&rt,
|
||||
flowi6_to_flowi(&flow), false);
|
||||
if (route_err)
|
||||
return XT_ADDRTYPE_UNREACHABLE;
|
||||
|
||||
|
|
|
@ -337,7 +337,6 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
|
|||
unsigned int nstamp_mask;
|
||||
unsigned int i;
|
||||
int ret = -EINVAL;
|
||||
size_t sz;
|
||||
|
||||
net_get_random_once(&hash_rnd, sizeof(hash_rnd));
|
||||
|
||||
|
@ -387,8 +386,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
|
|||
goto out;
|
||||
}
|
||||
|
||||
sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size;
|
||||
t = kvzalloc(sz, GFP_KERNEL);
|
||||
t = kvzalloc(struct_size(t, iphash, ip_list_hash_size), GFP_KERNEL);
|
||||
if (t == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
|
|
Loading…
Reference in New Issue