Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains Netfilter/IPVS updates for net-next:

1) Add support for specifying a stateful expression in set definitions;
   this allows users to specify e.g. counters per set element.

2) Flowtable software counter support.

3) Flowtable hardware offload counter support, from wenxu.

4) Parallelize flowtable hardware offload requests, from Paul Blakey.
   This includes a patch to add one work entry per offload command.

5) Several patches to rework nf_queue refcount handling, from Florian
   Westphal.

6) A few fixes for the flowtable tunnel offload: fix a crash if tunneling
   information is missing, and set up the indirect flow block as
   TC_SETUP_FT, patches from wenxu.

7) Stricter netlink attribute sanity checks on filters, from Romain
   Bellan and Florent Fourcot.

8) Annotations to make sparse happy, from Jules Irenge.

9) Improve ICMP error debugging information, from Haishuang Yan.

10) Fix a warning in IPVS ICMP error debugging, from Haishuang Yan.

11) Fix an endianness issue in the TCP extension header, from Sergey
    Marinkevich.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d9679cd985
@@ -533,6 +533,7 @@ void flow_indr_block_cb_unregister(struct net_device *dev,
 
 void flow_indr_block_call(struct net_device *dev,
 			  struct flow_block_offload *bo,
-			  enum flow_block_command command);
+			  enum flow_block_command command,
+			  enum tc_setup_type type);
 
 #endif /* _NET_FLOW_OFFLOAD_H */

@@ -65,6 +65,17 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
 #endif
 }
 
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+		    unsigned int bytes);
+
+static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
+				     unsigned int bytes)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	nf_ct_acct_add(ct, dir, 1, bytes);
+#endif
+}
+
 void nf_conntrack_acct_pernet_init(struct net *net);
 
 int nf_conntrack_acct_init(void);

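nf_ct_acct_add() generalizes the old accounting helper: direction and packet count are now explicit arguments, so batched statistics (e.g. from hardware offload) can be folded into the conntrack counters in one call, while nf_ct_acct_update() remains as the single-packet wrapper shown above. A minimal sketch of a batched caller; the hw_stats structure and fold_hw_stats() name are illustrative, not part of the series:

/* Hypothetical example: fold a batch of externally reported packets and
 * bytes into a conntrack entry's counters for one direction. */
struct hw_stats {
	u64	pkts;
	u64	bytes;
};

static void fold_hw_stats(struct nf_conn *ct, u32 dir,
			  const struct hw_stats *hw)
{
	if (hw->pkts)	/* skip idle flows, as the offload code does */
		nf_ct_acct_add(ct, dir, hw->pkts, hw->bytes);
}
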
@@ -62,7 +62,8 @@ struct nf_flowtable_type {
 };
 
 enum nf_flowtable_flags {
-	NF_FLOWTABLE_HW_OFFLOAD	= 0x1,
+	NF_FLOWTABLE_HW_OFFLOAD	= 0x1,	/* NFT_FLOWTABLE_HW_OFFLOAD */
+	NF_FLOWTABLE_COUNTER	= 0x2,	/* NFT_FLOWTABLE_COUNTER */
 };
 
 struct nf_flowtable {

@@ -73,7 +74,7 @@ struct nf_flowtable {
 	struct delayed_work		gc_work;
 	unsigned int			flags;
 	struct flow_block		flow_block;
-	struct mutex			flow_block_lock; /* Guards flow_block */
+	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
 	possible_net_t			net;
 };
 

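The switch from a mutex to an rw_semaphore matches the new usage pattern: offload requests only read the callback list and may now run in parallel, while registering or unregistering a callback still needs exclusive access. A sketch of the intended locking, with illustrative demo_* function names (the real call sites appear in the hunks below):

/* Readers: many offload commands may walk the callback list at once. */
static void demo_run_block_cbs(struct nf_flowtable *ft)
{
	struct flow_block_cb *block_cb;

	down_read(&ft->flow_block_lock);
	list_for_each_entry(block_cb, &ft->flow_block.cb_list, list)
		/* block_cb->cb(...) */;
	up_read(&ft->flow_block_lock);
}

/* Writers: list mutation remains exclusive. */
static void demo_add_block_cb(struct nf_flowtable *ft,
			      struct flow_block_cb *block_cb)
{
	down_write(&ft->flow_block_lock);
	list_add_tail(&block_cb->list, &ft->flow_block.cb_list);
	up_write(&ft->flow_block_lock);
}
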
@@ -14,7 +14,10 @@ struct nf_queue_entry {
 	struct sk_buff		*skb;
 	unsigned int		id;
 	unsigned int		hook_index;	/* index in hook_entries->hook[] */
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	struct net_device	*physin;
+	struct net_device	*physout;
+#endif
 	struct nf_hook_state	state;
 	u16			size; /* sizeof(entry) + saved route keys */

@@ -35,7 +38,7 @@ void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
-void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+void nf_queue_entry_free(struct nf_queue_entry *entry);
 
 static inline void init_hashrandom(u32 *jhash_initval)
 {

@@ -266,6 +266,7 @@ struct nft_set_iter {
  * @size: number of set elements
  * @field_len: length of each field in concatenation, bytes
  * @field_count: number of concatenated fields in element
+ * @expr: set must support for expressions
  */
 struct nft_set_desc {
 	unsigned int		klen;

@@ -273,6 +274,7 @@ struct nft_set_desc {
 	unsigned int		size;
 	u8			field_len[NFT_REG32_COUNT];
 	u8			field_count;
+	bool			expr;
 };
 
 /**

@@ -416,6 +418,7 @@ struct nft_set_type {
  * @policy: set parameterization (see enum nft_set_policies)
  * @udlen: user data length
  * @udata: user data
+ * @expr: stateful expression
  * @ops: set ops
  * @flags: set flags
  * @genmask: generation mask

@@ -444,6 +447,7 @@ struct nft_set {
 	u16			policy;
 	u16			udlen;
 	unsigned char		*udata;
+	struct nft_expr		*expr;
 	/* runtime data below here */
 	const struct nft_set_ops *ops ____cacheline_aligned;
 	u16			flags:14,

@@ -846,6 +850,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
 	return (void *)expr->data;
 }
 
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
 void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
 int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
 		  const struct nft_expr *expr);

@@ -342,6 +342,7 @@ enum nft_set_field_attributes {
  * @NFTA_SET_USERDATA: user data (NLA_BINARY)
  * @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
  * @NFTA_SET_HANDLE: set handle (NLA_U64)
+ * @NFTA_SET_EXPR: set expression (NLA_NESTED: nft_expr_attributes)
  */
 enum nft_set_attributes {
 	NFTA_SET_UNSPEC,

@@ -361,6 +362,7 @@ enum nft_set_attributes {
 	NFTA_SET_PAD,
 	NFTA_SET_OBJ_TYPE,
 	NFTA_SET_HANDLE,
+	NFTA_SET_EXPR,
 	__NFTA_SET_MAX
 };
 #define NFTA_SET_MAX		(__NFTA_SET_MAX - 1)

@@ -1551,6 +1553,19 @@ enum nft_object_attributes {
 };
 #define NFTA_OBJ_MAX		(__NFTA_OBJ_MAX - 1)
 
+/**
+ * enum nft_flowtable_flags - nf_tables flowtable flags
+ *
+ * @NFT_FLOWTABLE_HW_OFFLOAD: flowtable hardware offload is enabled
+ * @NFT_FLOWTABLE_COUNTER: enable flow counters
+ */
+enum nft_flowtable_flags {
+	NFT_FLOWTABLE_HW_OFFLOAD	= 0x1,
+	NFT_FLOWTABLE_COUNTER		= 0x2,
+	NFT_FLOWTABLE_MASK		= (NFT_FLOWTABLE_HW_OFFLOAD |
+					   NFT_FLOWTABLE_COUNTER)
+};
+
 /**
  * enum nft_flowtable_attributes - nf_tables flow table netlink attributes
  *

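NFT_FLOWTABLE_MASK bundles all valid userspace-visible flags so the kernel can reject unknown bits with a single test; the nf_tables_newflowtable() hunk later in this page uses it exactly that way. Condensed, with the error path simplified for illustration:

u32 flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));

if (flags & ~NFT_FLOWTABLE_MASK)	/* any bit outside the mask */
	return -EOPNOTSUPP;		/* the real code jumps to an error label */
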
@@ -511,7 +511,8 @@ EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
 
 void flow_indr_block_call(struct net_device *dev,
 			  struct flow_block_offload *bo,
-			  enum flow_block_command command)
+			  enum flow_block_command command,
+			  enum tc_setup_type type)
 {
 	struct flow_indr_block_cb *indr_block_cb;
 	struct flow_indr_block_dev *indr_dev;

@@ -521,8 +522,7 @@ void flow_indr_block_call(struct net_device *dev,
 		return;
 
 	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
-		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
-				  bo);
+		indr_block_cb->cb(dev, indr_block_cb->cb_priv, type, bo);
 }
 EXPORT_SYMBOL_GPL(flow_indr_block_call);
 

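The new enum tc_setup_type argument lets each subsystem identify itself to indirect block callbacks instead of the previously hard-coded TC_SETUP_BLOCK. Condensed from the call sites updated later in this page:

/* Flowtable offload now announces itself as TC_SETUP_FT ... */
flow_indr_block_call(dev, bo, cmd, TC_SETUP_FT);

/* ... while tc classifier and nft chain offload keep TC_SETUP_BLOCK. */
flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
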
@@ -1661,8 +1661,9 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, offset2, ihl, verdict;
-	bool ipip, new_cp = false;
+	bool tunnel, new_cp = false;
 	union nf_inet_addr *raddr;
+	char *outer_proto = "IPIP";
 
 	*related = 1;
 

@@ -1703,8 +1704,8 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
 	raddr = (union nf_inet_addr *)&cih->daddr;
 
-	/* Special case for errors for IPIP packets */
-	ipip = false;
+	/* Special case for errors for IPIP/UDP/GRE tunnel packets */
+	tunnel = false;
 	if (cih->protocol == IPPROTO_IPIP) {
 		struct ip_vs_dest *dest;
 

@@ -1721,7 +1722,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 		cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
 		if (cih == NULL)
 			return NF_ACCEPT; /* The packet looks wrong, ignore */
-		ipip = true;
+		tunnel = true;
 	} else if ((cih->protocol == IPPROTO_UDP ||	/* Can be UDP encap */
 		    cih->protocol == IPPROTO_GRE) &&	/* Can be GRE encap */
 		   /* Error for our tunnel must arrive at LOCAL_IN */

@@ -1729,16 +1730,19 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 		__u8 iproto;
 		int ulen;
 
-		/* Non-first fragment has no UDP header */
+		/* Non-first fragment has no UDP/GRE header */
 		if (unlikely(cih->frag_off & htons(IP_OFFSET)))
 			return NF_ACCEPT;
 		offset2 = offset + cih->ihl * 4;
-		if (cih->protocol == IPPROTO_UDP)
+		if (cih->protocol == IPPROTO_UDP) {
 			ulen = ipvs_udp_decap(ipvs, skb, offset2, AF_INET,
 					      raddr, &iproto);
-		else
+			outer_proto = "UDP";
+		} else {
 			ulen = ipvs_gre_decap(ipvs, skb, offset2, AF_INET,
 					      raddr, &iproto);
+			outer_proto = "GRE";
+		}
 		if (ulen > 0) {
 			/* Skip IP and UDP/GRE tunnel headers */
 			offset = offset2 + ulen;

@@ -1747,7 +1751,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 					  &_ciph);
 			if (cih && cih->version == 4 && cih->ihl >= 5 &&
 			    iproto == IPPROTO_IPIP)
-				ipip = true;
+				tunnel = true;
 			else
 				return NF_ACCEPT;
 		}

@@ -1767,11 +1771,11 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 		      "Checking incoming ICMP for");
 
 	offset2 = offset;
-	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
+	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !tunnel, &ciph);
 	offset = ciph.len;
 
 	/* The embedded headers contain source and dest in reverse order.
-	 * For IPIP this is error for request, not for reply.
+	 * For IPIP/UDP/GRE tunnel this is error for request, not for reply.
 	 */
 	cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
 			     ipvs, AF_INET, skb, &ciph);

@@ -1779,7 +1783,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 	if (!cp) {
 		int v;
 
-		if (ipip || !sysctl_schedule_icmp(ipvs))
+		if (tunnel || !sysctl_schedule_icmp(ipvs))
 			return NF_ACCEPT;
 
 		if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))

@@ -1797,7 +1801,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 		goto out;
 	}
 
-	if (ipip) {
+	if (tunnel) {
 		__be32 info = ic->un.gateway;
 		__u8 type = ic->type;
 		__u8 code = ic->code;

@@ -1809,17 +1813,18 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 			u32 mtu = ntohs(ic->un.frag.mtu);
 			__be16 frag_off = cih->frag_off;
 
-			/* Strip outer IP and ICMP, go to IPIP header */
+			/* Strip outer IP and ICMP, go to IPIP/UDP/GRE header */
 			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
-				goto ignore_ipip;
+				goto ignore_tunnel;
 			offset2 -= ihl + sizeof(_icmph);
 			skb_reset_network_header(skb);
-			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
-				  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
+			IP_VS_DBG(12, "ICMP for %s %pI4->%pI4: mtu=%u\n",
+				  outer_proto, &ip_hdr(skb)->saddr,
+				  &ip_hdr(skb)->daddr, mtu);
 			ipv4_update_pmtu(skb, ipvs->net, mtu, 0, 0);
 			/* Client uses PMTUD? */
 			if (!(frag_off & htons(IP_DF)))
-				goto ignore_ipip;
+				goto ignore_tunnel;
 			/* Prefer the resulting PMTU */
 			if (dest) {
 				struct ip_vs_dest_dst *dest_dst;

@@ -1832,11 +1837,11 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 				mtu -= sizeof(struct iphdr);
 			info = htonl(mtu);
 		}
-		/* Strip outer IP, ICMP and IPIP, go to IP header of
+		/* Strip outer IP, ICMP and IPIP/UDP/GRE, go to IP header of
 		 * original request.
 		 */
 		if (pskb_pull(skb, offset2) == NULL)
-			goto ignore_ipip;
+			goto ignore_tunnel;
 		skb_reset_network_header(skb);
 		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
 			  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,

@@ -1845,7 +1850,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
 		/* ICMP can be shorter but anyways, account it */
 		ip_vs_out_stats(cp, skb);
 
-ignore_ipip:
+ignore_tunnel:
 		consume_skb(skb);
 		verdict = NF_STOLEN;
 		goto out;

@@ -143,6 +143,7 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
 }
 
 static void nf_conntrack_all_lock(void)
+	__acquires(&nf_conntrack_locks_all_lock)
 {
 	int i;
 

@@ -162,6 +163,7 @@ static void nf_conntrack_all_lock(void)
 }
 
 static void nf_conntrack_all_unlock(void)
+	__releases(&nf_conntrack_locks_all_lock)
 {
 	/* All prior stores must be complete before we clear
 	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()

@@ -863,9 +865,8 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
-static inline void nf_ct_acct_update(struct nf_conn *ct,
-				     enum ip_conntrack_info ctinfo,
-				     unsigned int len)
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+		    unsigned int bytes)
 {
 	struct nf_conn_acct *acct;
 

@@ -873,10 +874,11 @@ static inline void nf_ct_acct_update(struct nf_conn *ct,
 	if (acct) {
 		struct nf_conn_counter *counter = acct->counter;
 
-		atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
-		atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
+		atomic64_add(packets, &counter[dir].packets);
+		atomic64_add(bytes, &counter[dir].bytes);
 	}
 }
+EXPORT_SYMBOL_GPL(nf_ct_acct_add);
 
 static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 			     const struct nf_conn *loser_ct)

@@ -890,7 +892,7 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 
 		/* u32 should be fine since we must have seen one packet. */
 		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
-		nf_ct_acct_update(ct, ctinfo, bytes);
+		nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
 	}
 }
 

@@ -1931,7 +1933,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 	WRITE_ONCE(ct->timeout, extra_jiffies);
 acct:
 	if (do_acct)
-		nf_ct_acct_update(ct, ctinfo, skb->len);
+		nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 

@@ -1939,7 +1941,7 @@ bool nf_ct_kill_acct(struct nf_conn *ct,
 		     enum ip_conntrack_info ctinfo,
 		     const struct sk_buff *skb)
 {
-	nf_ct_acct_update(ct, ctinfo, skb->len);
+	nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
 
 	return nf_ct_delete(ct, 0, 0);
 }

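The __acquires()/__releases() lines above are sparse annotations: under a context-checking sparse run they declare that the function intentionally returns with the lock held (or enters expecting it held), silencing "context imbalance" warnings; in a normal build they expand to nothing. A self-contained sketch of the idiom:

/* Minimal sketch of the sparse lock-context annotations. */
static DEFINE_SPINLOCK(demo_lock);

static void demo_lock_all(void)
	__acquires(&demo_lock)
{
	spin_lock(&demo_lock);		/* deliberately held past return */
}

static void demo_unlock_all(void)
	__releases(&demo_lock)
{
	spin_unlock(&demo_lock);
}
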
@@ -860,7 +860,7 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
 	struct ctnetlink_filter *filter;
 
 #ifndef CONFIG_NF_CONNTRACK_MARK
-	if (cda[CTA_MARK] && cda[CTA_MARK_MASK])
+	if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
 		return ERR_PTR(-EOPNOTSUPP);
 #endif
 

@@ -1533,6 +1533,7 @@ static int
 ctnetlink_parse_nat_setup(struct nf_conn *ct,
 			  enum nf_nat_manip_type manip,
 			  const struct nlattr *attr)
+	__must_hold(RCU)
 {
 	struct nf_nat_hook *nat_hook;
 	int err;

@@ -392,7 +392,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
 	struct flow_block_cb *block_cb;
 	int err = 0;
 
-	mutex_lock(&flow_table->flow_block_lock);
+	down_write(&flow_table->flow_block_lock);
 	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
 	if (block_cb) {
 		err = -EEXIST;

@@ -408,7 +408,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
 	list_add_tail(&block_cb->list, &block->cb_list);
 
 unlock:
-	mutex_unlock(&flow_table->flow_block_lock);
+	up_write(&flow_table->flow_block_lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);

@@ -419,13 +419,13 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
 	struct flow_block *block = &flow_table->flow_block;
 	struct flow_block_cb *block_cb;
 
-	mutex_lock(&flow_table->flow_block_lock);
+	down_write(&flow_table->flow_block_lock);
 	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
 	if (block_cb)
 		list_del(&block_cb->list);
 	else
 		WARN_ON(true);
-	mutex_unlock(&flow_table->flow_block_lock);
+	up_write(&flow_table->flow_block_lock);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
 

@@ -551,7 +551,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
 
 	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
 	flow_block_init(&flowtable->flow_block);
-	mutex_init(&flowtable->flow_block_lock);
+	init_rwsem(&flowtable->flow_block_lock);
 
 	err = rhashtable_init(&flowtable->rhashtable,
 			      &nf_flow_offload_rhash_params);

@@ -617,7 +617,6 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
 	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
 			      flow_table);
 	rhashtable_destroy(&flow_table->rhashtable);
-	mutex_destroy(&flow_table->flow_block_lock);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
 

@@ -12,6 +12,7 @@
 #include <net/ip6_route.h>
 #include <net/neighbour.h>
 #include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_acct.h>
 /* For layer 4 checksum field offset. */
 #include <linux/tcp.h>
 #include <linux/udp.h>

@@ -289,6 +290,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	ip_decrease_ttl(iph);
 	skb->tstamp = 0;
 
+	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
 	if (unlikely(dst_xfrm(&rt->dst))) {
 		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
 		IPCB(skb)->iif = skb->dev->ifindex;

@@ -522,6 +526,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 	ip6h->hop_limit--;
 	skb->tstamp = 0;
 
+	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
 	if (unlikely(dst_xfrm(&rt->dst))) {
 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 		IP6CB(skb)->iif = skb->dev->ifindex;

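With NF_FLOWTABLE_COUNTER set, the software fast path above charges every forwarded skb to the flow's conntrack entry, so byte/packet accounting keeps working even though packets bypass the classic conntrack hooks; the hardware offload path feeds the same counters in batches from FLOW_CLS_STATS replies. A condensed view of the two consumers (both appear in this series):

/* Software fast path: one packet at a time. */
if (flow_table->flags & NF_FLOWTABLE_COUNTER)
	nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

/* Hardware offload: batched statistics, folded in later by
 * flow_offload_work_stats(). */
if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER)
	nf_ct_acct_add(offload->flow->ct, FLOW_OFFLOAD_DIR_ORIGINAL,
		       stats[0].pkts, stats[0].bytes);
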
@@ -9,12 +9,11 @@
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-static struct work_struct nf_flow_offload_work;
-static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
-static LIST_HEAD(flow_offload_pending_list);
+static struct workqueue_struct *nf_flow_offload_wq;
 
 struct flow_offload_work {
 	struct list_head	list;

@@ -22,6 +21,7 @@ struct flow_offload_work {
 	int			priority;
 	struct nf_flowtable	*flowtable;
 	struct flow_offload	*flow;
+	struct work_struct	work;
 };
 
 #define NF_FLOW_DISSECTOR(__match, __type, __field)	\

@@ -92,7 +92,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
 
-	if (other_dst->lwtstate) {
+	if (other_dst && other_dst->lwtstate) {
 		tun_info = lwt_tun_info(other_dst->lwtstate);
 		nf_flow_rule_lwt_match(match, tun_info);
 	}

@@ -484,7 +484,7 @@ static void flow_offload_encap_tunnel(const struct flow_offload *flow,
 	struct dst_entry *dst;
 
 	dst = flow->tuplehash[dir].tuple.dst_cache;
-	if (dst->lwtstate) {
+	if (dst && dst->lwtstate) {
 		struct ip_tunnel_info *tun_info;
 
 		tun_info = lwt_tun_info(dst->lwtstate);

@@ -504,7 +504,7 @@ static void flow_offload_decap_tunnel(const struct flow_offload *flow,
 	struct dst_entry *dst;
 
 	dst = flow->tuplehash[!dir].tuple.dst_cache;
-	if (dst->lwtstate) {
+	if (dst && dst->lwtstate) {
 		struct ip_tunnel_info *tun_info;
 
 		tun_info = lwt_tun_info(dst->lwtstate);

@@ -692,7 +692,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
 	if (cmd == FLOW_CLS_REPLACE)
 		cls_flow.rule = flow_rule->rule;
 
-	mutex_lock(&flowtable->flow_block_lock);
+	down_read(&flowtable->flow_block_lock);
 	list_for_each_entry(block_cb, block_cb_list, list) {
 		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
 				   block_cb->cb_priv);

@@ -701,7 +701,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
 
 		i++;
 	}
-	mutex_unlock(&flowtable->flow_block_lock);
+	up_read(&flowtable->flow_block_lock);
 
 	if (cmd == FLOW_CLS_STATS)
 		memcpy(stats, &cls_flow.stats, sizeof(*stats));

@@ -785,19 +785,25 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
 	lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
 	offload->flow->timeout = max_t(u64, offload->flow->timeout,
 				       lastused + NF_FLOW_TIMEOUT);
+
+	if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
+		if (stats[0].pkts)
+			nf_ct_acct_add(offload->flow->ct,
+				       FLOW_OFFLOAD_DIR_ORIGINAL,
+				       stats[0].pkts, stats[0].bytes);
+		if (stats[1].pkts)
+			nf_ct_acct_add(offload->flow->ct,
+				       FLOW_OFFLOAD_DIR_REPLY,
+				       stats[1].pkts, stats[1].bytes);
+	}
 }
 
 static void flow_offload_work_handler(struct work_struct *work)
 {
-	struct flow_offload_work *offload, *next;
-	LIST_HEAD(offload_pending_list);
+	struct flow_offload_work *offload;
 
-	spin_lock_bh(&flow_offload_pending_list_lock);
-	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
-	spin_unlock_bh(&flow_offload_pending_list_lock);
-
-	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
-		switch (offload->cmd) {
+	offload = container_of(work, struct flow_offload_work, work);
+	switch (offload->cmd) {
 	case FLOW_CLS_REPLACE:
 		flow_offload_work_add(offload);
 		break;

@@ -809,19 +815,14 @@ static void flow_offload_work_handler(struct work_struct *work)
 		break;
 	default:
 		WARN_ON_ONCE(1);
-		}
-		list_del(&offload->list);
-		kfree(offload);
 	}
 
+	kfree(offload);
 }
 
 static void flow_offload_queue_work(struct flow_offload_work *offload)
 {
-	spin_lock_bh(&flow_offload_pending_list_lock);
-	list_add_tail(&offload->list, &flow_offload_pending_list);
-	spin_unlock_bh(&flow_offload_pending_list_lock);
-
-	schedule_work(&nf_flow_offload_work);
+	queue_work(nf_flow_offload_wq, &offload->work);
 }
 
 static struct flow_offload_work *

@@ -838,6 +839,7 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
 	offload->flow = flow;
 	offload->priority = flowtable->priority;
 	offload->flowtable = flowtable;
+	INIT_WORK(&offload->work, flow_offload_work_handler);
 
 	return offload;
 }

@@ -888,7 +890,7 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
 {
 	if (nf_flowtable_hw_offload(flowtable))
-		flush_work(&nf_flow_offload_work);
+		flush_workqueue(nf_flow_offload_wq);
 }
 
 static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,

@@ -939,7 +941,7 @@ static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
 {
 	nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
 					 extack);
-	flow_indr_block_call(dev, bo, cmd);
+	flow_indr_block_call(dev, bo, cmd, TC_SETUP_FT);
 
 	if (list_empty(&bo->cb_list))
 		return -EOPNOTSUPP;

@@ -1053,7 +1055,10 @@ static struct flow_indr_block_entry block_ing_entry = {
 
 int nf_flow_table_offload_init(void)
 {
-	INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);
+	nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
+					     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+	if (!nf_flow_offload_wq)
+		return -ENOMEM;
 
 	flow_indr_add_block_cb(&block_ing_entry);
 

@@ -1062,15 +1067,6 @@ int nf_flow_table_offload_init(void)
 
 void nf_flow_table_offload_exit(void)
 {
-	struct flow_offload_work *offload, *next;
-	LIST_HEAD(offload_pending_list);
-
 	flow_indr_del_block_cb(&block_ing_entry);
-
-	cancel_work_sync(&nf_flow_offload_work);
-
-	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
-		list_del(&offload->list);
-		kfree(offload);
-	}
+	destroy_workqueue(nf_flow_offload_wq);
 }

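Embedding a work_struct in each flow_offload_work removes the single-work-item bottleneck: every offload command becomes its own work entry on a dedicated WQ_UNBOUND workqueue, so independent commands can run concurrently and be flushed with flush_workqueue(). The pattern in isolation, with illustrative demo_* names:

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Created elsewhere with alloc_workqueue(..., WQ_UNBOUND | WQ_MEM_RECLAIM, 0). */
static struct workqueue_struct *demo_wq;

struct demo_cmd {
	struct work_struct	work;
	int			payload;
};

static void demo_handler(struct work_struct *work)
{
	struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

	/* ... process cmd->payload ... */
	kfree(cmd);			/* the handler owns and frees the request */
}

static int demo_submit(int payload)
{
	struct demo_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return -ENOMEM;
	cmd->payload = payload;
	INIT_WORK(&cmd->work, demo_handler);
	queue_work(demo_wq, &cmd->work);	/* one work item per command */
	return 0;
}
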
@@ -46,25 +46,7 @@ void nf_unregister_queue_handler(struct net *net)
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
-static void nf_queue_entry_release_br_nf_refs(struct sk_buff *skb)
-{
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
-
-	if (nf_bridge) {
-		struct net_device *physdev;
-
-		physdev = nf_bridge_get_physindev(skb);
-		if (physdev)
-			dev_put(physdev);
-		physdev = nf_bridge_get_physoutdev(skb);
-		if (physdev)
-			dev_put(physdev);
-	}
-#endif
-}
-
-void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
 	struct nf_hook_state *state = &entry->state;
 

@@ -76,24 +58,34 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 	if (state->sk)
 		sock_put(state->sk);
 
-	nf_queue_entry_release_br_nf_refs(entry->skb);
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (entry->physin)
+		dev_put(entry->physin);
+	if (entry->physout)
+		dev_put(entry->physout);
+#endif
 }
-EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
 
-static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
+void nf_queue_entry_free(struct nf_queue_entry *entry)
+{
+	nf_queue_entry_release_refs(entry);
+	kfree(entry);
+}
+EXPORT_SYMBOL_GPL(nf_queue_entry_free);
+
+static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
 {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+	const struct sk_buff *skb = entry->skb;
+	struct nf_bridge_info *nf_bridge;
 
+	nf_bridge = nf_bridge_info_get(skb);
 	if (nf_bridge) {
-		struct net_device *physdev;
-
-		physdev = nf_bridge_get_physindev(skb);
-		if (physdev)
-			dev_hold(physdev);
-		physdev = nf_bridge_get_physoutdev(skb);
-		if (physdev)
-			dev_hold(physdev);
+		entry->physin = nf_bridge_get_physindev(skb);
+		entry->physout = nf_bridge_get_physoutdev(skb);
+	} else {
+		entry->physin = NULL;
+		entry->physout = NULL;
 	}
 #endif
 }

@@ -110,7 +102,12 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 	if (state->sk)
 		sock_hold(state->sk);
 
-	nf_queue_entry_get_br_nf_refs(entry->skb);
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (entry->physin)
+		dev_hold(entry->physin);
+	if (entry->physout)
+		dev_hold(entry->physout);
+#endif
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 

@@ -158,18 +155,16 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
 static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		      unsigned int index, unsigned int queuenum)
 {
-	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
 	const struct nf_queue_handler *qh;
 	struct net *net = state->net;
 	unsigned int route_key_size;
+	int status;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
 	qh = rcu_dereference(net->nf.queue_handler);
-	if (!qh) {
-		status = -ESRCH;
-		goto err;
-	}
+	if (!qh)
+		return -ESRCH;
 
 	switch (state->pf) {
 	case AF_INET:

@@ -184,14 +179,12 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 	}
 
 	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
-	if (!entry) {
-		status = -ENOMEM;
-		goto err;
-	}
+	if (!entry)
+		return -ENOMEM;
 
 	if (skb_dst(skb) && !skb_dst_force(skb)) {
-		status = -ENETDOWN;
-		goto err;
+		kfree(entry);
+		return -ENETDOWN;
 	}
 
 	*entry = (struct nf_queue_entry) {

@@ -201,6 +194,8 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		.size	= sizeof(*entry) + route_key_size,
 	};
 
+	__nf_queue_entry_init_physdevs(entry);
+
 	nf_queue_entry_get_refs(entry);
 
 	switch (entry->state.pf) {

@@ -213,17 +208,12 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 	}
 
 	status = qh->outfn(entry, queuenum);
-
 	if (status < 0) {
-		nf_queue_entry_release_refs(entry);
-		goto err;
+		nf_queue_entry_free(entry);
+		return status;
 	}
 
 	return 0;
-
-err:
-	kfree(entry);
-	return status;
 }
 
 /* Packets leaving via this function must come back through nf_reinject(). */

@@ -304,12 +294,10 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
 	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
 
-	nf_queue_entry_release_refs(entry);
-
 	i = entry->hook_index;
 	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
 		kfree_skb(skb);
-		kfree(entry);
+		nf_queue_entry_free(entry);
 		return;
 	}
 

@@ -348,6 +336,6 @@ next_hook:
 		kfree_skb(skb);
 	}
 
-	kfree(entry);
+	nf_queue_entry_free(entry);
 }
 EXPORT_SYMBOL(nf_reinject);

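Taken together, the nf_queue changes concentrate entry ownership in one place: bridge physdevs are resolved once at enqueue time into entry->physin/physout, nf_queue_entry_release_refs() becomes a static helper, and nf_queue_entry_free() is the single release-and-kfree exit used by nf_queue, nf_reinject() and nfnetlink_queue. The resulting lifecycle, condensed from the hunks above:

entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
/* ... fill *entry ... */
__nf_queue_entry_init_physdevs(entry);	/* resolve physin/physout once */
nf_queue_entry_get_refs(entry);		/* hold netdev/sk/physdev references */

status = qh->outfn(entry, queuenum);	/* hand off to the queue handler */
if (status < 0)
	nf_queue_entry_free(entry);	/* drop all references, then kfree() */
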
@@ -520,7 +520,8 @@ static struct nft_table *nft_table_lookup(const struct net *net,
 	if (nla == NULL)
 		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry_rcu(table, &net->nft.tables, list) {
+	list_for_each_entry_rcu(table, &net->nft.tables, list,
+				lockdep_is_held(&net->nft.commit_mutex)) {
 		if (!nla_strcmp(nla, table->name) &&
 		    table->family == family &&
 		    nft_active_genmask(table, genmask))

@@ -2557,6 +2558,24 @@ err1:
 	return ERR_PTR(err);
 }
 
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+{
+	int err;
+
+	if (src->ops->clone) {
+		dst->ops = src->ops;
+		err = src->ops->clone(dst, src);
+		if (err < 0)
+			return err;
+	} else {
+		memcpy(dst, src, src->ops->size);
+	}
+
+	__module_get(src->ops->type->owner);
+
+	return 0;
+}
+
 void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
 {
 	nf_tables_expr_destroy(ctx, expr);

@@ -3376,6 +3395,7 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
 					    .len  = NFT_USERDATA_MAXLEN },
 	[NFTA_SET_OBJ_TYPE]		= { .type = NLA_U32 },
 	[NFTA_SET_HANDLE]		= { .type = NLA_U64 },
+	[NFTA_SET_EXPR]			= { .type = NLA_NESTED },
 };
 
 static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {

@@ -3579,8 +3599,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 {
 	struct nfgenmsg *nfmsg;
 	struct nlmsghdr *nlh;
-	struct nlattr *desc;
 	u32 portid = ctx->portid;
+	struct nlattr *nest;
 	u32 seq = ctx->seq;
 
 	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);

@@ -3636,9 +3656,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 	if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
 		goto nla_put_failure;
 
-	desc = nla_nest_start_noflag(skb, NFTA_SET_DESC);
-
-	if (desc == NULL)
+	nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
+	if (!nest)
 		goto nla_put_failure;
 	if (set->size &&
 	    nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))

@@ -3648,7 +3667,15 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 	    nf_tables_fill_set_concat(skb, set))
 		goto nla_put_failure;
 
-	nla_nest_end(skb, desc);
+	nla_nest_end(skb, nest);
+
+	if (set->expr) {
+		nest = nla_nest_start_noflag(skb, NFTA_SET_EXPR);
+		if (nf_tables_fill_expr_info(skb, set->expr) < 0)
+			goto nla_put_failure;
+
+		nla_nest_end(skb, nest);
+	}
 
 	nlmsg_end(skb, nlh);
 	return 0;

@@ -3895,6 +3922,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 	u8 genmask = nft_genmask_next(net);
 	int family = nfmsg->nfgen_family;
 	const struct nft_set_ops *ops;
+	struct nft_expr *expr = NULL;
 	struct nft_table *table;
 	struct nft_set *set;
 	struct nft_ctx ctx;

@@ -4004,6 +4032,9 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 			return err;
 	}
 
+	if (nla[NFTA_SET_EXPR])
+		desc.expr = true;
+
 	table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask);
 	if (IS_ERR(table)) {
 		NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);

@@ -4051,13 +4082,21 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 	name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL);
 	if (!name) {
 		err = -ENOMEM;
-		goto err2;
+		goto err_set_name;
 	}
 
 	err = nf_tables_set_alloc_name(&ctx, set, name);
 	kfree(name);
 	if (err < 0)
-		goto err2;
+		goto err_set_alloc_name;
+
+	if (nla[NFTA_SET_EXPR]) {
+		expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
+		if (IS_ERR(expr)) {
+			err = PTR_ERR(expr);
+			goto err_set_alloc_name;
+		}
+	}
 
 	udata = NULL;
 	if (udlen) {

@@ -4074,6 +4113,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 	set->dtype = dtype;
 	set->objtype = objtype;
 	set->dlen  = desc.dlen;
+	set->expr = expr;
 	set->flags = flags;
 	set->size  = desc.size;
 	set->policy = policy;

@@ -4089,30 +4129,36 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
 	err = ops->init(set, &desc, nla);
 	if (err < 0)
-		goto err3;
+		goto err_set_init;
 
 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
 	if (err < 0)
-		goto err4;
+		goto err_set_trans;
 
 	list_add_tail_rcu(&set->list, &table->sets);
 	table->use++;
 	return 0;
 
-err4:
+err_set_trans:
 	ops->destroy(set);
-err3:
+err_set_init:
+	if (expr)
+		nft_expr_destroy(&ctx, expr);
+err_set_alloc_name:
 	kfree(set->name);
-err2:
+err_set_name:
 	kvfree(set);
 	return err;
 }
 
-static void nft_set_destroy(struct nft_set *set)
+static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	if (WARN_ON(set->use > 0))
 		return;
 
+	if (set->expr)
+		nft_expr_destroy(ctx, set->expr);
+
 	set->ops->destroy(set);
 	kfree(set->name);
 	kvfree(set);

@@ -4253,7 +4299,7 @@ EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
-		nft_set_destroy(set);
+		nft_set_destroy(ctx, set);
 }
 EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
 

@@ -4840,6 +4886,17 @@ void *nft_set_elem_init(const struct nft_set *set,
 	return elem;
 }
 
+static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+				      struct nft_expr *expr)
+{
+	if (expr->ops->destroy_clone) {
+		expr->ops->destroy_clone(ctx, expr);
+		module_put(expr->ops->type->owner);
+	} else {
+		nf_tables_expr_destroy(ctx, expr);
+	}
+}
+
 void nft_set_elem_destroy(const struct nft_set *set, void *elem,
 			  bool destroy_expr)
 {

@@ -4852,16 +4909,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
 	nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
 		nft_data_release(nft_set_ext_data(ext), set->dtype);
-	if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
-		struct nft_expr *expr = nft_set_ext_expr(ext);
+	if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+		nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));
 
-		if (expr->ops->destroy_clone) {
-			expr->ops->destroy_clone(&ctx, expr);
-			module_put(expr->ops->type->owner);
-		} else {
-			nf_tables_expr_destroy(&ctx, expr);
-		}
-	}
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
 		(*nft_set_ext_obj(ext))->use--;
 	kfree(elem);

@@ -4877,7 +4927,8 @@ static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
 	struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
 
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
-		nf_tables_expr_destroy(ctx, nft_set_ext_expr(ext));
+		nft_set_elem_expr_destroy(ctx, nft_set_ext_expr(ext));
+
 	kfree(elem);
 }
 

@@ -4964,6 +5015,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 					 nla[NFTA_SET_ELEM_EXPR]);
 		if (IS_ERR(expr))
 			return PTR_ERR(expr);
+
+		err = -EOPNOTSUPP;
+		if (set->expr && set->expr->ops != expr->ops)
+			goto err_set_elem_expr;
+	} else if (set->expr) {
+		expr = kzalloc(set->expr->ops->size, GFP_KERNEL);
+		if (!expr)
+			return -ENOMEM;
+
+		err = nft_expr_clone(expr, set->expr);
+		if (err < 0)
+			goto err_set_elem_expr;
 	}
 
 	err = nft_setelem_parse_key(ctx, set, &elem.key.val,

@@ -5079,6 +5142,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 	if (expr) {
 		memcpy(nft_set_ext_expr(ext), expr, expr->ops->size);
 		kfree(expr);
+		expr = NULL;
 	}
 
 	trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);

@@ -5132,7 +5196,8 @@ err_element_clash:
 err_trans:
 	if (obj)
 		obj->use--;
-	kfree(elem.priv);
+
+	nf_tables_set_elem_destroy(ctx, set, elem.priv);
 err_parse_data:
 	if (nla[NFTA_SET_ELEM_DATA] != NULL)
 		nft_data_release(&data, desc.type);

@@ -6319,7 +6384,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 	if (nla[NFTA_FLOWTABLE_FLAGS]) {
 		flowtable->data.flags =
 			ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
-		if (flowtable->data.flags & ~NF_FLOWTABLE_HW_OFFLOAD)
+		if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK)
 			goto err3;
 	}
 

@@ -7007,7 +7072,7 @@ static void nft_commit_release(struct nft_trans *trans)
 		nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
 		break;
 	case NFT_MSG_DELSET:
-		nft_set_destroy(nft_trans_set(trans));
+		nft_set_destroy(&trans->ctx, nft_trans_set(trans));
 		break;
 	case NFT_MSG_DELSETELEM:
 		nf_tables_set_elem_destroy(&trans->ctx,

@@ -7438,7 +7503,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
 		nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
 		break;
 	case NFT_MSG_NEWSET:
-		nft_set_destroy(nft_trans_set(trans));
+		nft_set_destroy(&trans->ctx, nft_trans_set(trans));
 		break;
 	case NFT_MSG_NEWSETELEM:
 		nft_set_elem_destroy(nft_trans_elem_set(trans),

@@ -8164,7 +8229,7 @@ static void __nft_release_tables(struct net *net)
 		list_for_each_entry_safe(set, ns, &table->sets, list) {
 			list_del(&set->list);
 			table->use--;
-			nft_set_destroy(set);
+			nft_set_destroy(&ctx, set);
 		}
 		list_for_each_entry_safe(obj, ne, &table->objects, list) {
 			nft_obj_del(obj);

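Exporting nft_expr_clone() from the core (it was previously a static helper in nft_dynset.c, removed below) is what makes per-set stateful expressions work at element insertion time: an element added without its own expression inherits a clone of the set's template, as in the nft_add_set_elem() hunk above. The inheritance step, condensed:

/* Element without an explicit expression clones the set's template. */
} else if (set->expr) {
	expr = kzalloc(set->expr->ops->size, GFP_KERNEL);
	if (!expr)
		return -ENOMEM;

	err = nft_expr_clone(expr, set->expr);	/* ops->clone() or memcpy() */
	if (err < 0)
		goto err_set_elem_expr;
}
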
@@ -313,7 +313,7 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
 
 	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
 
-	flow_indr_block_call(dev, &bo, cmd);
+	flow_indr_block_call(dev, &bo, cmd, TC_SETUP_BLOCK);
 
 	if (list_empty(&bo.cb_list))
 		return -EOPNOTSUPP;

@@ -737,12 +737,6 @@ static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
 #define nf_bridge_adjust_segmented_data(s) do {} while (0)
 #endif
 
-static void free_entry(struct nf_queue_entry *entry)
-{
-	nf_queue_entry_release_refs(entry);
-	kfree(entry);
-}
-
 static int
 __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
 			   struct sk_buff *skb, struct nf_queue_entry *entry)

@@ -768,7 +762,7 @@ __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
 		entry_seg->skb = skb;
 		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
 		if (ret)
-			free_entry(entry_seg);
+			nf_queue_entry_free(entry_seg);
 	}
 	return ret;
 }

@@ -827,7 +821,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
 	if (queued) {
 		if (err) /* some segments are already queued */
-			free_entry(entry);
+			nf_queue_entry_free(entry);
 		kfree_skb(skb);
 		return 0;
 	}

@@ -24,23 +24,6 @@ struct nft_dynset {
 	struct nft_set_binding	binding;
 };
 
-static int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
-{
-	int err;
-
-	if (src->ops->clone) {
-		dst->ops = src->ops;
-		err = src->ops->clone(dst, src);
-		if (err < 0)
-			return err;
-	} else {
-		memcpy(dst, src, src->ops->size);
-	}
-
-	__module_get(src->ops->type->owner);
-	return 0;
-}
-
 static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
 			    struct nft_regs *regs)
 {

@@ -204,6 +187,11 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 						 tb[NFTA_DYNSET_EXPR]);
 		if (IS_ERR(priv->expr))
 			return PTR_ERR(priv->expr);
+
+		if (set->expr && set->expr->ops != priv->expr->ops) {
+			err = -EOPNOTSUPP;
+			goto err_expr_free;
+		}
 	}
 
 	nft_set_ext_prepare(&priv->tmpl);

@@ -222,7 +210,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 
 	err = nf_tables_bind_set(ctx, set, &priv->binding);
 	if (err < 0)
-		goto err1;
+		goto err_expr_free;
 
 	if (set->size == 0)
 		set->size = 0xffff;

@@ -230,7 +218,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	priv->set = set;
 	return 0;
 
-err1:
+err_expr_free:
 	if (priv->expr != NULL)
 		nft_expr_destroy(ctx, priv->expr);
 	return err;

@@ -228,7 +228,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 	unsigned int i, optl, tcphdr_len, offset;
 	struct tcphdr *tcph;
 	u8 *opt;
-	u32 src;
 
 	tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
 	if (!tcph)

@@ -237,7 +236,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 	opt = (u8 *)tcph;
 	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
 		union {
-			u8 octet;
 			__be16 v16;
 			__be32 v32;
 		} old, new;

@@ -259,13 +257,13 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 		if (!tcph)
 			return;
 
-		src = regs->data[priv->sreg];
 		offset = i + priv->offset;
 
 		switch (priv->len) {
 		case 2:
 			old.v16 = get_unaligned((u16 *)(opt + offset));
-			new.v16 = src;
+			new.v16 = (__force __be16)nft_reg_load16(
+				&regs->data[priv->sreg]);
 
 			switch (priv->type) {
 			case TCPOPT_MSS:

@@ -283,7 +281,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 					  old.v16, new.v16, false);
 			break;
 		case 4:
-			new.v32 = src;
+			new.v32 = regs->data[priv->sreg];
 			old.v32 = get_unaligned((u32 *)(opt + offset));
 
 			if (old.v32 == new.v32)

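The exthdr change is the endianness fix from the cover letter: a two-byte option value sits in the first two bytes of its 32-bit register, so truncating the whole u32 to __be16 picks the wrong bytes on big-endian hosts. nft_reg_load16() reads exactly those two bytes, and the __force cast tells sparse the value is already in on-wire order. For reference, the accessor as defined in nf_tables.h at the time:

/* Registers are arrays of u32; a 16-bit value occupies the first two
 * bytes of its register, independent of host byte order. */
static inline u16 nft_reg_load16(u32 *sreg)
{
	return *(u16 *)sreg;
}
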
@@ -81,6 +81,7 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
 	u32 idx, off;
 
 	nft_bitmap_location(set, key, &idx, &off);
+	*ext = NULL;
 
 	return nft_bitmap_active(priv->bitmap, idx, off, genmask);
 }

@@ -285,6 +286,8 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
 	/* Make sure bitmaps we don't get bitmaps larger than 16 Kbytes. */
 	if (desc->klen > 2)
 		return false;
+	else if (desc->expr)
+		return false;
 
 	est->size = nft_bitmap_total_size(desc->klen);
 	est->lookup = NFT_SET_CLASS_O_1;

@@ -708,7 +708,7 @@ static void tc_indr_block_call(struct tcf_block *block,
 	};
 	INIT_LIST_HEAD(&bo.cb_list);
 
-	flow_indr_block_call(dev, &bo, command);
+	flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
 	tcf_block_setup(block, &bo);
 }