Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for net-next, they are:

1) Fix an incorrect uapi header comment in bitwise, from Jeremy Sowden.

2) Fetch flow statistics only if the flow is still active.

3) Restrict hardware flow matching to the flow's input device.

4) Add the nf_flow_offload_work_alloc() helper function.

5) Remove the last user of the FLOW_OFFLOAD_DYING flag, use teardown instead.

6) Use atomic bitwise operations to operate on flow flags.

7) Add the nf_flowtable_hw_offload() helper function to check for the
   NF_FLOWTABLE_HW_OFFLOAD flag.

8) Add NF_FLOW_HW_REFRESH to retry hardware offload from the flowtable
   software datapath.

9) Remove indirect calls in xt_hashlimit, from Florian Westphal.

10) Add the nf_flow_offload_tuple() helper to consolidate code.

11) Add the nf_flow_table_offload_cmd() helper function.

12) A few whitespace cleanups in nf_tables' bitwise and the bitmap/hash
    set types, from Jeremy Sowden.

13) Clean up netlink attribute checks in bitwise, from Jeremy Sowden.

14) Replace goto with return in the error path of nft_bitwise_dump(),
    from Jeremy Sowden.

15) Add a bitwise operation netlink attribute, also from Jeremy.

16) Add nft_bitwise_init_bool(), from Jeremy Sowden.

17) Add nft_bitwise_eval_bool(), also from Jeremy.

18) Add nft_bitwise_dump_bool(), from Jeremy Sowden.

19) Disallow hardware offload for operations other than NFT_BITWISE_BOOL,
    from Jeremy Sowden.

20) Add the NFTA_BITWISE_DATA netlink attribute, again from Jeremy.

21) Add support for bitwise shift operations, from Jeremy Sowden.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7f013edeba
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -47,6 +47,11 @@ struct nf_flowtable {
 	possible_net_t			net;
 };
 
+static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
+{
+	return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
+}
+
 enum flow_offload_tuple_dir {
 	FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
 	FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
@@ -83,13 +88,15 @@ struct flow_offload_tuple_rhash {
 	struct flow_offload_tuple	tuple;
 };
 
-#define FLOW_OFFLOAD_SNAT	0x1
-#define FLOW_OFFLOAD_DNAT	0x2
-#define FLOW_OFFLOAD_DYING	0x4
-#define FLOW_OFFLOAD_TEARDOWN	0x8
-#define FLOW_OFFLOAD_HW		0x10
-#define FLOW_OFFLOAD_HW_DYING	0x20
-#define FLOW_OFFLOAD_HW_DEAD	0x40
+enum nf_flow_flags {
+	NF_FLOW_SNAT,
+	NF_FLOW_DNAT,
+	NF_FLOW_TEARDOWN,
+	NF_FLOW_HW,
+	NF_FLOW_HW_DYING,
+	NF_FLOW_HW_DEAD,
+	NF_FLOW_HW_REFRESH,
+};
 
 enum flow_offload_type {
 	NF_FLOW_OFFLOAD_UNSPEC	= 0,
@@ -99,7 +106,7 @@ enum flow_offload_type {
 struct flow_offload {
 	struct flow_offload_tuple_rhash	tuplehash[FLOW_OFFLOAD_DIR_MAX];
 	struct nf_conn			*ct;
-	u16				flags;
+	unsigned long			flags;
 	u16				type;
 	u32				timeout;
 	struct rcu_head			rcu_head;
@@ -134,10 +141,6 @@ int nf_flow_table_init(struct nf_flowtable *flow_table);
 void nf_flow_table_free(struct nf_flowtable *flow_table);
 
 void flow_offload_teardown(struct flow_offload *flow);
-static inline void flow_offload_dead(struct flow_offload *flow)
-{
-	flow->flags |= FLOW_OFFLOAD_DYING;
-}
 
 int nf_flow_snat_port(const struct flow_offload *flow,
 		      struct sk_buff *skb, unsigned int thoff,
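The flags conversion above is the heart of change 6): a plain u16 updated with "flags |= X" becomes an unsigned long manipulated with the kernel's set_bit()/test_bit() family, which only operates on unsigned long words and makes every update a single atomic read-modify-write. A minimal userspace sketch of the semantics the flowtable code now relies on (C11 atomics standing in for the kernel helpers; this is an illustration, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum nf_flow_flags { NF_FLOW_SNAT, NF_FLOW_DNAT, NF_FLOW_TEARDOWN, NF_FLOW_HW,
		     NF_FLOW_HW_DYING, NF_FLOW_HW_DEAD, NF_FLOW_HW_REFRESH };

/* like kernel set_bit(): one atomic read-modify-write, so two racing
 * writers cannot lose each other's flags the way two concurrent
 * "flags |= X" updates on a plain u16 could */
static void emul_set_bit(int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

static bool emul_test_bit(int nr, const _Atomic unsigned long *addr)
{
	return atomic_load(addr) & (1UL << nr);
}

int main(void)
{
	_Atomic unsigned long flags = 0;

	emul_set_bit(NF_FLOW_SNAT, &flags);
	emul_set_bit(NF_FLOW_HW, &flags);
	printf("SNAT=%d DNAT=%d HW=%d\n",
	       emul_test_bit(NF_FLOW_SNAT, &flags),
	       emul_test_bit(NF_FLOW_DNAT, &flags),
	       emul_test_bit(NF_FLOW_HW, &flags));
	return 0;
}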
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -484,6 +484,20 @@ enum nft_immediate_attributes {
 };
 #define NFTA_IMMEDIATE_MAX	(__NFTA_IMMEDIATE_MAX - 1)
 
+/**
+ * enum nft_bitwise_ops - nf_tables bitwise operations
+ *
+ * @NFT_BITWISE_BOOL: mask-and-xor operation used to implement NOT, AND, OR and
+ *                    XOR boolean operations
+ * @NFT_BITWISE_LSHIFT: left-shift operation
+ * @NFT_BITWISE_RSHIFT: right-shift operation
+ */
+enum nft_bitwise_ops {
+	NFT_BITWISE_BOOL,
+	NFT_BITWISE_LSHIFT,
+	NFT_BITWISE_RSHIFT,
+};
+
 /**
  * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
  *
@@ -492,16 +506,20 @@ enum nft_immediate_attributes {
  * @NFTA_BITWISE_LEN: length of operands (NLA_U32)
  * @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
  * @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_OP: type of operation (NLA_U32: nft_bitwise_ops)
+ * @NFTA_BITWISE_DATA: argument for non-boolean operations
+ *                     (NLA_NESTED: nft_data_attributes)
  *
- * The bitwise expression performs the following operation:
+ * The bitwise expression supports boolean and shift operations.  It implements
+ * the boolean operations by performing the following operation:
  *
  *	dreg = (sreg & mask) ^ xor
  *
- * which allow to express all bitwise operations:
+ * with these mask and xor values:
  *
  *		mask	xor
  * NOT:		1	1
- * OR:		0	x
+ * OR:		~x	x
  * XOR:		1	x
  * AND:		x	0
  */
@@ -512,6 +530,8 @@ enum nft_bitwise_attributes {
 	NFTA_BITWISE_LEN,
 	NFTA_BITWISE_MASK,
 	NFTA_BITWISE_XOR,
+	NFTA_BITWISE_OP,
+	NFTA_BITWISE_DATA,
 	__NFTA_BITWISE_MAX
 };
 #define NFTA_BITWISE_MAX	(__NFTA_BITWISE_MAX - 1)
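The mask/xor table in the corrected header comment above is easy to verify: every boolean operation nft_bitwise supports reduces to the single primitive dreg = (sreg & mask) ^ xor. A small standalone C check of each row (8-bit values purely for readability; the kernel operates on 32-bit register words):

#include <stdint.h>
#include <stdio.h>

static uint8_t bitwise_bool(uint8_t sreg, uint8_t mask, uint8_t xor)
{
	return (sreg & mask) ^ xor;
}

int main(void)
{
	uint8_t s = 0x5a, x = 0x0f;

	/* NOT:  mask = all-ones, xor = all-ones */
	printf("NOT:  %02x\n", bitwise_bool(s, 0xff, 0xff));      /* == ~s  */
	/* AND x: mask = x, xor = 0 */
	printf("AND:  %02x\n", bitwise_bool(s, x, 0x00));         /* == s&x */
	/* OR x:  mask = ~x, xor = x (clear x's bits, then set them) */
	printf("OR:   %02x\n", bitwise_bool(s, (uint8_t)~x, x));  /* == s|x */
	/* XOR x: mask = all-ones, xor = x */
	printf("XOR:  %02x\n", bitwise_bool(s, 0xff, x));         /* == s^x */
	return 0;
}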
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -61,9 +61,9 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
 	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
 
 	if (ct->status & IPS_SRC_NAT)
-		flow->flags |= FLOW_OFFLOAD_SNAT;
+		__set_bit(NF_FLOW_SNAT, &flow->flags);
 	if (ct->status & IPS_DST_NAT)
-		flow->flags |= FLOW_OFFLOAD_DNAT;
+		__set_bit(NF_FLOW_DNAT, &flow->flags);
 
 	return flow;
 
@@ -182,8 +182,6 @@ void flow_offload_free(struct flow_offload *flow)
 	default:
 		break;
 	}
-	if (flow->flags & FLOW_OFFLOAD_DYING)
-		nf_ct_delete(flow->ct, 0, 0);
 	nf_ct_put(flow->ct);
 	kfree_rcu(flow, rcu_head);
 }
@@ -245,8 +243,10 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 		return err;
 	}
 
-	if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD)
+	if (nf_flowtable_hw_offload(flow_table)) {
+		__set_bit(NF_FLOW_HW, &flow->flags);
 		nf_flow_offload_add(flow_table, flow);
+	}
 
 	return 0;
 }
@@ -271,7 +271,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 
 	if (nf_flow_has_expired(flow))
 		flow_offload_fixup_ct(flow->ct);
-	else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+	else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
 		flow_offload_fixup_ct_timeout(flow->ct);
 
 	flow_offload_free(flow);
@@ -279,7 +279,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 
 void flow_offload_teardown(struct flow_offload *flow)
 {
-	flow->flags |= FLOW_OFFLOAD_TEARDOWN;
+	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
 
 	flow_offload_fixup_ct_state(flow->ct);
 }
@@ -300,7 +300,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
 
 	dir = tuplehash->tuple.dir;
 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
-	if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
+	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
 		return NULL;
 
 	if (unlikely(nf_ct_is_dying(flow->ct)))
@@ -348,19 +348,18 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
 	struct nf_flowtable *flow_table = data;
 
-	if (flow->flags & FLOW_OFFLOAD_HW)
-		nf_flow_offload_stats(flow_table, flow);
-
 	if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
-	    (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
-		if (flow->flags & FLOW_OFFLOAD_HW) {
-			if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
+	    test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+		if (test_bit(NF_FLOW_HW, &flow->flags)) {
+			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
 				nf_flow_offload_del(flow_table, flow);
-			else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
+			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
 				flow_offload_del(flow_table, flow);
 		} else {
 			flow_offload_del(flow_table, flow);
 		}
+	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
+		nf_flow_offload_stats(flow_table, flow);
 	}
 }
 
@@ -524,7 +523,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
 	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
 	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
-		flow_offload_dead(flow);
+		flow_offload_teardown(flow);
 }
 
 static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -144,11 +144,11 @@ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
 {
 	struct iphdr *iph = ip_hdr(skb);
 
-	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
 	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
 	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
 		return -1;
-	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
 	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
 	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
 		return -1;
@@ -232,6 +232,13 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
 	return NF_STOLEN;
 }
 
+static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table,
+				    struct flow_offload *flow)
+{
+	return nf_flowtable_hw_offload(flow_table) &&
+	       test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags);
+}
+
 unsigned int
 nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
@@ -272,6 +279,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
 		return NF_ACCEPT;
 
+	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
+		nf_flow_offload_add(flow_table, flow);
+
 	if (nf_flow_offload_dst_check(&rt->dst)) {
 		flow_offload_teardown(flow);
 		return NF_ACCEPT;
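Together with the NF_FLOW_HW_REFRESH bit that the work queue sets when hardware rule insertion fails (see the flow_offload_work_add() hunk further down), the hook change above forms a small retry protocol: a failed insertion flags the flow, and the next packet through the software datapath atomically consumes the flag and re-queues the offload. A condensed userspace model of that handshake (C11 atomics standing in for set_bit()/test_and_clear_bit(); an illustration, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define HW_REFRESH_BIT (1UL << 6)	/* stand-in for NF_FLOW_HW_REFRESH */

static _Atomic unsigned long flow_flags;

/* work-queue side: hardware insertion failed, request a retry */
static void offload_add_failed(void)
{
	atomic_fetch_or(&flow_flags, HW_REFRESH_BIT);
}

/* datapath side: test-and-clear in one atomic step, so exactly one
 * packet re-queues the offload no matter how many arrive concurrently */
static bool should_refresh(void)
{
	return atomic_fetch_and(&flow_flags, ~HW_REFRESH_BIT) & HW_REFRESH_BIT;
}

int main(void)
{
	offload_add_failed();
	printf("packet 1 re-queues offload: %d\n", should_refresh());
	printf("packet 2 re-queues offload: %d\n", should_refresh());
	return 0;
}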
@@ -414,11 +424,11 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow,
 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	unsigned int thoff = sizeof(*ip6h);
 
-	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
 	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
 	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
 		return -1;
-	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
 	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
 	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
 		return -1;
@@ -498,6 +508,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 				sizeof(*ip6h)))
 		return NF_ACCEPT;
 
+	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
+		nf_flow_offload_add(flow_table, flow);
+
 	if (nf_flow_offload_dst_check(&rt->dst)) {
 		flow_offload_teardown(flow);
 		return NF_ACCEPT;
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -24,6 +24,7 @@ struct flow_offload_work {
 };
 
 struct nf_flow_key {
+	struct flow_dissector_key_meta		meta;
 	struct flow_dissector_key_control	control;
 	struct flow_dissector_key_basic		basic;
 	union {
@@ -55,6 +56,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
 	struct nf_flow_key *mask = &match->mask;
 	struct nf_flow_key *key = &match->key;
 
+	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
@@ -62,6 +64,9 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
 
+	key->meta.ingress_ifindex = tuple->iifidx;
+	mask->meta.ingress_ifindex = 0xffffffff;
+
 	switch (tuple->l3proto) {
 	case AF_INET:
 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
@@ -105,7 +110,8 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
 	key->tp.dst = tuple->dst_port;
 	mask->tp.dst = 0xffff;
 
-	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
+				      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
				      BIT(FLOW_DISSECTOR_KEY_BASIC) |
				      BIT(FLOW_DISSECTOR_KEY_PORTS);
 	return 0;
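The META key added in the two hunks above is what restricts hardware matching to the flow's input device (change 3): setting the mask to all-ones turns ingress_ifindex into an exact-match field of the rule. A simplified illustration of the key/mask semantics (a userspace sketch, not the flow dissector itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct meta_match {
	uint32_t key_ifindex;	/* value to match */
	uint32_t mask_ifindex;	/* which bits of the value are relevant */
};

static bool meta_matches(const struct meta_match *m, uint32_t pkt_ifindex)
{
	return (pkt_ifindex & m->mask_ifindex) ==
	       (m->key_ifindex & m->mask_ifindex);
}

int main(void)
{
	/* all-ones mask == exact match on the ingress interface index */
	struct meta_match m = { .key_ifindex = 3, .mask_ifindex = 0xffffffff };

	printf("ifindex 3: %d\n", meta_matches(&m, 3));	/* matches */
	printf("ifindex 7: %d\n", meta_matches(&m, 7));	/* does not */
	return 0;
}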
@@ -444,16 +450,16 @@ int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
 	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
 		return -1;
 
-	if (flow->flags & FLOW_OFFLOAD_SNAT) {
+	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
 		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
 		flow_offload_port_snat(net, flow, dir, flow_rule);
 	}
-	if (flow->flags & FLOW_OFFLOAD_DNAT) {
+	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
 		flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
 		flow_offload_port_dnat(net, flow, dir, flow_rule);
 	}
-	if (flow->flags & FLOW_OFFLOAD_SNAT ||
-	    flow->flags & FLOW_OFFLOAD_DNAT)
+	if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
+	    test_bit(NF_FLOW_DNAT, &flow->flags))
 		flow_offload_ipv4_checksum(net, flow, flow_rule);
 
 	flow_offload_redirect(flow, dir, flow_rule);
@@ -470,11 +476,11 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
 	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
 		return -1;
 
-	if (flow->flags & FLOW_OFFLOAD_SNAT) {
+	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
 		flow_offload_ipv6_snat(net, flow, dir, flow_rule);
 		flow_offload_port_snat(net, flow, dir, flow_rule);
 	}
-	if (flow->flags & FLOW_OFFLOAD_DNAT) {
+	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
 		flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
 		flow_offload_port_dnat(net, flow, dir, flow_rule);
 	}
@@ -586,23 +592,25 @@ static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
 	cls_flow->cookie = (unsigned long)tuple;
 }
 
-static int flow_offload_tuple_add(struct flow_offload_work *offload,
-				  struct nf_flow_rule *flow_rule,
-				  enum flow_offload_tuple_dir dir)
+static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
+				 struct flow_offload *flow,
+				 struct nf_flow_rule *flow_rule,
+				 enum flow_offload_tuple_dir dir,
+				 int priority, int cmd,
+				 struct list_head *block_cb_list)
 {
-	struct nf_flowtable *flowtable = offload->flowtable;
 	struct flow_cls_offload cls_flow = {};
 	struct flow_block_cb *block_cb;
 	struct netlink_ext_ack extack;
 	__be16 proto = ETH_P_ALL;
 	int err, i = 0;
 
-	nf_flow_offload_init(&cls_flow, proto, offload->priority,
-			     FLOW_CLS_REPLACE,
-			     &offload->flow->tuplehash[dir].tuple, &extack);
-	cls_flow.rule = flow_rule->rule;
+	nf_flow_offload_init(&cls_flow, proto, priority, cmd,
+			     &flow->tuplehash[dir].tuple, &extack);
+	if (cmd == FLOW_CLS_REPLACE)
+		cls_flow.rule = flow_rule->rule;
 
-	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
+	list_for_each_entry(block_cb, block_cb_list, list) {
 		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
				   block_cb->cb_priv);
		if (err < 0)
@@ -614,23 +622,22 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
 	return i;
 }
 
+static int flow_offload_tuple_add(struct flow_offload_work *offload,
+				  struct nf_flow_rule *flow_rule,
+				  enum flow_offload_tuple_dir dir)
+{
+	return nf_flow_offload_tuple(offload->flowtable, offload->flow,
+				     flow_rule, dir, offload->priority,
+				     FLOW_CLS_REPLACE,
+				     &offload->flowtable->flow_block.cb_list);
+}
+
 static void flow_offload_tuple_del(struct flow_offload_work *offload,
				   enum flow_offload_tuple_dir dir)
 {
-	struct nf_flowtable *flowtable = offload->flowtable;
-	struct flow_cls_offload cls_flow = {};
-	struct flow_block_cb *block_cb;
-	struct netlink_ext_ack extack;
-	__be16 proto = ETH_P_ALL;
-
-	nf_flow_offload_init(&cls_flow, proto, offload->priority,
-			     FLOW_CLS_DESTROY,
-			     &offload->flow->tuplehash[dir].tuple, &extack);
-
-	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
-		block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
-
-	offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
+	nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
+			      offload->priority, FLOW_CLS_DESTROY,
+			      &offload->flowtable->flow_block.cb_list);
 }
 
 static int flow_offload_rule_add(struct flow_offload_work *offload,
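The consolidation above (change 10) is a plain parameterization: the add and del paths differed only in the command, the rule pointer and the callback list, so both become thin wrappers around nf_flow_offload_tuple(). A generic illustration of the pattern with hypothetical names (not kernel code):

#include <stdio.h>

enum cmd { CMD_REPLACE, CMD_DESTROY };

/* one parameterized helper holds the formerly duplicated body */
static int do_tuple_cmd(int id, enum cmd cmd)
{
	printf("tuple %d: %s\n", id,
	       cmd == CMD_REPLACE ? "replace" : "destroy");
	return 0;
}

/* the old entry points survive as one-liners, so callers stay unchanged */
static int tuple_add(int id) { return do_tuple_cmd(id, CMD_REPLACE); }
static int tuple_del(int id) { return do_tuple_cmd(id, CMD_DESTROY); }

int main(void)
{
	tuple_add(1);
	tuple_del(1);
	return 0;
}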
@@ -648,20 +655,20 @@ static int flow_offload_rule_add(struct flow_offload_work *offload,
 	return 0;
 }
 
-static int flow_offload_work_add(struct flow_offload_work *offload)
+static void flow_offload_work_add(struct flow_offload_work *offload)
 {
 	struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
 	int err;
 
 	err = nf_flow_offload_alloc(offload, flow_rule);
 	if (err < 0)
-		return -ENOMEM;
+		return;
 
 	err = flow_offload_rule_add(offload, flow_rule);
+	if (err < 0)
+		set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);
 
 	nf_flow_offload_destroy(flow_rule);
-
-	return err;
 }
 
 static void flow_offload_work_del(struct flow_offload_work *offload)
@@ -706,7 +713,6 @@ static void flow_offload_work_handler(struct work_struct *work)
 {
 	struct flow_offload_work *offload, *next;
 	LIST_HEAD(offload_pending_list);
-	int ret;
 
 	spin_lock_bh(&flow_offload_pending_list_lock);
 	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
@@ -715,9 +721,7 @@ static void flow_offload_work_handler(struct work_struct *work)
 	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
 		switch (offload->cmd) {
 		case FLOW_CLS_REPLACE:
-			ret = flow_offload_work_add(offload);
-			if (ret < 0)
-				offload->flow->flags &= ~FLOW_OFFLOAD_HW;
+			flow_offload_work_add(offload);
 			break;
 		case FLOW_CLS_DESTROY:
			flow_offload_work_del(offload);
@@ -742,20 +746,33 @@ static void flow_offload_queue_work(struct flow_offload_work *offload)
 	schedule_work(&nf_flow_offload_work);
 }
 
-void nf_flow_offload_add(struct nf_flowtable *flowtable,
-			 struct flow_offload *flow)
+static struct flow_offload_work *
+nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
+			   struct flow_offload *flow, unsigned int cmd)
 {
 	struct flow_offload_work *offload;
 
 	offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
 	if (!offload)
-		return;
+		return NULL;
 
-	offload->cmd = FLOW_CLS_REPLACE;
+	offload->cmd = cmd;
 	offload->flow = flow;
 	offload->priority = flowtable->priority;
 	offload->flowtable = flowtable;
-	flow->flags |= FLOW_OFFLOAD_HW;
+
+	return offload;
+}
+
+
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+			 struct flow_offload *flow)
+{
+	struct flow_offload_work *offload;
+
+	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
+	if (!offload)
+		return;
 
 	flow_offload_queue_work(offload);
 }
@@ -765,15 +782,11 @@ void nf_flow_offload_del(struct nf_flowtable *flowtable,
 {
 	struct flow_offload_work *offload;
 
-	offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
 	if (!offload)
 		return;
 
-	offload->cmd = FLOW_CLS_DESTROY;
-	offload->flow = flow;
-	offload->flow->flags |= FLOW_OFFLOAD_HW_DYING;
-	offload->flowtable = flowtable;
-
+	set_bit(NF_FLOW_HW_DYING, &flow->flags);
 	flow_offload_queue_work(offload);
 }
 
@@ -784,24 +797,19 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
 	__s32 delta;
 
 	delta = nf_flow_timeout_delta(flow->timeout);
-	if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
-	    flow->flags & FLOW_OFFLOAD_HW_DYING)
+	if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
 		return;
 
-	offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
 	if (!offload)
 		return;
 
-	offload->cmd = FLOW_CLS_STATS;
-	offload->flow = flow;
-	offload->flowtable = flowtable;
-
 	flow_offload_queue_work(offload);
 }
 
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
 {
-	if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
+	if (nf_flowtable_hw_offload(flowtable))
 		flush_work(&nf_flow_offload_work);
 }
 
@@ -830,28 +838,44 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
 	return err;
 }
 
-int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
-				struct net_device *dev,
-				enum flow_block_command cmd)
+static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
+				     struct nf_flowtable *flowtable,
+				     struct net_device *dev,
+				     enum flow_block_command cmd,
+				     struct netlink_ext_ack *extack)
 {
-	struct netlink_ext_ack extack = {};
-	struct flow_block_offload bo = {};
 	int err;
 
-	if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
+	if (!nf_flowtable_hw_offload(flowtable))
 		return 0;
 
 	if (!dev->netdev_ops->ndo_setup_tc)
 		return -EOPNOTSUPP;
 
-	bo.net = dev_net(dev);
-	bo.block = &flowtable->flow_block;
-	bo.command = cmd;
-	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
-	bo.extack = &extack;
-	INIT_LIST_HEAD(&bo.cb_list);
+	memset(bo, 0, sizeof(*bo));
+	bo->net = dev_net(dev);
+	bo->block = &flowtable->flow_block;
+	bo->command = cmd;
+	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+	bo->extack = extack;
+	INIT_LIST_HEAD(&bo->cb_list);
 
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo);
+	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
 	if (err < 0)
 		return err;
 
+	return 0;
+}
+
+int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+				struct net_device *dev,
+				enum flow_block_command cmd)
+{
+	struct netlink_ext_ack extack = {};
+	struct flow_block_offload bo;
+	int err;
+
+	err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack);
+	if (err < 0)
+		return err;
+
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -18,21 +18,66 @@
 struct nft_bitwise {
 	enum nft_registers	sreg:8;
 	enum nft_registers	dreg:8;
+	enum nft_bitwise_ops	op:8;
 	u8			len;
 	struct nft_data		mask;
 	struct nft_data		xor;
+	struct nft_data		data;
 };
 
+static void nft_bitwise_eval_bool(u32 *dst, const u32 *src,
+				  const struct nft_bitwise *priv)
+{
+	unsigned int i;
+
+	for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++)
+		dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i];
+}
+
+static void nft_bitwise_eval_lshift(u32 *dst, const u32 *src,
+				    const struct nft_bitwise *priv)
+{
+	u32 shift = priv->data.data[0];
+	unsigned int i;
+	u32 carry = 0;
+
+	for (i = DIV_ROUND_UP(priv->len, sizeof(u32)); i > 0; i--) {
+		dst[i - 1] = (src[i - 1] << shift) | carry;
+		carry = src[i - 1] >> (BITS_PER_TYPE(u32) - shift);
+	}
+}
+
+static void nft_bitwise_eval_rshift(u32 *dst, const u32 *src,
+				    const struct nft_bitwise *priv)
+{
+	u32 shift = priv->data.data[0];
+	unsigned int i;
+	u32 carry = 0;
+
+	for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++) {
+		dst[i] = carry | (src[i] >> shift);
+		carry = src[i] << (BITS_PER_TYPE(u32) - shift);
+	}
+}
+
 void nft_bitwise_eval(const struct nft_expr *expr,
		      struct nft_regs *regs, const struct nft_pktinfo *pkt)
 {
 	const struct nft_bitwise *priv = nft_expr_priv(expr);
 	const u32 *src = &regs->data[priv->sreg];
 	u32 *dst = &regs->data[priv->dreg];
-	unsigned int i;
 
-	for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++)
-		dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i];
+	switch (priv->op) {
+	case NFT_BITWISE_BOOL:
+		nft_bitwise_eval_bool(dst, src, priv);
+		break;
+	case NFT_BITWISE_LSHIFT:
+		nft_bitwise_eval_lshift(dst, src, priv);
+		break;
+	case NFT_BITWISE_RSHIFT:
+		nft_bitwise_eval_rshift(dst, src, priv);
+		break;
+	}
 }
 
 static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
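The shift helpers above operate on operands wider than 32 bits by propagating a carry between words; the loop direction implies word 0 is the most significant, so a left shift moves bits toward lower indices. A userspace re-creation of the left-shift loop (same logic minus kernel types; valid for 0 < shift < 32) that makes the carry visible:

#include <stdint.h>
#include <stdio.h>

/* Shift an array of 32-bit words left by 'shift', word 0 being the most
 * significant, mirroring the loop in nft_bitwise_eval_lshift(). */
static void lshift_words(uint32_t *dst, const uint32_t *src,
			 unsigned int nwords, uint32_t shift)
{
	uint32_t carry = 0;
	unsigned int i;

	for (i = nwords; i > 0; i--) {
		dst[i - 1] = (src[i - 1] << shift) | carry;
		carry = src[i - 1] >> (32 - shift);	/* high bits feed word i-2 */
	}
}

int main(void)
{
	uint32_t src[2] = { 0x00000001, 0x80000000 };	/* the value 0x1_80000000 */
	uint32_t dst[2];

	lshift_words(dst, src, 2, 1);
	/* prints "00000003 00000000": the top bit of word 1 carried into word 0 */
	printf("%08x %08x\n", dst[0], dst[1]);
	return 0;
}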
@@ -41,40 +86,22 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
 	[NFTA_BITWISE_LEN]	= { .type = NLA_U32 },
 	[NFTA_BITWISE_MASK]	= { .type = NLA_NESTED },
 	[NFTA_BITWISE_XOR]	= { .type = NLA_NESTED },
+	[NFTA_BITWISE_OP]	= { .type = NLA_U32 },
+	[NFTA_BITWISE_DATA]	= { .type = NLA_NESTED },
 };
 
-static int nft_bitwise_init(const struct nft_ctx *ctx,
-			    const struct nft_expr *expr,
-			    const struct nlattr * const tb[])
+static int nft_bitwise_init_bool(struct nft_bitwise *priv,
+				 const struct nlattr *const tb[])
 {
-	struct nft_bitwise *priv = nft_expr_priv(expr);
 	struct nft_data_desc d1, d2;
-	u32 len;
 	int err;
 
-	if (tb[NFTA_BITWISE_SREG] == NULL ||
-	    tb[NFTA_BITWISE_DREG] == NULL ||
-	    tb[NFTA_BITWISE_LEN] == NULL ||
-	    tb[NFTA_BITWISE_MASK] == NULL ||
-	    tb[NFTA_BITWISE_XOR] == NULL)
+	if (tb[NFTA_BITWISE_DATA])
 		return -EINVAL;
 
-	err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
-	if (err < 0)
-		return err;
-
-	priv->len = len;
-
-	priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
-	err = nft_validate_register_load(priv->sreg, priv->len);
-	if (err < 0)
-		return err;
-
-	priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
-	err = nft_validate_register_store(ctx, priv->dreg, NULL,
-					  NFT_DATA_VALUE, priv->len);
-	if (err < 0)
-		return err;
+	if (!tb[NFTA_BITWISE_MASK] ||
+	    !tb[NFTA_BITWISE_XOR])
+		return -EINVAL;
 
 	err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &d1,
			    tb[NFTA_BITWISE_MASK]);
@@ -102,40 +129,151 @@ err1:
 	return err;
 }
 
-static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_bitwise_init_shift(struct nft_bitwise *priv,
+				  const struct nlattr *const tb[])
 {
-	const struct nft_bitwise *priv = nft_expr_priv(expr);
+	struct nft_data_desc d;
+	int err;
 
-	if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
-		goto nla_put_failure;
-	if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
-		goto nla_put_failure;
-	if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
-		goto nla_put_failure;
+	if (tb[NFTA_BITWISE_MASK] ||
+	    tb[NFTA_BITWISE_XOR])
+		return -EINVAL;
+
+	if (!tb[NFTA_BITWISE_DATA])
+		return -EINVAL;
+
+	err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &d,
+			    tb[NFTA_BITWISE_DATA]);
+	if (err < 0)
+		return err;
+	if (d.type != NFT_DATA_VALUE || d.len != sizeof(u32) ||
+	    priv->data.data[0] >= BITS_PER_TYPE(u32)) {
+		nft_data_release(&priv->data, d.type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nft_bitwise_init(const struct nft_ctx *ctx,
+			    const struct nft_expr *expr,
+			    const struct nlattr * const tb[])
+{
+	struct nft_bitwise *priv = nft_expr_priv(expr);
+	u32 len;
+	int err;
+
+	if (!tb[NFTA_BITWISE_SREG] ||
+	    !tb[NFTA_BITWISE_DREG] ||
+	    !tb[NFTA_BITWISE_LEN])
+		return -EINVAL;
+
+	err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
+	if (err < 0)
+		return err;
+
+	priv->len = len;
+
+	priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
+	err = nft_validate_register_load(priv->sreg, priv->len);
+	if (err < 0)
+		return err;
+
+	priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
+	err = nft_validate_register_store(ctx, priv->dreg, NULL,
+					  NFT_DATA_VALUE, priv->len);
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_BITWISE_OP]) {
+		priv->op = ntohl(nla_get_be32(tb[NFTA_BITWISE_OP]));
+		switch (priv->op) {
+		case NFT_BITWISE_BOOL:
+		case NFT_BITWISE_LSHIFT:
+		case NFT_BITWISE_RSHIFT:
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	} else {
+		priv->op = NFT_BITWISE_BOOL;
+	}
+
+	switch(priv->op) {
+	case NFT_BITWISE_BOOL:
+		err = nft_bitwise_init_bool(priv, tb);
+		break;
+	case NFT_BITWISE_LSHIFT:
+	case NFT_BITWISE_RSHIFT:
+		err = nft_bitwise_init_shift(priv, tb);
+		break;
+	}
+
+	return err;
+}
+
+static int nft_bitwise_dump_bool(struct sk_buff *skb,
+				 const struct nft_bitwise *priv)
+{
 	if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
			  NFT_DATA_VALUE, priv->len) < 0)
-		goto nla_put_failure;
+		return -1;
 
 	if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
			  NFT_DATA_VALUE, priv->len) < 0)
-		goto nla_put_failure;
+		return -1;
 
 	return 0;
 }
 
-nla_put_failure:
-	return -1;
+static int nft_bitwise_dump_shift(struct sk_buff *skb,
+				  const struct nft_bitwise *priv)
+{
+	if (nft_data_dump(skb, NFTA_BITWISE_DATA, &priv->data,
+			  NFT_DATA_VALUE, sizeof(u32)) < 0)
+		return -1;
+	return 0;
+}
+
+static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_bitwise *priv = nft_expr_priv(expr);
+	int err = 0;
+
+	if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
+		return -1;
+	if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
+		return -1;
+	if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
+		return -1;
+	if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(priv->op)))
+		return -1;
+
+	switch (priv->op) {
+	case NFT_BITWISE_BOOL:
+		err = nft_bitwise_dump_bool(skb, priv);
+		break;
+	case NFT_BITWISE_LSHIFT:
+	case NFT_BITWISE_RSHIFT:
+		err = nft_bitwise_dump_shift(skb, priv);
+		break;
+	}
+
+	return err;
+}
 
 static struct nft_data zero;
 
 static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
-			        struct nft_flow_rule *flow,
-			        const struct nft_expr *expr)
+			       struct nft_flow_rule *flow,
+			       const struct nft_expr *expr)
 {
 	const struct nft_bitwise *priv = nft_expr_priv(expr);
 	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
 
+	if (priv->op != NFT_BITWISE_BOOL)
+		return -EOPNOTSUPP;
+
 	if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
	    priv->sreg != priv->dreg || priv->len != reg->len)
		return -EOPNOTSUPP;
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -259,8 +259,8 @@ static u64 nft_bitmap_privsize(const struct nlattr * const nla[],
 }
 
 static int nft_bitmap_init(const struct nft_set *set,
-			  const struct nft_set_desc *desc,
-			  const struct nlattr * const nla[])
+			   const struct nft_set_desc *desc,
+			   const struct nlattr * const nla[])
 {
 	struct nft_bitmap *priv = nft_set_priv(set);
 
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -645,7 +645,7 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
 }
 
 static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features,
-			      struct nft_set_estimate *est)
+				   struct nft_set_estimate *est)
 {
 	if (!desc->size)
 		return false;
 
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -357,21 +357,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 	return 0;
 }
 
-static bool select_all(const struct xt_hashlimit_htable *ht,
-		       const struct dsthash_ent *he)
-{
-	return true;
-}
-
-static bool select_gc(const struct xt_hashlimit_htable *ht,
-		      const struct dsthash_ent *he)
-{
-	return time_after_eq(jiffies, he->expires);
-}
-
-static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
-				     bool (*select)(const struct xt_hashlimit_htable *ht,
-						    const struct dsthash_ent *he))
+static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
 {
 	unsigned int i;
 
@@ -381,7 +367,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
 
 		spin_lock_bh(&ht->lock);
 		hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
-			if ((*select)(ht, dh))
+			if (time_after_eq(jiffies, dh->expires) || select_all)
 				dsthash_free(ht, dh);
 		}
 		spin_unlock_bh(&ht->lock);
@@ -395,7 +381,7 @@ static void htable_gc(struct work_struct *work)
 
 	ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
 
-	htable_selective_cleanup(ht, select_gc);
+	htable_selective_cleanup(ht, false);
 
 	queue_delayed_work(system_power_efficient_wq,
			   &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
@@ -419,7 +405,7 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
 {
 	cancel_delayed_work_sync(&hinfo->gc_work);
 	htable_remove_proc_entry(hinfo);
-	htable_selective_cleanup(hinfo, select_all);
+	htable_selective_cleanup(hinfo, true);
 	kfree(hinfo->name);
 	vfree(hinfo);
 }
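The xt_hashlimit hunks above implement change 9): the per-entry predicate passed as a function pointer becomes a boolean parameter, turning an indirect call (expensive under retpoline mitigations) into an inlineable branch, since the only two predicates ever used were "expired" and "everything". A generic userspace illustration of the before/after pattern (not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct ent { long expires; };

static bool select_expired(const struct ent *e, long now)
{
	return e->expires <= now;
}

/* before: one indirect call per entry */
static void cleanup_cb(struct ent *e, int n, long now,
		       bool (*select)(const struct ent *, long))
{
	for (int i = 0; i < n; i++)
		if (select(&e[i], now))
			e[i].expires = -1;	/* "free" the entry */
}

/* after: a flag and an inline check express the same two behaviours */
static void cleanup_flag(struct ent *e, int n, long now, bool select_all)
{
	for (int i = 0; i < n; i++)
		if (select_all || e[i].expires <= now)
			e[i].expires = -1;
}

int main(void)
{
	struct ent a[2] = { { 5 }, { 50 } };
	struct ent b[2] = { { 5 }, { 50 } };

	cleanup_cb(a, 2, 10, select_expired);	/* frees only a[0] */
	cleanup_flag(b, 2, 10, true);		/* frees both */
	printf("%ld %ld / %ld %ld\n", a[0].expires, a[1].expires,
	       b[0].expires, b[1].expires);
	return 0;
}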