Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains accumulated Netfilter fixes for your net tree:

1) Ensure quota dump and reset happens iff we can deliver numbers to userspace.

2) Silence splat on incorrect use of smp_processor_id() from nft_queue.

3) Fix an out-of-bound access reported by KASAN in nf_tables_rule_destroy(), patch from Florian Westphal.

4) Fix layer 4 checksum mangling in the nf_tables payload expression with IPv6.

5) Fix a race in the CLUSTERIP target from the control plane path when two threads run to add a new configuration object. Serialize invocations of clusterip_config_init() using spin_lock. From Xin Long.

6) Call br_nf_pre_routing_finish_bridge() once we are done with the br_nf_pre_routing_finish() hook. From Artur Molchanov.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d896b3120b
net/bridge/br_netfilter_hooks.c:

@@ -399,7 +399,7 @@ bridged_dnat:
 				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
 						  net, sk, skb, skb->dev,
 						  NULL,
-						  br_nf_pre_routing_finish);
+						  br_nf_pre_routing_finish_bridge);
 				return 0;
 			}
 			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
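br_nf_hook_thresh() re-runs the packet through the NF_BR_PRE_ROUTING hook and, on accept, invokes the okfn continuation it was given; passing br_nf_pre_routing_finish itself meant the bridged-DNAT packet looped back instead of advancing to the bridge-specific finish step. A minimal sketch of this continuation-passing pattern, with purely illustrative names rather than the kernel API:

	/* hook_run() stands in for br_nf_hook_thresh(): hook callbacks may
	 * drop the packet, or it is handed to the given continuation. */
	typedef int (*okfn_t)(void *pkt);

	static int finish_bridge(void *pkt)
	{
		/* bridge-specific post-routing work happens here */
		(void)pkt;
		return 0;
	}

	static int hook_run(void *pkt, okfn_t okfn)
	{
		/* ... hook callbacks run here; assume the verdict is accept ... */
		return okfn(pkt);
	}

	static int finish(void *pkt)
	{
		/* Re-injecting with 'finish' as the continuation would come
		 * back here instead of progressing to finish_bridge(), which
		 * is the bug the hunk above fixes. */
		return hook_run(pkt, finish_bridge);
	}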
net/ipv4/netfilter/ipt_CLUSTERIP.c:

@@ -144,7 +144,7 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 	rcu_read_lock_bh();
 	c = __clusterip_config_find(net, clusterip);
 	if (c) {
-		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+		if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
 			c = NULL;
 		else if (entry)
 			atomic_inc(&c->entries);
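The extra !c->pde test pairs with the duplicate-insertion fix below: a config can now be visible on the list before its proc entry exists, so lookups must skip half-initialized objects, and atomic_inc_not_zero() refuses to take a reference once the count has already dropped to zero. A standalone sketch of that lookup rule, using C11 atomics instead of the kernel helpers (illustrative names):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int refcount;	/* 0 once teardown has started */
		bool ready;		/* set only after full publication */
	};

	/* Take a reference during a lockless lookup, but only if the object
	 * is both fully set up and still alive; mirrors the
	 * "!c->pde || !atomic_inc_not_zero(&c->refcount)" test above. */
	static bool obj_get_if_live(struct obj *o)
	{
		int old;

		if (!o->ready)
			return false;

		old = atomic_load(&o->refcount);
		while (old != 0) {
			/* inc-not-zero: never resurrect a dying object */
			if (atomic_compare_exchange_weak(&o->refcount, &old,
							 old + 1))
				return true;
			/* 'old' was reloaded by the failed CAS; retry */
		}
		return false;
	}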
@@ -166,14 +166,15 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
 
 static struct clusterip_config *
 clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
-			struct net_device *dev)
+		      struct net_device *dev)
 {
+	struct net *net = dev_net(dev);
 	struct clusterip_config *c;
-	struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	c->dev = dev;
 	c->clusterip = ip;
@@ -185,6 +186,17 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 	atomic_set(&c->refcount, 1);
 	atomic_set(&c->entries, 1);
 
+	spin_lock_bh(&cn->lock);
+	if (__clusterip_config_find(net, ip)) {
+		spin_unlock_bh(&cn->lock);
+		kfree(c);
+
+		return ERR_PTR(-EBUSY);
+	}
+
+	list_add_rcu(&c->list, &cn->configs);
+	spin_unlock_bh(&cn->lock);
+
 #ifdef CONFIG_PROC_FS
 	{
 		char buffer[16];
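This is the core of the race fix: the lookup and the list insertion now happen inside one spin_lock_bh() critical section, so two control-plane threads can no longer both conclude the config is missing and insert it twice. The same check-then-insert shape in plain C, with a pthread mutex standing in for the kernel spinlock and illustrative names throughout:

	#include <pthread.h>
	#include <stdlib.h>

	struct cfg { int key; struct cfg *next; };

	static struct cfg *configs;		/* head of the config list */
	static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct cfg *cfg_find_locked(int key)
	{
		struct cfg *c;

		for (c = configs; c; c = c->next)
			if (c->key == key)
				return c;
		return NULL;
	}

	/* Lookup and insert must share one critical section; otherwise two
	 * threads can both see "no entry" and insert duplicates. */
	static struct cfg *cfg_init(int key)
	{
		struct cfg *c = calloc(1, sizeof(*c));

		if (!c)
			return NULL;
		c->key = key;

		pthread_mutex_lock(&cfg_lock);
		if (cfg_find_locked(key)) {	/* lost the race: back out */
			pthread_mutex_unlock(&cfg_lock);
			free(c);
			return NULL;		/* -EBUSY in the kernel patch */
		}
		c->next = configs;		/* publish under the lock */
		configs = c;
		pthread_mutex_unlock(&cfg_lock);
		return c;
	}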
@@ -195,16 +207,16 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 					  cn->procdir,
 					  &clusterip_proc_fops, c);
 		if (!c->pde) {
+			spin_lock_bh(&cn->lock);
+			list_del_rcu(&c->list);
+			spin_unlock_bh(&cn->lock);
 			kfree(c);
-			return NULL;
+
+			return ERR_PTR(-ENOMEM);
 		}
 	}
 #endif
 
-	spin_lock_bh(&cn->lock);
-	list_add_rcu(&c->list, &cn->configs);
-	spin_unlock_bh(&cn->lock);
-
 	return c;
 }
@@ -410,9 +422,9 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 
 		config = clusterip_config_init(cipinfo,
 					       e->ip.dst.s_addr, dev);
-		if (!config) {
+		if (IS_ERR(config)) {
 			dev_put(dev);
-			return -ENOMEM;
+			return PTR_ERR(config);
 		}
 		dev_mc_add(config->dev, config->clustermac);
 	}
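Switching from NULL to ERR_PTR() lets the caller distinguish -EBUSY (lost the duplicate-insert race) from -ENOMEM instead of collapsing every failure to one errno. The trick, simplified from the kernel's <linux/err.h> (the real macros also carry __force annotations and branch hints):

	/* An errno encoded in the top of the address space, where no valid
	 * pointer can live. */
	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

A caller then writes exactly what the hunk above does: if (IS_ERR(config)) return PTR_ERR(config);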
net/netfilter/nf_tables_api.c:

@@ -2115,7 +2115,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	 * is called on error from nf_tables_newrule().
 	 */
 	expr = nft_expr_first(rule);
-	while (expr->ops && expr != nft_expr_last(rule)) {
+	while (expr != nft_expr_last(rule) && expr->ops) {
 		nf_tables_expr_destroy(ctx, expr);
 		expr = nft_expr_next(expr);
 	}
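The swap matters because && evaluates left to right: the old order dereferenced expr->ops before confirming expr was still inside the rule, which is the out-of-bounds read KASAN flagged. The same guard-before-dereference rule in miniature:

	#include <stddef.h>

	int sum_until_zero(const int *buf, size_t len)
	{
		size_t i = 0;
		int sum = 0;

		/* Wrong: buf[i] may read past the end before i < len runs:
		 *     while (buf[i] != 0 && i < len) ...
		 * Right: the bounds test guards the dereference. */
		while (i < len && buf[i] != 0)
			sum += buf[i++];
		return sum;
	}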
net/netfilter/nft_payload.c:

@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
 	return 0;
 }
 
+static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+				 __wsum fsum, __wsum tsum, int csum_offset)
+{
+	__sum16 sum;
+
+	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		return -1;
+
+	nft_csum_replace(&sum, fsum, tsum);
+	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
+	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		return -1;
+
+	return 0;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
 				 struct nft_regs *regs,
 				 const struct nft_pktinfo *pkt)

@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
 	const u32 *src = &regs->data[priv->sreg];
 	int offset, csum_offset;
 	__wsum fsum, tsum;
-	__sum16 sum;
 
 	switch (priv->base) {
 	case NFT_PAYLOAD_LL_HEADER:

@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
 	csum_offset = offset + priv->csum_offset;
 	offset += priv->offset;
 
-	if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
 	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
 	     skb->ip_summed != CHECKSUM_PARTIAL)) {
-		if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
-			goto err;
-
 		fsum = skb_checksum(skb, offset, priv->len, 0);
 		tsum = csum_partial(src, priv->len, 0);
-		nft_csum_replace(&sum, fsum, tsum);
 
-		if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
-		    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
 			goto err;
+
+		if (priv->csum_flags &&
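nft_csum_replace() is an incremental checksum update: rather than recomputing the sum over the whole packet, it folds out the old bytes' contribution (fsum) and folds in the new one (tsum), per RFC 1624's HC' = ~(~HC + ~m + m'). A self-contained 16-bit version of that arithmetic (a sketch, not the kernel's csum_replace()):

	#include <stdint.h>

	static uint16_t csum_fold(uint32_t sum)
	{
		while (sum >> 16)		/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	/* 'check' is the checksum field from the packet; 'from'/'to' are
	 * the old and new values of the 16-bit word being rewritten. */
	static uint16_t csum_update(uint16_t check, uint16_t from, uint16_t to)
	{
		uint32_t sum = (uint16_t)~check;	/* ~HC */

		sum += (uint16_t)~from;			/* + ~m  */
		sum += to;				/* + m'  */
		return (uint16_t)~csum_fold(sum);	/* HC'   */
	}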
net/netfilter/nft_queue.c:

@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
 
 	if (priv->queues_total > 1) {
 		if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-			int cpu = smp_processor_id();
+			int cpu = raw_smp_processor_id();
 
 			queue = priv->queuenum + cpu % priv->queues_total;
 		} else {
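smp_processor_id() is only legal where the task cannot migrate (preemption disabled); calling it from a preemptible context triggers the debug splat mentioned in the changelog. raw_smp_processor_id() skips that check, which is fine here because the CPU number is just a load-spreading hint and a stale value is harmless. A userspace analogy using sched_getcpu(), which has the same "may be stale by the time you use it" property:

	#define _GNU_SOURCE
	#include <sched.h>

	static unsigned int pick_queue(unsigned int base, unsigned int total)
	{
		int cpu = sched_getcpu();  /* may be stale after migration */

		if (cpu < 0 || total <= 1)
			return base;
		/* Any CPU value spreads load acceptably; exactness is not
		 * required, only cheap fanout across 'total' queues. */
		return base + (unsigned int)cpu % total;
	}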
net/netfilter/nft_quota.c:

@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
 static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
 			     bool reset)
 {
+	u64 consumed, consumed_cap;
 	u32 flags = priv->flags;
-	u64 consumed;
-
-	if (reset) {
-		consumed = atomic64_xchg(&priv->consumed, 0);
-		if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
-			flags |= NFT_QUOTA_F_DEPLETED;
-	} else {
-		consumed = atomic64_read(&priv->consumed);
-	}
 
 	/* Since we inconditionally increment consumed quota for each packet
 	 * that we see, don't go over the quota boundary in what we send to
 	 * userspace.
 	 */
-	if (consumed > priv->quota)
-		consumed = priv->quota;
+	consumed = atomic64_read(&priv->consumed);
+	if (consumed >= priv->quota) {
+		consumed_cap = priv->quota;
+		flags |= NFT_QUOTA_F_DEPLETED;
+	} else {
+		consumed_cap = consumed;
+	}
 
 	if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
 			 NFTA_QUOTA_PAD) ||
-	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed),
+	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
 			 NFTA_QUOTA_PAD) ||
 	    nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
 		goto nla_put_failure;
 
+	if (reset) {
+		atomic64_sub(consumed, &priv->consumed);
+		clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+	}
 	return 0;
 
 nla_put_failure:
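The rewritten dump builds the whole netlink message first and only then, on success and when a reset was requested, subtracts the snapshot it actually reported. The old atomic64_xchg() zeroed the counter up front, so bytes accounted during the dump were lost and a failed dump destroyed the numbers it never delivered. A sketch of that "reset only what you reported, and only on success" pattern with C11 atomics (emit() is a hypothetical stand-in for the netlink put calls):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	static _Atomic uint64_t consumed;

	static bool dump_and_reset(bool reset, bool (*emit)(uint64_t))
	{
		uint64_t snap = atomic_load(&consumed);

		if (!emit(snap))
			return false;	/* failed dump: state untouched */

		/* Subtract the snapshot rather than zeroing: bytes accounted
		 * between the load and here are preserved. */
		if (reset)
			atomic_fetch_sub(&consumed, snap);
		return true;
	}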