Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree. This
batch mostly comes with patches to address fallout from the previous
merge window cycle, they are:

1) Use entry->state.hook_list from nf_queue() instead of the global
   nf_hooks, which is not valid when used from NFPROTO_NETDEV. This
   should cause no problems, since we have no userspace queueing for
   that family, but let's fix it now for the sake of correctness.
   Patch from Eric W. Biederman.

2) Fix compilation breakage in bridge netfilter if CONFIG_NF_DEFRAG_IPV4
   is not set, from Bernhard Thaler.

3) Use a percpu jumpstack in arptables too; now that there is a single
   copy of the rule blob, we can't store the return address there
   anymore. Patch from Florian Westphal.

4) Fix an skb leak in the xmit path of bridge netfilter. The problem has
   been there since 2.6.37, although it should not be possible to hit it
   with normal traffic. Also from Florian.

5) Eric Leblond reports that when loading a large ruleset with many
   missing modules after a fresh boot, nf_tables can take a long time to
   commit it. Fix this by processing the full batch until the end, even
   on missing modules, then abort only once and restart processing.

6) Add the bridge netfilter files to the MAINTAINERS file.

7) Fix a net_device refcount leak in the new IPv6 bridge netfilter code,
   from Julien Grall.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit ace15bbb39
@@ -6996,6 +6996,7 @@ F:	include/uapi/linux/netfilter/
 F:	net/*/netfilter.c
 F:	net/*/netfilter/
 F:	net/netfilter/
+F:	net/bridge/br_netfilter*.c
 
 NETLABEL
 M:	Paul Moore <paul@paul-moore.com>
@@ -111,7 +111,7 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 /* largest possible L2 header, see br_nf_dev_queue_xmit() */
 #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct brnf_frag_data {
 	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
 	u8 encap_size;
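The hunk above widens the guard so that struct brnf_frag_data is built whenever either defrag backend is enabled, which is what fixes the CONFIG_NF_DEFRAG_IPV4=n build mentioned in point 2 of the pull request. Below is a minimal stand-alone sketch of the same guard pattern; it is not kernel code, the *_DEMO macros are local stand-ins, and plain defined() is used instead of the kernel's IS_ENABLED() (which also covers =m) just to keep the example compilable on its own.

#include <stdio.h>

#define CONFIG_NF_DEFRAG_IPV6_DEMO 1	/* pretend only the IPv6 side is enabled */

/* build the shared fragment bookkeeping whenever either side is enabled */
#if defined(CONFIG_NF_DEFRAG_IPV4_DEMO) || defined(CONFIG_NF_DEFRAG_IPV6_DEMO)
struct frag_data {			/* stand-in for struct brnf_frag_data */
	char mac[32];
	unsigned char encap_size;
};
#endif

int main(void)
{
#if defined(CONFIG_NF_DEFRAG_IPV4_DEMO) || defined(CONFIG_NF_DEFRAG_IPV6_DEMO)
	struct frag_data d = { .encap_size = 14 };
	printf("frag data built in, encap_size=%d\n", d.encap_size);
#else
	printf("no defrag support built in\n");
#endif
	return 0;
}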
@@ -694,6 +694,7 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
 			     int (*output)(struct sock *, struct sk_buff *))
 {
@@ -712,6 +713,7 @@ static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
 
 	return ip_do_fragment(sk, skb, output);
 }
+#endif
 
 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
@@ -742,7 +744,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 		struct brnf_frag_data *data;
 
 		if (br_validate_ipv4(skb))
-			return NF_DROP;
+			goto drop;
 
 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
 
@@ -767,7 +769,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 		struct brnf_frag_data *data;
 
 		if (br_validate_ipv6(skb))
-			return NF_DROP;
+			goto drop;
 
 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
 
@@ -782,12 +784,16 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 
 		if (v6ops)
 			return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
-		else
-			return -EMSGSIZE;
+
+		kfree_skb(skb);
+		return -EMSGSIZE;
 	}
 #endif
 	nf_bridge_info_free(skb);
 	return br_dev_queue_push_xmit(sk, skb);
+ drop:
+	kfree_skb(skb);
+	return 0;
 }
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
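The goto drop conversions and the new drop label are point 4 of the pull request: br_nf_dev_queue_xmit() runs as an output callback rather than as a hook, so returning NF_DROP from it does not make anyone free the skb, and the buffer would simply leak. Here is a small user-space sketch of the same pattern; it is not kernel code and all names (fake_buf, validate, transmit) are invented for illustration.

#include <stdlib.h>

struct fake_buf { char *data; size_t len; };

static int validate(const struct fake_buf *b) { return b->len >= 14 ? 0 : -1; }
static int transmit(struct fake_buf *b) { free(b->data); free(b); return 0; }

static int queue_xmit(struct fake_buf *b)
{
	if (validate(b))
		goto drop;	/* used to be "return error code": leaked b */

	return transmit(b);	/* normal path consumes the buffer */
drop:
	free(b->data);		/* centralised cleanup, mirrors kfree_skb(skb) */
	free(b);
	return 0;
}

int main(void)
{
	struct fake_buf *b = malloc(sizeof(*b));

	b->data = malloc(8);
	b->len = 8;		/* too short, takes the drop path */
	return queue_xmit(b);
}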
@@ -104,7 +104,7 @@ int br_validate_ipv6(struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
 	struct net_device *dev = skb->dev;
-	struct inet6_dev *idev = in6_dev_get(skb->dev);
+	struct inet6_dev *idev = __in6_dev_get(skb->dev);
 	u32 pkt_len;
 	u8 ip6h_len = sizeof(struct ipv6hdr);
 
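This one-line change is point 7: in6_dev_get() takes a reference on the device's inet6 state that must be balanced by in6_dev_put(), and br_validate_ipv6() never dropped it, so every validated packet leaked a reference; __in6_dev_get() is the non-refcounting lookup, which is fine here because the caller already guarantees the device stays alive. A toy user-space model of the difference, with made-up names, not kernel code:

#include <assert.h>

struct fake_idev { int refcnt; };

static struct fake_idev *dev_get(struct fake_idev *idev)  /* like in6_dev_get() */
{
	idev->refcnt++;		/* caller owes a matching put */
	return idev;
}

static struct fake_idev *dev_peek(struct fake_idev *idev) /* like __in6_dev_get() */
{
	return idev;		/* no reference taken; caller must keep it alive */
}

int main(void)
{
	struct fake_idev idev = { .refcnt = 1 };

	/* old pattern: get without a put leaves the count elevated forever */
	(void)dev_get(&idev);
	assert(idev.refcnt == 2);

	/* new pattern: plain read, count unchanged */
	(void)dev_peek(&idev);
	assert(idev.refcnt == 2);	/* still 2 only because of the earlier leak */
	return 0;
}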
@@ -254,9 +254,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
 	unsigned int verdict = NF_DROP;
 	const struct arphdr *arp;
-	struct arpt_entry *e, *back;
+	struct arpt_entry *e, **jumpstack;
 	const char *indev, *outdev;
 	const void *table_base;
+	unsigned int cpu, stackidx = 0;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
@@ -270,15 +271,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	local_bh_disable();
 	addend = xt_write_recseq_begin();
 	private = table->private;
+	cpu     = smp_processor_id();
 	/*
 	 * Ensure we load private-> members after we've fetched the base
 	 * pointer.
 	 */
 	smp_read_barrier_depends();
 	table_base = private->entries;
+	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
-	back = get_entry(table_base, private->underflow[hook]);
 
 	acpar.in = state->in;
 	acpar.out = state->out;
@@ -312,18 +314,23 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 				verdict = (unsigned int)(-v) - 1;
 				break;
 			}
-			e = back;
-			back = get_entry(table_base, back->comefrom);
+			if (stackidx == 0) {
+				e = get_entry(table_base,
+					      private->underflow[hook]);
+			} else {
+				e = jumpstack[--stackidx];
+				e = arpt_next_entry(e);
+			}
 			continue;
 		}
 		if (table_base + v
 		    != arpt_next_entry(e)) {
-			/* Save old back ptr in next entry */
-			struct arpt_entry *next = arpt_next_entry(e);
-			next->comefrom = (void *)back - table_base;
 
-			/* set back pointer to next entry */
-			back = next;
+			if (stackidx >= private->stacksize) {
+				verdict = NF_DROP;
+				break;
+			}
+			jumpstack[stackidx++] = e;
 		}
 
 		e = get_entry(table_base, v);
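The arptables hunks are point 3: the rule blob used to be duplicated per CPU, so the traversal could stash its return location inside the blob itself via the back/comefrom pointers; with a single shared copy that would race, so the walker now records callers on a per-cpu jumpstack, as ip_tables and ip6_tables already do. The stand-alone model below mimics that control flow (push the calling rule on a jump, pop it on RETURN, fall back to the hook's underflow rule when the stack is empty, drop on overflow). Rule layout and verdict encoding are simplified for illustration, and this sketch always pushes on a jump, whereas the kernel skips the push for jumps to the immediately following rule.

#include <stdio.h>

enum { V_RETURN = -1, V_ACCEPT = -2 };	/* negative "verdicts"; >= 0 means jump */

struct rule { int verdict; };

static int run(const struct rule *rules, int start, int underflow)
{
	const struct rule *jumpstack[16];	/* per-cpu in the kernel */
	unsigned int stackidx = 0, stacksize = 16;
	int i = start;

	for (;;) {
		int v = rules[i].verdict;

		if (v == V_ACCEPT) {
			printf("accept at rule %d\n", i);
			return 0;
		}
		if (v == V_RETURN) {
			if (stackidx == 0) {
				i = underflow;	/* nothing to return to */
			} else {
				/* resume right after the rule that jumped */
				i = (int)(jumpstack[--stackidx] - rules) + 1;
			}
			continue;
		}
		if (stackidx >= stacksize) {
			printf("jumpstack overflow: drop\n");
			return 1;
		}
		jumpstack[stackidx++] = &rules[i];	/* remember the caller */
		i = v;					/* take the jump */
	}
}

int main(void)
{
	/* rule 0 jumps to a "chain" at rule 2, which RETURNs back to rule 1 */
	const struct rule rules[] = { { 2 }, { V_ACCEPT }, { V_RETURN }, { V_ACCEPT } };

	return run(rules, 0, 3);
}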
@@ -213,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
 	if (verdict == NF_ACCEPT) {
 	next_hook:
-		verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
+		verdict = nf_iterate(entry->state.hook_list,
 				     skb, &entry->state, &elem);
 	}
 
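The nf_queue hunk is point 1: the queued entry already carries the hook list it was traversing in entry->state, so re-injection should resume on that saved list instead of indexing the global nf_hooks table, which NFPROTO_NETDEV hooks are not part of. A user-space sketch of the idea, with purely illustrative types and names:

#include <stdio.h>

struct hook {
	const char *name;
	struct hook *next;
};

struct queue_entry {
	struct hook *hook_list;	/* saved at queue time, like state.hook_list */
};

static void reinject(struct queue_entry *e)
{
	/* iterate the list saved with the entry, not a global [pf][hook] table */
	for (struct hook *h = e->hook_list; h; h = h->next)
		printf("running hook %s\n", h->name);
}

int main(void)
{
	struct hook h2 = { "log", NULL };
	struct hook h1 = { "filter", &h2 };
	struct queue_entry e = { .hook_list = &h1 };

	reinject(&e);
	return 0;
}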
@@ -269,6 +269,12 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
 	}
 }
 
+enum {
+	NFNL_BATCH_FAILURE	= (1 << 0),
+	NFNL_BATCH_DONE		= (1 << 1),
+	NFNL_BATCH_REPLAY	= (1 << 2),
+};
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 				u_int16_t subsys_id)
 {
@@ -276,13 +282,15 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct net *net = sock_net(skb->sk);
 	const struct nfnetlink_subsystem *ss;
 	const struct nfnl_callback *nc;
-	bool success = true, done = false;
 	static LIST_HEAD(err_list);
+	u32 status;
 	int err;
 
 	if (subsys_id >= NFNL_SUBSYS_COUNT)
 		return netlink_ack(skb, nlh, -EINVAL);
 replay:
+	status = 0;
+
 	skb = netlink_skb_clone(oskb, GFP_KERNEL);
 	if (!skb)
 		return netlink_ack(oskb, nlh, -ENOMEM);
@@ -336,10 +344,10 @@ replay:
 		if (type == NFNL_MSG_BATCH_BEGIN) {
 			/* Malformed: Batch begin twice */
 			nfnl_err_reset(&err_list);
-			success = false;
+			status |= NFNL_BATCH_FAILURE;
 			goto done;
 		} else if (type == NFNL_MSG_BATCH_END) {
-			done = true;
+			status |= NFNL_BATCH_DONE;
 			goto done;
 		} else if (type < NLMSG_MIN_TYPE) {
 			err = -EINVAL;
@@ -382,11 +390,8 @@ replay:
 			 * original skb.
 			 */
 			if (err == -EAGAIN) {
-				nfnl_err_reset(&err_list);
-				ss->abort(oskb);
-				nfnl_unlock(subsys_id);
-				kfree_skb(skb);
-				goto replay;
+				status |= NFNL_BATCH_REPLAY;
+				goto next;
 			}
 		}
 ack:
@@ -402,7 +407,7 @@ ack:
 			 */
 			nfnl_err_reset(&err_list);
 			netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
-			success = false;
+			status |= NFNL_BATCH_FAILURE;
 			goto done;
 		}
 		/* We don't stop processing the batch on errors, thus,
@@ -410,19 +415,26 @@ ack:
 		 * triggers.
 		 */
 		if (err)
-			success = false;
+			status |= NFNL_BATCH_FAILURE;
 	}
+next:
 	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
 	if (msglen > skb->len)
 		msglen = skb->len;
 	skb_pull(skb, msglen);
 }
done:
-	if (success && done)
-		ss->commit(oskb);
-	else
-		ss->abort(oskb);
+	if (status & NFNL_BATCH_REPLAY) {
+		ss->abort(oskb);
+		nfnl_err_reset(&err_list);
+		nfnl_unlock(subsys_id);
+		kfree_skb(skb);
+		goto replay;
+	} else if (status == NFNL_BATCH_DONE) {
+		ss->commit(oskb);
+	} else {
+		ss->abort(oskb);
+	}
 
 	nfnl_err_deliver(&err_list, oskb);
 	nfnl_unlock(subsys_id);
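The nfnetlink hunks implement point 5: instead of two booleans and an immediate unlock/abort/replay from inside the message loop whenever a callback returns -EAGAIN (a missing module), the outcome of the whole batch is accumulated in bit flags and acted on once at the done: label, so a large ruleset with many unresolved modules is aborted and replayed once rather than once per offending message. The sketch below only models that final decision; the constants mirror the enum added by the patch, everything else is illustrative and not the kernel function.

#include <stdio.h>

enum {
	BATCH_FAILURE = (1 << 0),
	BATCH_DONE    = (1 << 1),
	BATCH_REPLAY  = (1 << 2),
};

/* decide what to do with the whole batch once every message has been seen */
static const char *finish_batch(unsigned int status)
{
	if (status & BATCH_REPLAY)
		return "abort, load modules, replay the whole batch once";
	if (status == BATCH_DONE)	/* batch-end seen and nothing else set */
		return "commit";
	return "abort";
}

int main(void)
{
	printf("%s\n", finish_batch(BATCH_DONE));
	printf("%s\n", finish_batch(BATCH_DONE | BATCH_FAILURE));
	printf("%s\n", finish_batch(BATCH_DONE | BATCH_REPLAY));
	return 0;
}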