Merge branch 'IP-listification-follow-ups'
Edward Cree says:

====================
IP listification follow-ups

While working on IPv6 list processing, I found another bug in the IPv4
version.  So this patch series has that fix, and the IPv6 version with
both fixes incorporated.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit ab8565af68
@@ -922,6 +922,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
 
 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
 	     struct packet_type *pt, struct net_device *orig_dev);
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+		   struct net_device *orig_dev);
 
 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 
@@ -316,13 +316,6 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
 	struct rtable *rt;
 	int err;
 
-	/* if ingress device is enslaved to an L3 master device pass the
-	 * skb to its handler for processing
-	 */
-	skb = l3mdev_ip_rcv(skb);
-	if (!skb)
-		return NET_RX_SUCCESS;
-
 	if (net->ipv4.sysctl_ip_early_demux &&
 	    !skb_dst(skb) &&
 	    !skb->sk &&
@@ -408,8 +401,16 @@ drop_error:
 
 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	int ret = ip_rcv_finish_core(net, sk, skb);
+	int ret;
+
+	/* if ingress device is enslaved to an L3 master device pass the
+	 * skb to its handler for processing
+	 */
+	skb = l3mdev_ip_rcv(skb);
+	if (!skb)
+		return NET_RX_SUCCESS;
 
+	ret = ip_rcv_finish_core(net, sk, skb);
 	if (ret != NET_RX_DROP)
 		ret = dst_input(skb);
 	return ret;
@@ -545,6 +546,12 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 		struct dst_entry *dst;
 
 		list_del(&skb->list);
+		/* if ingress device is enslaved to an L3 master device pass the
+		 * skb to its handler for processing
+		 */
+		skb = l3mdev_ip_rcv(skb);
+		if (!skb)
+			continue;
 		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
 			continue;
 
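The three IPv4 hunks above share one rationale: l3mdev_ip_rcv() may consume the skb (returning NULL) or hand back a replacement, so it has to be called by the code that owns the skb pointer. Hoisting it out of ip_rcv_finish_core() into ip_rcv_finish() and ip_list_rcv_finish() lets the list-processing loop simply skip packets taken by an L3 master device instead of passing a stale pointer on to dst_input(). Below is a standalone sketch of that ownership rule in plain userspace C, not kernel code; struct pkt, maybe_divert() and deliver() are invented stand-ins, with maybe_divert() playing the role of l3mdev_ip_rcv().

#include <stdio.h>

/* Invented stand-ins: struct pkt plays the skb, maybe_divert() plays
 * l3mdev_ip_rcv() -- it may consume the element (return NULL) or return
 * a (possibly different) element to keep processing.
 */
struct pkt {
	int id;
	struct pkt *next;
};

static struct pkt *maybe_divert(struct pkt *p)
{
	return (p->id % 2) ? NULL : p;	/* pretend odd ids get diverted */
}

static void deliver(struct pkt *p)
{
	printf("delivering packet %d\n", p->id);
}

static void process_list(struct pkt *head)
{
	struct pkt *p, *next;

	for (p = head; p; p = next) {
		next = p->next;		/* save before p may be replaced */
		p = maybe_divert(p);
		if (!p)
			continue;	/* consumed: skip, do not deliver */
		deliver(p);
	}
}

int main(void)
{
	struct pkt c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	process_list(&a);	/* only packet 2 is delivered */
	return 0;
}

The only point is that the NULL check lives in the loop that walks the list, which is exactly where the patch above moves it.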
@@ -764,6 +764,7 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 static struct packet_type ipv6_packet_type __read_mostly = {
 	.type = cpu_to_be16(ETH_P_IPV6),
 	.func = ipv6_rcv,
+	.list_func = ipv6_list_rcv,
 };
 
 static int __init ipv6_packet_init(void)
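For context on the ipv6_packet_type hunk: .list_func is the listified counterpart of .func, and registering it is optional. The sketch below is not the kernel's actual dispatch path, just an illustration of how a caller holding a list of skbs for one packet_type could prefer the list handler and fall back to per-skb delivery when none is registered; deliver_ptype_list() is an invented name.

/* Illustrative only -- invented helper, not the kernel's dispatcher. */
static void deliver_ptype_list(struct packet_type *pt, struct list_head *head,
			       struct net_device *orig_dev)
{
	struct sk_buff *skb, *next;

	if (pt->list_func) {
		/* protocol handles the whole batch, e.g. ipv6_list_rcv() */
		pt->list_func(head, pt, orig_dev);
		return;
	}
	/* fall back to one call of the classic handler per packet */
	list_for_each_entry_safe(skb, next, head, list) {
		list_del(&skb->list);
		pt->func(skb, skb->dev, pt, orig_dev);
	}
}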
@@ -47,17 +47,11 @@
 #include <net/inet_ecn.h>
 #include <net/dst_metadata.h>
 
-int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
+				struct sk_buff *skb)
 {
 	void (*edemux)(struct sk_buff *skb);
 
-	/* if ingress device is enslaved to an L3 master device pass the
-	 * skb to its handler for processing
-	 */
-	skb = l3mdev_ip6_rcv(skb);
-	if (!skb)
-		return NET_RX_SUCCESS;
-
 	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
 		const struct inet6_protocol *ipprot;
 
@@ -67,20 +61,73 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 	}
 	if (!skb_valid_dst(skb))
 		ip6_route_input(skb);
+}
+
+int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	/* if ingress device is enslaved to an L3 master device pass the
+	 * skb to its handler for processing
+	 */
+	skb = l3mdev_ip6_rcv(skb);
+	if (!skb)
+		return NET_RX_SUCCESS;
+	ip6_rcv_finish_core(net, sk, skb);
 
 	return dst_input(skb);
 }
 
-int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static void ip6_sublist_rcv_finish(struct list_head *head)
+{
+	struct sk_buff *skb, *next;
+
+	list_for_each_entry_safe(skb, next, head, list)
+		dst_input(skb);
+}
+
+static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
+				struct list_head *head)
+{
+	struct dst_entry *curr_dst = NULL;
+	struct sk_buff *skb, *next;
+	struct list_head sublist;
+
+	INIT_LIST_HEAD(&sublist);
+	list_for_each_entry_safe(skb, next, head, list) {
+		struct dst_entry *dst;
+
+		list_del(&skb->list);
+		/* if ingress device is enslaved to an L3 master device pass the
+		 * skb to its handler for processing
+		 */
+		skb = l3mdev_ip6_rcv(skb);
+		if (!skb)
+			continue;
+		ip6_rcv_finish_core(net, sk, skb);
+		dst = skb_dst(skb);
+		if (curr_dst != dst) {
+			/* dispatch old sublist */
+			if (!list_empty(&sublist))
+				ip6_sublist_rcv_finish(&sublist);
+			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
+			curr_dst = dst;
+		}
+		list_add_tail(&skb->list, &sublist);
+	}
+	/* dispatch final sublist */
+	ip6_sublist_rcv_finish(&sublist);
+}
+
+static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+				    struct net *net)
 {
 	const struct ipv6hdr *hdr;
 	u32 pkt_len;
 	struct inet6_dev *idev;
-	struct net *net = dev_net(skb->dev);
 
 	if (skb->pkt_type == PACKET_OTHERHOST) {
 		kfree_skb(skb);
-		return NET_RX_DROP;
+		return NULL;
 	}
 
 	rcu_read_lock();
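ip6_list_rcv_finish() above reuses the sublist trick from the IPv4 version: walk the list once, keep packets together while they resolve to the same dst, and dispatch each completed run as a batch. The standalone C sketch below shows the same batching idea in miniature; struct item, dispatch_batch() and process() are invented for the example, the integer key stands in for the dst pointer, and it is simplified in that it scans runs in place rather than moving entries onto a separate sublist as the kernel code must.

#include <stdio.h>

/* Invented types/names for the example; key stands in for skb_dst(). */
struct item {
	int key;
	int val;
	struct item *next;
};

/* Per-run work, done once for each batch of consecutive equal keys. */
static void dispatch_batch(const struct item *first, const struct item *end,
			   int key)
{
	printf("batch with key %d:", key);
	for (; first != end; first = first->next)
		printf(" %d", first->val);
	printf("\n");
}

/* Walk the list once, splitting it into runs of equal keys. */
static void process(const struct item *head)
{
	const struct item *start = head;

	while (start) {
		const struct item *p = start->next;

		while (p && p->key == start->key)
			p = p->next;	/* extend the current run */
		dispatch_batch(start, p, start->key);
		start = p;		/* begin the next run */
	}
}

int main(void)
{
	struct item d = { 2, 4, NULL }, c = { 2, 3, &d };
	struct item b = { 1, 2, &c }, a = { 1, 1, &b };

	process(&a);	/* prints two batches: keys 1 and 2 */
	return 0;
}

Either way the grouping keeps packets that will be handled the same way next to each other, so the later stages can amortise their work across each run instead of paying it per packet.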
@@ -196,7 +243,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 		if (ipv6_parse_hopopts(skb) < 0) {
 			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 			rcu_read_unlock();
-			return NET_RX_DROP;
+			return NULL;
 		}
 	}
 
@@ -205,15 +252,67 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	/* Must drop socket now because of tproxy. */
 	skb_orphan(skb);
 
-	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
-		       net, NULL, skb, dev, NULL,
-		       ip6_rcv_finish);
+	return skb;
 err:
 	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
 	rcu_read_unlock();
 	kfree_skb(skb);
-	return NET_RX_DROP;
+	return NULL;
 }
 
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+{
+	struct net *net = dev_net(skb->dev);
+
+	skb = ip6_rcv_core(skb, dev, net);
+	if (skb == NULL)
+		return NET_RX_DROP;
+
+	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+		       net, NULL, skb, dev, NULL,
+		       ip6_rcv_finish);
+}
+
+static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
+			    struct net *net)
+{
+	NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
+		     head, dev, NULL, ip6_rcv_finish);
+	ip6_list_rcv_finish(net, NULL, head);
+}
+
+/* Receive a list of IPv6 packets */
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+		   struct net_device *orig_dev)
+{
+	struct net_device *curr_dev = NULL;
+	struct net *curr_net = NULL;
+	struct sk_buff *skb, *next;
+	struct list_head sublist;
+
+	INIT_LIST_HEAD(&sublist);
+	list_for_each_entry_safe(skb, next, head, list) {
+		struct net_device *dev = skb->dev;
+		struct net *net = dev_net(dev);
+
+		list_del(&skb->list);
+		skb = ip6_rcv_core(skb, dev, net);
+		if (skb == NULL)
+			continue;
+
+		if (curr_dev != dev || curr_net != net) {
+			/* dispatch old sublist */
+			if (!list_empty(&sublist))
+				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
+			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
+			curr_dev = dev;
+			curr_net = net;
+		}
+		list_add_tail(&skb->list, &sublist);
+	}
+	/* dispatch final sublist */
+	ip6_sublist_rcv(&sublist, curr_dev, curr_net);
+}
+
 /*
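ipv6_list_rcv() mirrors its IPv4 counterpart: it regroups the incoming list by (device, network namespace) so that each sublist can be pushed through NF_HOOK_LIST and ip6_list_rcv_finish() under a single context. For completeness, here is a hedged sketch of how such a list reaches this handler in the first place, assuming the netif_receive_skb_list() entry point from the earlier listification work; rx_poll_example() and fetch_next_skb() are invented driver-side names, not existing kernel API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver helper: returns the next completed rx skb, or NULL. */
static struct sk_buff *fetch_next_skb(struct napi_struct *napi);

/* Sketch of a NAPI poll loop that batches rx skbs into a list and hands
 * the whole batch to the stack; netif_receive_skb_list() eventually
 * reaches per-protocol list handlers such as ipv6_list_rcv() above.
 */
static int rx_poll_example(struct napi_struct *napi, int budget)
{
	struct list_head rx_list;
	struct sk_buff *skb;
	int done = 0;

	INIT_LIST_HEAD(&rx_list);
	while (done < budget && (skb = fetch_next_skb(napi)) != NULL) {
		list_add_tail(&skb->list, &rx_list);
		done++;
	}
	if (!list_empty(&rx_list))
		netif_receive_skb_list(&rx_list);
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}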