/*
 * xfrm4_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/l3mdev.h>
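
/*
 * Resolve an IPv4 route for an xfrm destination: fill in the flowi4 from
 * daddr/saddr, TOS and the (l3mdev-resolved) output interface, then do a
 * plain routing lookup and return the resulting dst or an error pointer.
 */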
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
					    int tos, int oif,
					    const xfrm_address_t *saddr,
					    const xfrm_address_t *daddr)
{
	struct rtable *rt;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = daddr->a4;
	fl4->flowi4_tos = tos;
	fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
	if (saddr)
		fl4->saddr = saddr->a4;

	fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;

	rt = __ip_route_output_key(net, fl4);
	if (!IS_ERR(rt))
		return &rt->dst;

	return ERR_CAST(rt);
}

static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
					  const xfrm_address_t *saddr,
					  const xfrm_address_t *daddr)
{
	struct flowi4 fl4;

	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
}
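
/*
 * Pick a source address for an SA: route towards daddr with the source
 * left unspecified and use whatever source the routing code selected.
 */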
static int xfrm4_get_saddr(struct net *net, int oif,
			   xfrm_address_t *saddr, xfrm_address_t *daddr)
{
	struct dst_entry *dst;
	struct flowi4 fl4;

	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
	if (IS_ERR(dst))
		return -EHOSTUNREACH;

	saddr->a4 = fl4.saddr;
	dst_release(dst);
	return 0;
}

static int xfrm4_get_tos(const struct flowi *fl)
{
	return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */
}
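
/* IPv4 has no af-specific path state to set up, so this is a no-op. */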
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	return 0;
}
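
/*
 * Copy the routing state of the underlying IPv4 route into the bundle's
 * embedded rtable so the xfrm dst behaves like the route it wraps.
 */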
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
			  const struct flowi *fl)
{
	struct rtable *rt = (struct rtable *)xdst->route;
	const struct flowi4 *fl4 = &fl->u.ip4;

	xdst->u.rt.rt_iif = fl4->flowi4_iif;

	xdst->u.dst.dev = dev;
	dev_hold(dev);

	/* Sheit... I remember I did this right. Apparently,
	 * it was magically lost, so this code needs audit */
	xdst->u.rt.rt_is_input = rt->rt_is_input;
	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
					      RTCF_LOCAL);
	xdst->u.rt.rt_type = rt->rt_type;
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
	xdst->u.rt.rt_table_id = rt->rt_table_id;
	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);

	return 0;
}
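
/*
 * Build the IPv4 flow selector for policy lookup from a packet.  With
 * 'reverse' set, source and destination are swapped so the selector
 * matches the return direction of the flow.
 */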
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

	if (skb_dst(skb))
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

	if (!ip_is_fragment(iph)) {
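		/* Pull per-protocol selector fields (ports, ICMP type/code,
		 * IPsec SPI, GRE key) when the transport header is available.
		 */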
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ports = (__be16 *)xprth;

				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;

		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;

		case IPPROTO_ESP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ehdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;

		case IPPROTO_AH:
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ah_hdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;

		case IPPROTO_COMP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;

				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
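
		/* For GRE, use the key as the selector when one is present;
		 * skip over the optional checksum word to reach it.
		 */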
		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;

		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos;
}
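
/*
 * Ask the xfrm core to flush stale bundles and report pressure once the
 * number of cached dst entries exceeds twice the GC threshold.
 */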
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

	xfrm_garbage_collect_deferred(net);
	return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
}
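
/* PMTU updates and redirects are simply forwarded to the underlying route. */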
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct dst_entry *path = xdst->route;

	path->ops->update_pmtu(path, sk, skb, mtu);
}

static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct dst_entry *path = xdst->route;

	path->ops->redirect(path, sk, skb);
}

static void xfrm4_dst_destroy(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	dst_destroy_metrics_generic(dst);

	xfrm_dst_destroy(xdst);
}

static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	if (!unregister)
		return;

	xfrm_dst_ifdown(dst, dev);
}
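
/*
 * dst_ops template; xfrm4_net_init() copies it into each network namespace.
 * gc_thresh defaults to INT_MAX and is tunable via the xfrm4_gc_thresh sysctl.
 */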
static struct dst_ops xfrm4_dst_ops_template = {
	.family =		AF_INET,
	.gc =			xfrm4_garbage_collect,
	.update_pmtu =		xfrm4_update_pmtu,
	.redirect =		xfrm4_redirect,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		xfrm4_dst_destroy,
	.ifdown =		xfrm4_dst_ifdown,
	.local_out =		__ip_local_out,
	.gc_thresh =		INT_MAX,
};

static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.dst_ops =		&xfrm4_dst_ops_template,
	.dst_lookup =		xfrm4_dst_lookup,
	.get_saddr =		xfrm4_get_saddr,
	.decode_session =	_decode_session4,
	.get_tos =		xfrm4_get_tos,
	.init_path =		xfrm4_init_path,
	.fill_dst =		xfrm4_fill_dst,
	.blackhole_route =	ipv4_blackhole_route,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table xfrm4_policy_table[] = {
	{
		.procname	= "xfrm4_gc_thresh",
		.data		= &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
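
/*
 * Register the xfrm4_gc_thresh sysctl; for namespaces other than init_net
 * the table is duplicated and pointed at the per-namespace dst_ops.
 */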
static __net_init int xfrm4_net_sysctl_init(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = xfrm4_policy_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.xfrm4_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static __net_exit void xfrm4_net_sysctl_exit(struct net *net)
{
	struct ctl_table *table;

	if (!net->ipv4.xfrm4_hdr)
		return;

	table = net->ipv4.xfrm4_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.xfrm4_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}
#else /* CONFIG_SYSCTL */
static inline int xfrm4_net_sysctl_init(struct net *net)
{
	return 0;
}

static inline void xfrm4_net_sysctl_exit(struct net *net)
{
}
#endif
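
/*
 * Per-namespace setup: give each netns its own copy of the dst_ops (and
 * dst entry counter) plus the sysctl, so gc_thresh is per-namespace.
 */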
static int __net_init xfrm4_net_init(struct net *net)
{
	int ret;

	memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
	       sizeof(xfrm4_dst_ops_template));
	ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
	if (ret)
		return ret;

	ret = xfrm4_net_sysctl_init(net);
	if (ret)
		dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);

	return ret;
}

static void __net_exit xfrm4_net_exit(struct net *net)
{
	xfrm4_net_sysctl_exit(net);
	dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
}

static struct pernet_operations __net_initdata xfrm4_net_ops = {
	.init	= xfrm4_net_init,
	.exit	= xfrm4_net_exit,
};

static void __init xfrm4_policy_init(void)
{
	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
}
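
/*
 * Boot-time entry point: bring up IPv4 xfrm state, policy and protocol
 * handling, then register the per-namespace operations.
 */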
void __init xfrm4_init(void)
{
	xfrm4_state_init();
	xfrm4_policy_init();
	xfrm4_protocol_init();
	register_pernet_subsys(&xfrm4_net_ops);
}