2009-08-29 09:34:49 +08:00
|
|
|
#ifndef _NET_DST_OPS_H
|
|
|
|
#define _NET_DST_OPS_H
|
|
|
|
#include <linux/types.h>
|
2010-10-08 14:37:34 +08:00
|
|
|
#include <linux/percpu_counter.h>
|
2010-11-08 11:58:05 +08:00
|
|
|
#include <linux/cache.h>
|
2009-08-29 09:34:49 +08:00
|
|
|
|
|
|
|
/*
 * Forward declarations: struct dst_ops only carries pointers to these
 * types, so incomplete declarations suffice and keep this header light.
 */
struct dst_entry;
/*
 * Fix: this previously read "struct kmem_cachep;", a typo that declared
 * an unused tag.  The member below is "struct kmem_cache *kmem_cachep;",
 * so the type that actually needs declaring is struct kmem_cache.
 */
struct kmem_cache;
struct net_device;
struct sk_buff;
struct sock;
struct net;
/* Return type of the neigh_lookup callback below. */
struct neighbour;
|
/*
 * struct dst_ops - per-address-family operations and bookkeeping for
 * protocol-independent destination cache entries (struct dst_entry).
 * Each family supplies one instance whose callbacks implement its
 * family-specific routing behaviour.
 */
struct dst_ops {
	/* Address family this instance serves (e.g. AF_INET). */
	unsigned short		family;
	/* Presumably the GC trigger threshold on entry count — see gc(). */
	unsigned int		gc_thresh;

	/* Garbage-collect cached entries for this family. */
	int			(*gc)(struct dst_ops *ops);
	/* Revalidate a cached entry against @cookie. */
	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
	/* Default advertised MSS for routes of this family. */
	unsigned int		(*default_advmss)(const struct dst_entry *);
	/* Effective MTU of the entry. */
	unsigned int		(*mtu)(const struct dst_entry *);
	/*
	 * Copy-on-write the metrics array: entries initially point at
	 * read-only (possibly shared) metrics; this produces a writable
	 * copy.  Updates may transiently fail if the copy cannot be made.
	 */
	u32 *			(*cow_metrics)(struct dst_entry *, unsigned long);
	/* Tear down family-private state of an entry. */
	void			(*destroy)(struct dst_entry *);
	/* Notification that the entry's device @dev is going down. */
	void			(*ifdown)(struct dst_entry *,
					  struct net_device *dev, int how);
	/* Called to suggest a replacement for a dubious entry;
	 * may return a different entry (or NULL). */
	struct dst_entry *	(*negative_advice)(struct dst_entry *);
	/* React to a link-level failure for @skb's route. */
	void			(*link_failure)(struct sk_buff *);
	/* Record an updated path MTU for the entry. */
	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
					       struct sk_buff *skb, u32 mtu);
	/* Handle a redirect affecting the entry. */
	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
					    struct sk_buff *skb);
	/* Output hook for locally generated packets. */
	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
	/* Look up the neighbour for @daddr reached via this entry. */
	struct neighbour *	(*neigh_lookup)(const struct dst_entry *dst,
						struct sk_buff *skb,
						const void *daddr);
	/* Confirm reachability of the neighbour for @daddr. */
	void			(*confirm_neigh)(const struct dst_entry *dst,
						 const void *daddr);

	/* Slab cache this family's dst_entry objects come from. */
	struct kmem_cache	*kmem_cachep;

	/*
	 * Live-entry counter, manipulated via the dst_entries_*()
	 * helpers below.  Explicitly cacheline-aligned so the hot
	 * counter does not share a line with the read-mostly fields
	 * above it.
	 */
	struct percpu_counter	pcpuc_entries ____cacheline_aligned_in_smp;
};
|
/*
 * dst_entries_get_fast - cheap, approximate count of live dst entries.
 *
 * Uses percpu_counter_read_positive(), which reads the shared counter
 * without summing per-CPU deltas: fast, may lag the true total, and is
 * clamped to be non-negative.
 */
static inline int dst_entries_get_fast(struct dst_ops *dst)
{
	return percpu_counter_read_positive(&dst->pcpuc_entries);
}
/*
 * dst_entries_get_slow - precise count of live dst entries.
 *
 * Uses percpu_counter_sum_positive(), which sums the per-CPU deltas:
 * more expensive than dst_entries_get_fast() but accurate, and clamped
 * to be non-negative.
 */
static inline int dst_entries_get_slow(struct dst_ops *dst)
{
	return percpu_counter_sum_positive(&dst->pcpuc_entries);
}
/*
 * dst_entries_add - adjust the live-entry count by @val.
 *
 * @val may be negative to account for freed entries.
 */
static inline void dst_entries_add(struct dst_ops *dst, int val)
{
	percpu_counter_add(&dst->pcpuc_entries, val);
}
/*
 * dst_entries_init - set up the per-CPU entry counter, starting at 0.
 *
 * Allocates with GFP_KERNEL, so this may sleep and must be called from
 * process context.  Returns 0 on success or a negative error from
 * percpu_counter_init().
 */
static inline int dst_entries_init(struct dst_ops *dst)
{
	return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
}
/*
 * dst_entries_destroy - release the per-CPU entry counter.
 *
 * Counterpart of dst_entries_init(); frees the counter's per-CPU
 * storage.
 */
static inline void dst_entries_destroy(struct dst_ops *dst)
{
	percpu_counter_destroy(&dst->pcpuc_entries);
}
|
|
|
|
|
2009-08-29 09:34:49 +08:00
|
|
|
#endif
|