OpenCloudOS-Kernel/include/net/dst_ops.h

#ifndef _NET_DST_OPS_H
#define _NET_DST_OPS_H
#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/cache.h>

struct dst_entry;
struct kmem_cache;
struct net_device;
struct sk_buff;
struct sock;

struct dst_ops {
	unsigned short		family;
	__be16			protocol;
	unsigned int		gc_thresh;

	int			(*gc)(struct dst_ops *ops);
	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
	unsigned int		(*default_advmss)(const struct dst_entry *);
	unsigned int		(*mtu)(const struct dst_entry *);
	/*
	 * Routing metrics are copy-on-write: a dst_entry initially points
	 * at read-only metrics (or at the all-zero 'dst_default_metrics'),
	 * with the writability state kept in the low bits of the metrics
	 * pointer.  cow_metrics() returns a private writable copy, or NULL
	 * if the copy cannot be made, in which case the metrics update is
	 * transiently dropped.
	 */
	u32 *			(*cow_metrics)(struct dst_entry *, unsigned long);
	void			(*destroy)(struct dst_entry *);
	void			(*ifdown)(struct dst_entry *,
					  struct net_device *dev, int how);
	struct dst_entry *	(*negative_advice)(struct dst_entry *);
	void			(*link_failure)(struct sk_buff *);
	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
					       struct sk_buff *skb, u32 mtu);
	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
					    struct sk_buff *skb);
	int			(*local_out)(struct sk_buff *skb);
	struct neighbour *	(*neigh_lookup)(const struct dst_entry *dst,
						struct sk_buff *skb,
						const void *daddr);

	struct kmem_cache	*kmem_cachep;

	struct percpu_counter	pcpuc_entries ____cacheline_aligned_in_smp;
};
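
/*
 * Illustrative sketch, not part of this header: a protocol instantiates
 * dst_ops and wires in its callbacks, much as IPv4 does for ipv4_dst_ops
 * in net/ipv4/route.c.  The 'example_*' names below are hypothetical
 * placeholders and the gc_thresh value is arbitrary:
 *
 *	static struct dst_ops example_dst_ops = {
 *		.family		= AF_INET,
 *		.protocol	= cpu_to_be16(ETH_P_IP),
 *		.gc_thresh	= 32768,
 *		.gc		= example_gc,
 *		.check		= example_dst_check,
 *		.mtu		= example_mtu,
 *		.cow_metrics	= example_cow_metrics,
 *		.destroy	= example_dst_destroy,
 *	};
 */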
static inline int dst_entries_get_fast(struct dst_ops *dst)
{
	return percpu_counter_read_positive(&dst->pcpuc_entries);
}

static inline int dst_entries_get_slow(struct dst_ops *dst)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&dst->pcpuc_entries);
	local_bh_enable();
	return res;
}
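
/*
 * Typical consumer pattern (a sketch of what net/core/dst.c does in
 * dst_alloc()): the cheap, possibly stale per-cpu estimate gates garbage
 * collection against gc_thresh, while the exact but slower sum is kept
 * for paths that can afford summing every per-cpu slot:
 *
 *	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
 *		if (ops->gc(ops))
 *			return NULL;
 *	}
 */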

static inline void dst_entries_add(struct dst_ops *dst, int val)
{
	local_bh_disable();
	percpu_counter_add(&dst->pcpuc_entries, val);
	local_bh_enable();
}
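
/*
 * The core (net/core/dst.c) keeps this counter in step with dst
 * lifetimes: dst_alloc() does dst_entries_add(ops, 1) and dst_destroy()
 * does dst_entries_add(ops, -1).
 */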

static inline int dst_entries_init(struct dst_ops *dst)
{
	return percpu_counter_init(&dst->pcpuc_entries, 0);
}

static inline void dst_entries_destroy(struct dst_ops *dst)
{
	percpu_counter_destroy(&dst->pcpuc_entries);
}
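
/*
 * Lifecycle sketch: the per-cpu counter must be initialized once before
 * the ops are used and torn down when the protocol unregisters.  Using
 * the hypothetical example_dst_ops from above:
 *
 *	if (dst_entries_init(&example_dst_ops) < 0)
 *		goto fail;
 *	...
 *	dst_entries_destroy(&example_dst_ops);
 */
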
#endif