net: indirect call helpers for ipv4/ipv6 dst_check functions
This patch avoids the indirect call for the common cases: ip6_dst_check and ipv4_dst_check.

Signed-off-by: Brian Vazquez <brianvv@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent f67fbeaebd
commit bbd807dfbf
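For context, INDIRECT_CALL_1()/INDIRECT_CALL_2() turn a retpoline-protected indirect call into one or two pointer comparisons against likely targets, each followed by a direct call; the indirect call survives only as a fallback. INDIRECT_CALL_INET() picks between them depending on whether IPv6 is built in. A rough sketch of the helpers, modeled on include/linux/indirect_call_wrapper.h of this era (the tree has the authoritative definitions):

#define INDIRECT_CALL_1(f, f1, ...)                                     \
        ({                                                              \
                likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);     \
        })

#define INDIRECT_CALL_2(f, f2, f1, ...)                                 \
        ({                                                              \
                likely(f == f2) ? f2(__VA_ARGS__) :                     \
                                  INDIRECT_CALL_1(f, f1, __VA_ARGS__);  \
        })

/* ip6_dst_check() can only be compared against when IPv6 is built in,
 * so fall back to the single-target or plain-indirect forms otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
#define INDIRECT_CALL_INET(f, f2, f1, ...) \
        INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#elif IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET(f, f2, f1, ...) \
        INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif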
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -459,10 +459,15 @@ static inline int dst_input(struct sk_buff *skb)
                           ip6_input, ip_local_deliver, skb);
 }
 
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
+                                                          u32));
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+                                                           u32));
 static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
 {
        if (dst->obsolete)
-               dst = dst->ops->check(dst, cookie);
+               dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
+                                        ipv4_dst_check, dst, cookie);
        return dst;
 }
 
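With CONFIG_RETPOLINE and CONFIG_IPV6 both enabled, the new dst_check() above behaves roughly like this hand-expanded version (illustrative, not literal preprocessor output):

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
        if (dst->obsolete) {
                /* Two predicted pointer comparisons with direct calls;
                 * the indirect call remains only as the unlikely fallback.
                 */
                if (likely(dst->ops->check == ip6_dst_check))
                        dst = ip6_dst_check(dst, cookie);
                else if (likely(dst->ops->check == ipv4_dst_check))
                        dst = ipv4_dst_check(dst, cookie);
                else
                        dst = dst->ops->check(dst, cookie);
        }
        return dst;
}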
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -526,11 +526,17 @@ discard_and_relse:
 }
 EXPORT_SYMBOL(__sk_receive_skb);
 
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
+                                                          u32));
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+                                                           u32));
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
        struct dst_entry *dst = __sk_dst_get(sk);
 
-       if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+       if (dst && dst->obsolete &&
+           INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
+                              dst, cookie) == NULL) {
                sk_tx_queue_clear(sk);
                sk->sk_dst_pending_confirm = 0;
                RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
@@ -546,7 +552,9 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 {
        struct dst_entry *dst = sk_dst_get(sk);
 
-       if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+       if (dst && dst->obsolete &&
+           INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
+                              dst, cookie) == NULL) {
                sk_dst_reset(sk);
                dst_release(dst);
                return NULL;
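The INDIRECT_CALLABLE_DECLARE() prototypes above, and the INDIRECT_CALLABLE_SCOPE annotations in the route.c hunks below, compile away when retpolines are disabled, so ipv4_dst_check()/ip6_dst_check() remain static in that configuration. A sketch of those definitions (again following include/linux/indirect_call_wrapper.h):

#ifdef CONFIG_RETPOLINE
/* Callers need a visible prototype to compare against and call directly. */
#define INDIRECT_CALLABLE_DECLARE(f)    f
#define INDIRECT_CALLABLE_SCOPE
#else
/* No retpolines: drop the prototypes and keep the functions static. */
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE         static
#endif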
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -133,7 +133,8 @@ static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
  *     Interface to generic destination cache.
  */
 
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
+INDIRECT_CALLABLE_SCOPE
+struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ipv4_default_advmss(const struct dst_entry *dst);
 INDIRECT_CALLABLE_SCOPE
 unsigned int           ipv4_mtu(const struct dst_entry *dst);
@@ -1188,7 +1189,8 @@ void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
 
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
+                                                         u32 cookie)
 {
        struct rtable *rt = (struct rtable *) dst;
 
@@ -1204,6 +1206,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
                return NULL;
        return dst;
 }
+EXPORT_SYMBOL(ipv4_dst_check);
 
 static void ipv4_send_dest_unreach(struct sk_buff *skb)
 {
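The new EXPORT_SYMBOL() is likely needed because dst_check() is a static inline in include/net/dst.h: any module that inlines it now takes the address of ipv4_dst_check (and ip6_dst_check) for the pointer comparison, so the symbols must resolve at module load time. A hypothetical module-side illustration (example_check is made up, not kernel API):

/* Hypothetical module code: inlining dst_check() pulls in an address
 * reference to ipv4_dst_check/ip6_dst_check, hence the exports.
 */
#include <net/dst.h>

static struct dst_entry *example_check(struct dst_entry *dst, u32 cookie)
{
        /* Expands to: dst->ops->check == ip6_dst_check ? ... */
        return dst_check(dst, cookie);
}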
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1649,6 +1649,8 @@ u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
        return mss;
 }
 
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+                                                           u32));
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1668,7 +1670,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
-                           !dst->ops->check(dst, 0)) {
+                           !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
+                                            dst, 0)) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
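Only the IPv4 target is compared here: tcp_v4_do_rcv() caches sk->sk_rx_dst from IPv4 input, so the single-target INDIRECT_CALL_1() suffices. The condition expands to roughly:

        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
            !(likely(dst->ops->check == ipv4_dst_check) ?
              ipv4_dst_check(dst, 0) : dst->ops->check(dst, 0))) {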
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -81,7 +81,8 @@ enum rt6_nud_state {
        RT6_NUD_SUCCEED = 1
 };
 
-static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
+INDIRECT_CALLABLE_SCOPE
+struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ip6_default_advmss(const struct dst_entry *dst);
 INDIRECT_CALLABLE_SCOPE
 unsigned int           ip6_mtu(const struct dst_entry *dst);
@@ -2612,7 +2613,8 @@ static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
        return NULL;
 }
 
-static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
+INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
+                                                        u32 cookie)
 {
        struct dst_entry *dst_ret;
        struct fib6_info *from;
@@ -2642,6 +2644,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
        return dst_ret;
 }
+EXPORT_SYMBOL(ip6_dst_check);
 
 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
 {
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1420,6 +1420,8 @@ out:
        return NULL;
 }
 
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+                                                           u32));
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1473,7 +1475,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                        sk_mark_napi_id(sk, skb);
                        if (dst) {
                                if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
-                                   dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+                                   INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
+                                                   dst, np->rx_dst_cookie) == NULL) {
                                        dst_release(dst);
                                        sk->sk_rx_dst = NULL;
                                }
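The same trick applies anywhere a hot indirect call almost always hits one known target. A minimal, self-contained userspace sketch of the pattern (the CALL_1 macro and op names here are made up, not kernel API):

#include <stdio.h>

#define likely(x)       __builtin_expect(!!(x), 1)

/* CALL_1: compare the function pointer against the expected target and
 * make a direct (inlinable, well-predicted) call when it matches; fall
 * back to the plain indirect call otherwise.
 */
#define CALL_1(f, f1, ...) \
        (likely((f) == (f1)) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

static int add_op(int a, int b) { return a + b; }
static int mul_op(int a, int b) { return a * b; }

int main(void)
{
        int (*op)(int, int) = add_op;

        /* Fast path: pointer matches, the compiler emits a direct call. */
        printf("%d\n", CALL_1(op, add_op, 2, 3));       /* 5 */

        op = mul_op;
        /* Slow path: a genuine indirect call through the pointer. */
        printf("%d\n", CALL_1(op, add_op, 2, 3));       /* 6 */
        return 0;
}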