Merge branch 'ipv6-net-opts'
Pavel Begunkov says:

====================
generic net and ipv6 minor optimisations

1-3 inline simple functions that only reshuffle arguments, possibly
adding extra zero args, and call another function. It was benchmarked
before with a bunch of extra patches; for details see
https://lore.kernel.org/netdev/cover.1648981570.git.asml.silence@gmail.com/

It may increase the binary size, but it's the right thing to do, and at
least without modules it actually sheds some bytes for some standard-ish
config.

before:
   text    data     bss     dec     hex filename
9627200       0       0 9627200  92e640 ./arch/x86_64/boot/bzImage

after:
   text    data     bss     dec     hex filename
9627104       0       0 9627104  92e5e0 ./arch/x86_64/boot/bzImage
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
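The pattern in patches 1-3 is the same each time: the exported out-of-line wrapper is deleted, a single __-prefixed workhorse keeps the body, and the old entry points become static inlines in the header that pass down constant defaults (NULL, 0), which the compiler folds into each call site. A minimal sketch of the shape, using hypothetical names rather than the kernel's (the real instances are dev_queue_xmit(), skb_zerocopy_iter_dgram() and sock_alloc_send_skb() in the hunks below):

struct pkt;
struct dev;

/*
 * Before: xmit() and xmit_accel() were exported functions whose bodies
 * only called __xmit() with defaults filled in, costing an extra call.
 * After: one exported workhorse plus header inlines.
 */
int __xmit(struct pkt *p, struct dev *d);	/* the only out-of-line symbol */

static inline int xmit(struct pkt *p)
{
	return __xmit(p, NULL);		/* NULL folded in at the call site */
}

static inline int xmit_accel(struct pkt *p, struct dev *d)
{
	return __xmit(p, d);
}

Inlining can grow the image, since every call site now materialises the default arguments itself; that is the trade-off the cover letter measures, and here dropping the wrapper bodies and their EXPORT_SYMBOL records leaves the bzImage slightly smaller.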
commit 8fd813441e
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
@@ -2940,10 +2940,20 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 		       struct net_device *sb_dev);
 
-int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
+
+static inline int dev_queue_xmit_accel(struct sk_buff *skb,
+				       struct net_device *sb_dev)
+{
+	return __dev_queue_xmit(skb, sb_dev);
+}
+
 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
 	int ret;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
@@ -684,20 +684,6 @@ struct ubuf_info {
 int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
 void mm_unaccount_pinned_pages(struct mmpin *mmp);
 
-struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
-struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
-				       struct ubuf_info *uarg);
-
-void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
-			   bool success);
-
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
-int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
-			     struct msghdr *msg, int len,
-			     struct ubuf_info *uarg);
-
 /* This data is invariant across clones and lives at
  * the end of the header data, ie. at skb->end.
  */
@@ -1679,6 +1665,28 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
 }
 #endif
 
+struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
+struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
+				       struct ubuf_info *uarg);
+
+void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+
+void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+			   bool success);
+
+int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+			    struct iov_iter *from, size_t length);
+
+static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
+					  struct msghdr *msg, int len)
+{
+	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
+}
+
+int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
+			     struct msghdr *msg, int len,
+			     struct ubuf_info *uarg);
+
 /* Internal */
 #define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))
 
diff --git a/include/net/sock.h b/include/net/sock.h
@@ -1823,11 +1823,17 @@ int sock_getsockopt(struct socket *sock, int level, int op,
 		    char __user *optval, int __user *optlen);
 int sock_gettstamp(struct socket *sock, void __user *userstamp,
 		   bool timeval, bool time32);
-struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
-				    int noblock, int *errcode);
 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 				     unsigned long data_len, int noblock,
 				     int *errcode, int max_page_order);
+
+static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
+						  unsigned long size,
+						  int noblock, int *errcode)
+{
+	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
+}
+
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
 void sock_kfree_s(struct sock *sk, void *mem, int size);
 void sock_kzfree_s(struct sock *sk, void *mem, int size);
diff --git a/net/core/datagram.c b/net/core/datagram.c
@@ -62,8 +62,6 @@
 #include <trace/events/skb.h>
 #include <net/busy_poll.h>
 
-#include "datagram.h"
-
 /*
  *	Is a socket 'connection oriented' ?
  */
diff --git a/net/core/datagram.h b/net/core/datagram.h
deleted file mode 100644
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _NET_CORE_DATAGRAM_H_
-#define _NET_CORE_DATAGRAM_H_
-
-#include <linux/types.h>
-
-struct sock;
-struct sk_buff;
-struct iov_iter;
-
-int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
-			    struct iov_iter *from, size_t length);
-
-#endif /* _NET_CORE_DATAGRAM_H_ */
diff --git a/net/core/dev.c b/net/core/dev.c
@@ -4111,7 +4111,7 @@ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
  *	the BH enable code must have IRQs enabled so that it will not deadlock.
  *	--BLG
  */
-static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq = NULL;
@@ -4235,18 +4235,7 @@ out:
 	rcu_read_unlock_bh();
 	return rc;
 }
-
-int dev_queue_xmit(struct sk_buff *skb)
-{
-	return __dev_queue_xmit(skb, NULL);
-}
-EXPORT_SYMBOL(dev_queue_xmit);
-
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
-{
-	return __dev_queue_xmit(skb, sb_dev);
-}
-EXPORT_SYMBOL(dev_queue_xmit_accel);
+EXPORT_SYMBOL(__dev_queue_xmit);
 
 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
@@ -80,7 +80,6 @@
 #include <linux/user_namespace.h>
 #include <linux/indirect_call_wrapper.h>
 
-#include "datagram.h"
 #include "sock_destructor.h"
 
 struct kmem_cache *skbuff_head_cache __ro_after_init;
@@ -1340,12 +1339,6 @@ void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
 }
 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
 
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
-{
-	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
-}
-EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
-
 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 			     struct msghdr *msg, int len,
 			     struct ubuf_info *uarg)
diff --git a/net/core/sock.c b/net/core/sock.c
@@ -2635,13 +2635,6 @@ failure:
 }
 EXPORT_SYMBOL(sock_alloc_send_pskb);
 
-struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
-				    int noblock, int *errcode)
-{
-	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
-}
-EXPORT_SYMBOL(sock_alloc_send_skb);
-
 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc)
 {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
@@ -119,19 +119,21 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 	rcu_read_lock_bh();
 	nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
 	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
-	if (unlikely(!neigh))
-		neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
-	if (!IS_ERR(neigh)) {
-		sock_confirm_neigh(skb, neigh);
-		ret = neigh_output(neigh, skb, false);
-		rcu_read_unlock_bh();
-		return ret;
-	}
-	rcu_read_unlock_bh();
-
-	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
-	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
-	return -EINVAL;
+
+	if (unlikely(IS_ERR_OR_NULL(neigh))) {
+		if (unlikely(!neigh))
+			neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
+		if (IS_ERR(neigh)) {
+			rcu_read_unlock_bh();
+			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
+			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
+			return -EINVAL;
+		}
+	}
+	sock_confirm_neigh(skb, neigh);
+	ret = neigh_output(neigh, skb, false);
+	rcu_read_unlock_bh();
+	return ret;
 }
 
 static int
@@ -198,7 +200,6 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 	switch (ret) {
 	case NET_XMIT_SUCCESS:
-		return __ip6_finish_output(net, sk, skb);
 	case NET_XMIT_CN:
 		return __ip6_finish_output(net, sk, skb) ? : ret;
 	default:
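The last hunk is behaviour-preserving because NET_XMIT_SUCCESS is defined as 0: with ret == 0, the GNU "?:" (elvis) expression "x ? : ret" evaluates to x when x is non-zero and to 0 otherwise, which is exactly what the removed "return __ip6_finish_output(net, sk, skb);" arm produced. A standalone sketch of the equivalence (stand-in function, not the kernel's; compiles with GCC/Clang, as "?:" is a GNU extension):

#include <assert.h>

#define NET_XMIT_SUCCESS 0x00	/* same values as in linux/netdevice.h */
#define NET_XMIT_CN	 0x02

/* stand-in for __ip6_finish_output(): 0 on success, negative errno on error */
static int finish(int outcome) { return outcome; }

int main(void)
{
	int ret = NET_XMIT_SUCCESS;

	/* old SUCCESS arm: return finish(); merged arm: finish() ? : ret */
	assert((finish(0) ? : ret) == finish(0));
	assert((finish(-22) ? : ret) == finish(-22));
	return 0;
}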