Merge "net: finish renaming lls to busy poll"

Eliezer Tamir says:

====================
Here are three patches that complete the rename of lls to busy-poll

1. rename include/net/ll_poll.h to include/net/busy_poll.h
2. Rename ndo_ll_poll to ndo_busy_poll.
   Rename sk_mark_ll to sk_mark_napi_id.
   Rename skb_mark_ll to skb_mark_napi_id.
   Correct all users of these functions.
   Update comments and defines in include/net/busy_poll.h
3. Rename LL_SO to BUSY_POLL_SO
   Rename sysctl_net_ll_{read,poll} to sysctl_busy_{read,poll}
   Fix up users of these variables.
   Fix documentation for sysctl.

v2 fixed forgetting the ndo changes in v1
v3 is a resend with -M
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2013-07-10 17:10:40 -07:00
commit 0e00fd4794
34 changed files with 74 additions and 71 deletions

View File

@ -50,26 +50,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
it's a Per-CPU variable.
Default: 64
low_latency_read
busy_read
----------------
Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
Approximate time in us to busy loop waiting for packets on the device queue.
This sets the default value of the SO_LL socket option.
Can be set or overridden per socket by setting socket option SO_LL, which is
the preferred method of enabling.
If you need to enable the feature globally via sysctl, a value of 50 is recommended.
This sets the default value of the SO_BUSY_POLL socket option.
Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
which is the preferred method of enabling. If you need to enable the feature
globally via sysctl, a value of 50 is recommended.
Will increase power usage.
Default: 0 (off)
low_latency_poll
busy_poll
----------------
Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on.
For several sockets 50, for several hundreds 100.
For more than that you probably want to use epoll.
Note that only sockets with SO_LL set will be busy polled, so you want to either
selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally.
Note that only sockets with SO_BUSY_POLL set will be busy polled,
so you want to either selectively set SO_BUSY_POLL on those sockets or set
sysctl.net.busy_read globally.
Will increase power usage.
Default: 0 (off)

View File

@ -81,6 +81,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _UAPI_ASM_SOCKET_H */

View File

@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* __ASM_AVR32_SOCKET_H */

View File

@ -76,7 +76,7 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */

View File

@ -74,7 +74,7 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */

View File

@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */

View File

@ -83,6 +83,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_IA64_SOCKET_H */

View File

@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_M32R_SOCKET_H */

View File

@ -92,6 +92,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _UAPI_ASM_SOCKET_H */

View File

@ -74,6 +74,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */

View File

@ -73,7 +73,7 @@
#define SO_SELECT_ERR_QUEUE 0x4026
#define SO_LL 0x4027
#define SO_BUSY_POLL 0x4027
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.

View File

@ -81,6 +81,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_POWERPC_SOCKET_H */

View File

@ -80,6 +80,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */

View File

@ -70,7 +70,7 @@
#define SO_SELECT_ERR_QUEUE 0x0029
#define SO_LL 0x0030
#define SO_BUSY_POLL 0x0030
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001

View File

@ -85,6 +85,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* _XTENSA_SOCKET_H */

View File

@ -24,7 +24,7 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
@ -990,7 +990,7 @@ reuse_rx:
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(cqe_fp->vlan_tag));
skb_mark_ll(skb, &fp->napi);
skb_mark_napi_id(skb, &fp->napi);
if (bnx2x_fp_ll_polling(fp))
netif_receive_skb(skb);

View File

@ -12027,7 +12027,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
#ifdef CONFIG_NET_LL_RX_POLL
.ndo_ll_poll = bnx2x_low_latency_recv,
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
};

View File

@ -52,7 +52,7 @@
#include <linux/dca.h>
#endif
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
#define LL_EXTENDED_STATS

View File

@ -1978,7 +1978,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
#endif /* IXGBE_FCOE */
skb_mark_ll(skb, &q_vector->napi);
skb_mark_napi_id(skb, &q_vector->napi);
ixgbe_rx_skb(q_vector, skb);
/* update budget accounting */
@ -7228,7 +7228,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
.ndo_ll_poll = ixgbe_low_latency_recv,
.ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,

View File

@ -38,7 +38,7 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@ -2141,7 +2141,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
.ndo_ll_poll = mlx4_en_low_latency_recv,
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
};

View File

@ -31,7 +31,7 @@
*
*/
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
@ -767,7 +767,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
timestamp);
}
skb_mark_ll(skb, &cq->napi);
skb_mark_napi_id(skb, &cq->napi);
/* Push it up the stack */
netif_receive_skb(skb);

View File

@ -28,7 +28,7 @@
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <asm/uaccess.h>

View File

@ -974,7 +974,7 @@ struct net_device_ops {
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_LL_RX_POLL
int (*ndo_ll_poll)(struct napi_struct *dev);
int (*ndo_busy_poll)(struct napi_struct *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);

View File

@ -1,5 +1,5 @@
/*
* Low Latency Sockets
* net busy poll support
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
@ -21,8 +21,8 @@
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
*/
#ifndef _LINUX_NET_LL_POLL_H
#define _LINUX_NET_LL_POLL_H
#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H
#include <linux/netdevice.h>
#include <net/ip.h>
@ -30,8 +30,8 @@
#ifdef CONFIG_NET_LL_RX_POLL
struct napi_struct;
extern unsigned int sysctl_net_ll_read __read_mostly;
extern unsigned int sysctl_net_ll_poll __read_mostly;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;
/* return values from ndo_ll_poll */
#define LL_FLUSH_FAILED -1
@ -39,7 +39,7 @@ extern unsigned int sysctl_net_ll_poll __read_mostly;
static inline bool net_busy_loop_on(void)
{
return sysctl_net_ll_poll;
return sysctl_net_busy_poll;
}
/* a wrapper to make debug_smp_processor_id() happy
@ -72,7 +72,7 @@ static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
/* in poll/select we use the global sysctl_net_ll_poll value */
static inline unsigned long busy_loop_end_time(void)
{
return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
}
static inline bool sk_can_busy_loop(struct sock *sk)
@ -110,11 +110,11 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
goto out;
ops = napi->dev->netdev_ops;
if (!ops->ndo_ll_poll)
if (!ops->ndo_busy_poll)
goto out;
do {
rc = ops->ndo_ll_poll(napi);
rc = ops->ndo_busy_poll(napi);
if (rc == LL_FLUSH_FAILED)
break; /* permanent failure */
@ -134,13 +134,14 @@ out:
}
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
skb->napi_id = napi->napi_id;
}
/* used in the protocol hanlder to propagate the napi_id to the socket */
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
sk->sk_napi_id = skb->napi_id;
}
@ -166,11 +167,12 @@ static inline bool sk_busy_poll(struct sock *sk, int nonblock)
return false;
}
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
}
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
}
@ -180,4 +182,4 @@ static inline bool busy_loop_timeout(unsigned long end_time)
}
#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* _LINUX_NET_LL_POLL_H */
#endif /* _LINUX_NET_BUSY_POLL_H */

View File

@ -76,6 +76,6 @@
#define SO_SELECT_ERR_QUEUE 45
#define SO_LL 46
#define SO_BUSY_POLL 46
#endif /* __ASM_GENERIC_SOCKET_H */

View File

@ -56,7 +56,7 @@
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
/*
* Is a socket 'connection oriented' ?

View File

@ -139,7 +139,7 @@
#include <net/tcp.h>
#endif
#include <net/ll_poll.h>
#include <net/busy_poll.h>
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
@ -901,7 +901,7 @@ set_rcvbuf:
break;
#ifdef CONFIG_NET_LL_RX_POLL
case SO_LL:
case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
ret = -EPERM;
@ -1171,7 +1171,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
#ifdef CONFIG_NET_LL_RX_POLL
case SO_LL:
case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
#endif
@ -2294,7 +2294,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
#ifdef CONFIG_NET_LL_RX_POLL
sk->sk_napi_id = 0;
sk->sk_ll_usec = sysctl_net_ll_read;
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
/*

View File

@ -19,7 +19,7 @@
#include <net/ip.h>
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
static int one = 1;
@ -300,15 +300,15 @@ static struct ctl_table net_core_table[] = {
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_LL_RX_POLL
{
.procname = "low_latency_poll",
.data = &sysctl_net_ll_poll,
.procname = "busy_poll",
.data = &sysctl_net_busy_poll,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "low_latency_read",
.data = &sysctl_net_ll_read,
.procname = "busy_read",
.data = &sysctl_net_busy_read,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec

View File

@ -279,7 +279,7 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

View File

@ -75,7 +75,7 @@
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
@ -1994,7 +1994,7 @@ process:
if (sk_filter(sk, skb))
goto discard_and_relse;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
skb->dev = NULL;
bh_lock_sock_nested(sk);

View File

@ -109,7 +109,7 @@
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
@ -1713,7 +1713,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (sk != NULL) {
int ret;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);

View File

@ -63,7 +63,7 @@
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <asm/uaccess.h>
@ -1499,7 +1499,7 @@ process:
if (sk_filter(sk, skb))
goto discard_and_relse;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
skb->dev = NULL;
bh_lock_sock_nested(sk);

View File

@ -46,7 +46,7 @@
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@ -844,7 +844,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (sk != NULL) {
int ret;
sk_mark_ll(sk, skb);
sk_mark_napi_id(sk, skb);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);

View File

@ -104,11 +104,11 @@
#include <linux/route.h>
#include <linux/sockios.h>
#include <linux/atalk.h>
#include <net/ll_poll.h>
#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
unsigned int sysctl_net_ll_read __read_mostly;
unsigned int sysctl_net_ll_poll __read_mostly;
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
static int sock_no_open(struct inode *irrelevant, struct file *dontcare);