/*
 * include/linux/if_team.h - Network team device driver header
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_

#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>

struct team_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_dropped;
	u32			tx_dropped;
	u32			rx_nohandler;
};
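
/*
 * These counters are per-cpu; a reader is expected to sum them across
 * CPUs and use syncp for consistent 64-bit snapshots on 32-bit hosts.
 * A minimal sketch of the usual fetch loop (pcpu_stats here is a
 * hypothetical per-cpu pointer obtained via per_cpu_ptr()):
 *
 *	unsigned int start;
 *	u64 rx_packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&pcpu_stats->syncp);
 *		rx_packets = pcpu_stats->rx_packets;
 *	} while (u64_stats_fetch_retry_irq(&pcpu_stats->syncp, start));
 */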

struct team;

struct team_port {
	struct net_device *dev;
	struct hlist_node hlist; /* node in enabled ports hash list */
	struct list_head list; /* node in ordinary list */
	struct team *team;
	int index; /* index of enabled port. If disabled, it's set to -1 */

	bool linkup; /* either state.linkup or user.linkup */

	struct {
		bool linkup;
		u32 speed;
		u8 duplex;
	} state;

	/* Values set by userspace */
	struct {
		bool linkup;
		bool linkup_enabled;
	} user;

	/* Custom gennetlink interface related flags */
	bool changed;
	bool removed;

	/*
	 * A place for storing original values of the device before it
	 * became a port.
	 */
	struct {
		unsigned char dev_addr[MAX_ADDR_LEN];
		unsigned int mtu;
	} orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;
#endif

	s32 priority; /* lower number ~ higher priority */
	u16 queue_id;
	struct list_head qom_list; /* node in queue override mapping list */
	struct rcu_head rcu;
	long mode_priv[0];
};

static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static inline bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}

static inline bool team_port_txable(struct team_port *port)
{
	return port->linkup && team_port_enabled(port);
}

static inline bool team_port_dev_txable(const struct net_device *port_dev)
{
	struct team_port *port;
	bool txable;

	rcu_read_lock();
	port = team_port_get_rcu(port_dev);
	txable = port ? team_port_txable(port) : false;
	rcu_read_unlock();

	return txable;
}
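
/*
 * team_port_dev_txable() exists so that code outside the team driver can
 * ask, on demand, whether the team device would currently transmit
 * through a given port netdevice. External callers normally reach it via
 * the LAG-flavor dispatcher net_lag_port_dev_txable() in <net/lag.h>.
 * A caller sketch (slave_dev is a hypothetical candidate netdevice):
 *
 *	txable = netif_is_lag_port(slave_dev) &&
 *		 net_lag_port_dev_txable(slave_dev);
 */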

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	struct netpoll *np = port->np;

	if (np)
		netpoll_send_skb(np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif

struct team_mode_ops {
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};
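
/*
 * A mode points struct team_mode::ops at a table like this; hooks the
 * mode does not need may be left NULL. A minimal transmit-only mode
 * sketch, loosely modeled on the in-tree modes (the dummy_* names are
 * hypothetical). Note that team_dev_queue_xmit() consumes the skb even
 * on error, so the skb is freed only when no txable port is found:
 *
 *	static bool dummy_transmit(struct team *team, struct sk_buff *skb)
 *	{
 *		struct team_port *port;
 *
 *		port = team_get_port_by_index_rcu(team, 0);
 *		if (port)
 *			port = team_get_first_port_txable_rcu(team, port);
 *		if (unlikely(!port)) {
 *			dev_kfree_skb_any(skb);
 *			return false;
 *		}
 *		return team_dev_queue_xmit(team, port, skb) == 0;
 *	}
 *
 *	static const struct team_mode_ops dummy_mode_ops = {
 *		.transmit = dummy_transmit,
 *	};
 */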

extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);

enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};

struct team_option_inst_info {
	u32 array_index;
	struct team_port *port; /* != NULL if per-port */
};

struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;
};

struct team_option {
	struct list_head list;
	const char *name;
	bool per_port;
	unsigned int array_size; /* != 0 means the option is array */
	enum team_option_type type;
	int (*init)(struct team *team, struct team_option_inst_info *info);
	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};

extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);

struct team_mode {
	const char *kind;
	struct module *owner;
	size_t priv_size;
	size_t port_priv_size;
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};

#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
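
/*
 * A mode's private data (struct team_mode::priv_size bytes, which is
 * expected to fit into TEAM_MODE_PRIV_SIZE) lives inline in
 * team->mode_priv. Modes conventionally access it through a cast, as
 * sketched below (struct dummy_priv is hypothetical):
 *
 *	static struct dummy_priv *dummy_priv(struct team *team)
 *	{
 *		return (struct dummy_priv *) &team->mode_priv;
 *	}
 */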

struct team {
	struct net_device *dev; /* associated netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	struct mutex lock; /* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops;
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	long mode_priv[TEAM_MODE_PRIV_LONGS];
};

static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	skb->dev = port->dev;
	if (unlikely(netpoll_tx_running(team->dev))) {
		team_netpoll_send_skb(port, skb);
		return 0;
	}
	return dev_queue_xmit(skb);
}

static inline struct hlist_head *team_port_index_hash(struct team *team,
						      int port_index)
{
	return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
}

static inline struct team_port *team_get_port_by_index(struct team *team,
							int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
	int en_port_count = READ_ONCE(team->en_port_count);

	if (unlikely(!en_port_count))
		return 0;
	return num % en_port_count;
}
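
/*
 * team_num_to_port_index() folds an arbitrary number (a hash, a per-cpu
 * tx counter, ...) into the range of currently enabled ports, returning
 * 0 when no port is enabled. A selection sketch (hash is a hypothetical
 * caller-computed value):
 *
 *	port = team_get_port_by_index_rcu(team,
 *					  team_num_to_port_index(team, hash));
 */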

static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
							    int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry_rcu(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}

extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);
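
/*
 * Modes are typically built as modules that register themselves on load
 * and unregister on unload, using MODULE_ALIAS_TEAM_MODE (defined below)
 * so the core can request them by kind. A minimal sketch mirroring the
 * in-tree modes (the dummy_* names are hypothetical):
 *
 *	static const struct team_mode dummy_mode = {
 *		.kind	= "dummy",
 *		.owner	= THIS_MODULE,
 *		.ops	= &dummy_mode_ops,
 *	};
 *
 *	static int __init dummy_init_module(void)
 *	{
 *		return team_mode_register(&dummy_mode);
 *	}
 *	module_init(dummy_init_module);
 *
 *	static void __exit dummy_cleanup_module(void)
 *	{
 *		team_mode_unregister(&dummy_mode);
 *	}
 *	module_exit(dummy_cleanup_module);
 *
 *	MODULE_ALIAS_TEAM_MODE("dummy");
 */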

#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)

#endif /* _LINUX_IF_TEAM_H_ */