/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * Since the ability to change the device address for an open port device is
 * tested in team_port_add, this function can be called without checking the
 * return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}
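
/*
 * Create one option instance per array element (or a single instance for
 * a non-array option) and call the option's init callback, if any.
 */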
static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}

	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
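
/*
 * Register options in two phases: first duplicate the option templates,
 * then create instances for each duplicate. Either phase rolls back the
 * work already done on failure.
 */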
static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
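
/*
 * Look up a mode by kind, trying to load a "team-mode-<kind>" module if the
 * mode is not registered yet. On success a reference to the mode's module
 * is held.
 */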
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}
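
/*
 * Dummy ops are installed while no mode is set or no port is enabled, so
 * the rx/tx fast paths never need to check for NULL ops.
 */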
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/*********************
 * Peers notification
 *********************/
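
/*
 * Fire NETDEV_NOTIFY_PEERS until count_pending is drained. RTNL is taken
 * with trylock; when it is contended, the work is simply rescheduled.
 */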
static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
				GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}
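
/*
 * Insert the port into its queue's override list, which is kept sorted by
 * priority and, for equal priorities, by port index.
 */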
static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing an in-flight packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}
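
/*
 * Shift the indexes of the enabled ports that follow a removed port so the
 * index sequence stays contiguous.
 */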
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
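
/*
 * Recompute the team device features as the common subset of the features
 * of all port devices.
 */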
static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_GSO_UDP_L4;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	__netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif
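
/*
 * Link the port device as a slave of the team device and pass LAG info
 * (tx type, hash type) along, so lower-device drivers can react to it.
 */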
static int team_upper_dev_link(struct team *team, struct team_port *port,
			       struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info, extack);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);
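
/*
 * Validate the candidate port device, then wire it up step by step (MTU,
 * open, VLAN ids, netpoll, rx_handler, upper link, per-port options). Each
 * failure unwinds the already completed steps via the err_* labels.
 */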
|
2011-11-12 06:16:48 +08:00
|
|
|
|
static int team_port_add(struct team *team, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev, extack);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port, extack);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	netif_addr_lock_bh(dev);
	dev_uc_sync_multiple(port_dev, dev);
	dev_mc_sync_multiple(port_dev, dev);
	netif_addr_unlock_bh(dev);

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static void __team_port_change_port_removed(struct team_port *port);

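/* Detach a port from the team, tearing down everything team_port_add()
 * set up: disable and unlink the port, unregister the rx_handler, drop
 * VLAN IDs and synced address lists, close the device, and restore its
 * original MAC address and MTU.
 */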
static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);
	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

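/* The getters and setters below back the generic "team options"
 * netlink interface; each one reads or writes a single field through a
 * team_gsetter_ctx. Per-port options find their port via ctx->info.
 */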
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

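/* Table of the generic team options exposed over netlink; options
 * marked per_port are instantiated once for each port as it is added.
 */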
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};

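/* ndo_init: allocate the per-CPU stats, set up the enabled-port hash
 * and port list, register the option table above and start with the
 * carrier off until a port reports linkup.
 */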
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

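/* ndo_uninit: remove all remaining ports and unregister the options
 * under the team lock; runs while the netdev is being unregistered.
 */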
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

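/* ndo_get_stats64: fold the per-CPU counters into one rtnl_link_stats64,
 * re-reading a CPU's snapshot if its seqcount changed mid-read.
 */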
static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
		rx_nohandler += p->rx_nohandler;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	stats->rx_nohandler = rx_nohandler;
}

static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npinfo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

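/* ndo_add_slave/ndo_del_slave: entry points used by the rtnetlink
 * enslave path; they wrap team_port_add()/team_port_del() in the team
 * lock and refresh the team's feature set on success.
 */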
static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
			  struct netlink_ext_ack *extack)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev, extack);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_features_check	= passthru_features_check,
};

/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};


/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops	= port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}

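/* A team takes on the link-layer type of its first port. Switching to a
 * different type is only allowed while the port list is empty, and is
 * announced via the NETDEV_PRE/POST_TYPE_CHANGE notifiers.
 */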
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = team_destructor;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_TEAM;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Leave this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};

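/* For reference, a team device built on these rtnl_link_ops is normally
 * managed by teamd, but it can also be created directly with iproute2,
 * e.g. (illustrative commands; the device names are just examples):
 *
 *	ip link add name team0 type team
 *	ip link set eth0 down
 *	ip link set eth0 master team0
 *	ip link set team0 up
 */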

/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family;

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by the following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

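/* Serialize one option instance into a TEAM_ATTR_ITEM_OPTION nest:
 * name, optional port ifindex and array index, type, current value and
 * the changed/removed flags. Returns -EMSGSIZE if the skb is full.
 */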
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}

static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

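/* Dump the selected option instances as one or more multipart messages.
 * When an skb fills up (-EMSGSIZE on a non-first item) the partial
 * message is sent and the dump restarts from the current item; an
 * NLMSG_DONE message terminates the sequence.
 */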
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);

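/* TEAM_CMD_OPTIONS_SET handler: walk each nested option attribute,
 * locate the matching option instance by name, type, port ifindex and
 * array index, apply the new value, and notify listeners of the
 * resulting changes after each nested option is processed.
 */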
2011-11-12 06:16:48 +08:00
|
|
|
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
|
|
{
|
|
|
|
struct team *team;
|
|
|
|
int err = 0;
|
|
|
|
int i;
|
|
|
|
struct nlattr *nl_option;
|
|
|
|
|
2015-12-03 19:12:17 +08:00
|
|
|
rtnl_lock();
|
|
|
|
|
2011-11-12 06:16:48 +08:00
|
|
|
team = team_nl_team_get(info);
|
2015-12-03 19:12:17 +08:00
|
|
|
if (!team) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto rtnl_unlock;
|
|
|
|
}
|
2011-11-12 06:16:48 +08:00
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto team_put;
|
|
|
|
}
|
|
|
|
|
|
|
|
nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
|
2012-04-10 13:15:42 +08:00
|
|
|
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
|
2012-06-19 13:54:08 +08:00
|
|
|
struct nlattr *attr;
|
2012-04-10 13:15:43 +08:00
|
|
|
struct nlattr *attr_data;
|
2019-02-12 13:59:51 +08:00
|
|
|
LIST_HEAD(opt_inst_list);
|
2011-11-12 06:16:48 +08:00
|
|
|
enum team_option_type opt_type;
|
2012-04-10 13:15:42 +08:00
|
|
|
int opt_port_ifindex = 0; /* != 0 for per-port options */
|
2012-06-19 13:54:08 +08:00
|
|
|
u32 opt_array_index = 0;
|
|
|
|
bool opt_is_array = false;
|
2012-04-10 13:15:42 +08:00
|
|
|
struct team_option_inst *opt_inst;
|
2011-11-12 06:16:48 +08:00
|
|
|
char *opt_name;
|
|
|
|
bool opt_found = false;
|
|
|
|
|
|
|
|
if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto team_put;
|
|
|
|
}
|
2012-04-10 13:15:42 +08:00
|
|
|
err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
|
netlink: pass extended ACK struct where available
This is an add-on to the previous patch that passes the extended ACK
structure where it's already available by existing genl_info or extack
function arguments.
This was done with this spatch (with some manual adjustment of
indentation):
@@
expression A, B, C, D, E;
identifier fn, info;
@@
fn(..., struct genl_info *info, ...) {
...
-nlmsg_parse(A, B, C, D, E, NULL)
+nlmsg_parse(A, B, C, D, E, info->extack)
...
}
@@
expression A, B, C, D, E;
identifier fn, info;
@@
fn(..., struct genl_info *info, ...) {
<...
-nla_parse_nested(A, B, C, D, NULL)
+nla_parse_nested(A, B, C, D, info->extack)
...>
}
@@
expression A, B, C, D, E;
identifier fn, extack;
@@
fn(..., struct netlink_ext_ack *extack, ...) {
<...
-nlmsg_parse(A, B, C, D, E, NULL)
+nlmsg_parse(A, B, C, D, E, extack)
...>
}
@@
expression A, B, C, D, E;
identifier fn, extack;
@@
fn(..., struct netlink_ext_ack *extack, ...) {
<...
-nla_parse(A, B, C, D, E, NULL)
+nla_parse(A, B, C, D, E, extack)
...>
}
@@
expression A, B, C, D, E;
identifier fn, extack;
@@
fn(..., struct netlink_ext_ack *extack, ...) {
...
-nlmsg_parse(A, B, C, D, E, NULL)
+nlmsg_parse(A, B, C, D, E, extack)
...
}
@@
expression A, B, C, D;
identifier fn, extack;
@@
fn(..., struct netlink_ext_ack *extack, ...) {
<...
-nla_parse_nested(A, B, C, D, NULL)
+nla_parse_nested(A, B, C, D, extack)
...>
}
@@
expression A, B, C, D;
identifier fn, extack;
@@
fn(..., struct netlink_ext_ack *extack, ...) {
<...
-nlmsg_validate(A, B, C, D, NULL)
+nlmsg_validate(A, B, C, D, extack)
...>
}
@@
expression A, B, C, D;
identifier fn, extack;
@@
fn(..., struct netlink_ext_ack *extack, ...) {
<...
-nla_validate(A, B, C, D, NULL)
+nla_validate(A, B, C, D, extack)
...>
}
@@
expression A, B, C;
identifier fn, extack;
@@
fn(..., struct netlink_ext_ack *extack, ...) {
<...
-nla_validate_nested(A, B, C, NULL)
+nla_validate_nested(A, B, C, extack)
...>
}
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Reviewed-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-04-12 20:34:08 +08:00
|
|
|
nl_option, team_nl_option_policy,
|
|
|
|
info->extack);
|
2011-11-12 06:16:48 +08:00
|
|
|
if (err)
|
|
|
|
goto team_put;
|
2012-04-10 13:15:42 +08:00
|
|
|
if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
|
2012-04-10 13:15:43 +08:00
|
|
|
!opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
|
2011-11-12 06:16:48 +08:00
|
|
|
err = -EINVAL;
|
|
|
|
goto team_put;
|
|
|
|
}
|
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}
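
		/* Apply the value to every registered option instance whose
		 * name, type, per-port ifindex and array index all match the
		 * attributes extracted above.
		 */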
		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
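
		/* Announce the option instances changed by this message on
		 * the change-event multicast group before handling the next
		 * option attribute.
		 */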
		err = team_nl_send_event_options_get(team, &opt_inst_list);
		if (err)
			break;
	}

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}
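
/* Fill a single TEAM_ATTR_ITEM_PORT nest describing one port. The changed,
 * removed and linkup flags are emitted only when set; on any failure the
 * whole nest is cancelled and -EMSGSIZE is returned so the caller can retry
 * in a fresh message.
 */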
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}

static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);
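
	/* The full port list may not fit into a single skb. If it does not,
	 * the reply is built as a multipart message: fill until -EMSGSIZE,
	 * send what fits, restart from the port where filling stopped, and
	 * finish with NLMSG_DONE.
	 */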
start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, the caller wants to send a port list
	 * containing only this port. Otherwise go through all listed ports
	 * and send them all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}
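
/* Generic netlink operations exposed by the driver. All commands except
 * TEAM_CMD_NOOP require CAP_NET_ADMIN (GENL_ADMIN_PERM).
 */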
static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};

static struct genl_family team_nl_family __ro_after_init = {
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= team_nl_ops,
	.n_ops		= ARRAY_SIZE(team_nl_ops),
	.mcgrps		= team_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(team_nl_mcgrps),
};
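
/* Userspace resolves this family by name before issuing commands. A rough
 * libnl-style sketch (illustration only, not part of this driver):
 *
 *	int family = genl_ctrl_resolve(sk, TEAM_GENL_NAME);
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    TEAM_CMD_OPTIONS_GET, TEAM_GENL_VERSION);
 *	nla_put_u32(msg, TEAM_ATTR_TEAM_IFINDEX, ifindex);
 */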

static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}

static int __init team_nl_init(void)
{
	return genl_register_family(&team_nl_family);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/
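
/* Collect every option instance marked as changed and announce them all in
 * a single options-get event. -ESRCH from the multicast send only means
 * nobody is currently listening on the event group, so it is not reported
 * as a failure.
 */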
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}
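
/* Push the new link state into the port and emit a port-change event.
 * Speed and duplex are refreshed via ethtool on linkup; when the query
 * fails or the link is down, both are reset to 0 (unknown).
 */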
/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}
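
/* The master's carrier follows its ports: it is on as soon as any single
 * port has link and off when none do, unless userspace has taken over
 * carrier control (user_carrier_enabled).
 */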
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}


/************************************
 * Net device notifier event handler
 ************************************/
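
/* A single notifier watches all netdevices; events are acted on only when
 * the device is enslaved to a team (team_port_get_rtnl() returns a port).
 */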
static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_oper_up(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/
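
/* Module init registers, in order, the netdevice notifier, the rtnl link
 * ops under which team devices are created, and the generic netlink family;
 * on failure each step is unwound in reverse.
 */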
static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);