/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

/* Bit 63 marks a MAC table entry as valid in the SET_PORT mailbox format. */
#define MLX4_MAC_VALID		(1ull << 63)

/* Bit 31 marks a VLAN table entry as valid; low 12 bits hold the VLAN id. */
#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

/* Counter-group selection masks for the port statistics query. */
#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL
2008-10-23 02:44:46 +08:00
|
|
|
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
mutex_init(&table->mutex);
|
|
|
|
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
|
|
|
|
table->entries[i] = 0;
|
|
|
|
table->refs[i] = 0;
|
|
|
|
}
|
|
|
|
table->max = 1 << dev->caps.log_num_macs;
|
|
|
|
table->total = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
mutex_init(&table->mutex);
|
|
|
|
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
|
|
|
|
table->entries[i] = 0;
|
|
|
|
table->refs[i] = 0;
|
|
|
|
}
|
2011-10-18 09:50:29 +08:00
|
|
|
table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
|
2008-10-23 02:44:46 +08:00
|
|
|
table->total = 0;
|
|
|
|
}
|
|
|
|
|
{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, that which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-07-05 12:03:46 +08:00
|
|
|
static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
|
|
|
|
u64 mac, int *qpn, u64 *reg_id)
|
2008-10-23 02:44:46 +08:00
|
|
|
{
|
2012-03-06 12:04:26 +08:00
|
|
|
__be64 be_mac;
|
2011-03-23 06:38:31 +08:00
|
|
|
int err;
|
|
|
|
|
{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, that which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-07-05 12:03:46 +08:00
|
|
|
mac &= MLX4_MAC_MASK;
|
2012-03-06 12:04:26 +08:00
|
|
|
be_mac = cpu_to_be64(mac << 16);
|
2011-03-23 06:38:31 +08:00
|
|
|
|
{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, that which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-07-05 12:03:46 +08:00
|
|
|
switch (dev->caps.steering_mode) {
|
|
|
|
case MLX4_STEERING_MODE_B0: {
|
|
|
|
struct mlx4_qp qp;
|
|
|
|
u8 gid[16] = {0};
|
|
|
|
|
|
|
|
qp.qpn = *qpn;
|
|
|
|
memcpy(&gid[10], &be_mac, ETH_ALEN);
|
|
|
|
gid[5] = port;
|
|
|
|
|
|
|
|
err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
|
|
|
|
struct mlx4_spec_list spec_eth = { {NULL} };
|
|
|
|
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
|
|
|
|
|
|
|
|
struct mlx4_net_trans_rule rule = {
|
|
|
|
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
|
|
|
|
.exclusive = 0,
|
|
|
|
.allow_loopback = 1,
|
|
|
|
.promisc_mode = MLX4_FS_PROMISC_NONE,
|
|
|
|
.priority = MLX4_DOMAIN_NIC,
|
|
|
|
};
|
|
|
|
|
|
|
|
rule.port = port;
|
|
|
|
rule.qpn = *qpn;
|
|
|
|
INIT_LIST_HEAD(&rule.list);
|
|
|
|
|
|
|
|
spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
|
|
|
|
memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
|
|
|
|
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
|
|
|
|
list_add_tail(&spec_eth.list, &rule.list);
|
|
|
|
|
|
|
|
err = mlx4_flow_attach(dev, &rule, reg_id);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2011-12-13 12:16:21 +08:00
|
|
|
if (err)
|
|
|
|
mlx4_warn(dev, "Failed Attaching Unicast\n");
|
2011-03-23 06:38:31 +08:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * mlx4_uc_steer_release - detach the unicast steering rule added by
 * mlx4_uc_steer_add().
 *
 * In B0 mode the MAC/port GID is rebuilt and detached with
 * mlx4_unicast_detach(); @reg_id is unused.  In device-managed mode the
 * firmware rule is removed by @reg_id via mlx4_flow_detach().
 */
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
				  u64 mac, int qpn, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};
		__be64 be_mac;

		qp.qpn = qpn;
		mac &= MLX4_MAC_MASK;
		be_mac = cpu_to_be64(mac << 16);
		memcpy(&gid[10], &be_mac, ETH_ALEN);
		gid[5] = port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		mlx4_err(dev, "Invalid steering mode.\n");
	}
}
|
|
|
|
|
|
|
|
static int validate_index(struct mlx4_dev *dev,
|
|
|
|
struct mlx4_mac_table *table, int index)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (index < 0 || index >= table->max || !table->entries[index]) {
|
|
|
|
mlx4_warn(dev, "No valid Mac entry for the given index\n");
|
|
|
|
err = -EINVAL;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * find_index - look up @mac in the port MAC table.
 *
 * Compares the low 48 bits of @mac against each (big-endian) table
 * entry.  Returns the matching slot index, or -EINVAL when the MAC is
 * not present.
 */
static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* Mac not found */
	return -EINVAL;
}
|
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * mlx4_get_eth_qp - register @mac on @port and obtain a QP number for it.
 *
 * Registers the MAC in the port table, then:
 *  - A0 steering: the QP number is simply base_qpn + MAC table index.
 *  - B0 / device-managed: reserves a QP range, attaches a unicast
 *    steering rule, and records the (mac, reg_id) pair in the per-port
 *    radix tree keyed by QP number so mlx4_put_eth_qp() can undo it.
 *
 * On failure every step already taken is rolled back in reverse order
 * via the goto-cleanup chain.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): original dump contained the HTML-entity garble
 * "®_id" where "&reg_id" belongs; fixed below.
 */
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
		 (unsigned long long) mac);
	index = mlx4_register_mac(dev, port, mac);
	if (index < 0) {
		err = index;
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		*qpn = info->base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
	if (err) {
		mlx4_err(dev, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	entry->mac = mac;
	entry->reg_id = reg_id;
	err = radix_tree_insert(&info->mac_tree, *qpn, entry);
	if (err)
		goto insert_err;
	return 0;

insert_err:
	kfree(entry);

alloc_err:
	mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, port, mac);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
|
|
|
|
|
|
|
|
/*
 * mlx4_put_eth_qp - release the QP/steering state taken by
 * mlx4_get_eth_qp() and unregister @mac from @port.
 *
 * For non-A0 steering modes the radix-tree entry keyed by @qpn supplies
 * the recorded MAC and registration id for the steering detach; the QP
 * range and the tree entry are then freed.  In A0 mode only the MAC
 * table unregistration applies.
 */
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
		 (unsigned long long) mac);
	mlx4_unregister_mac(dev, port, mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (entry) {
			mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
				 " qpn %d\n", port,
				 (unsigned long long) mac, qpn);
			mlx4_uc_steer_release(dev, port, entry->mac,
					      qpn, entry->reg_id);
			mlx4_qp_release_range(dev, qpn, 1);
			radix_tree_delete(&info->mac_tree, qpn);
			kfree(entry);
		}
	}
}
EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
|
|
|
|
|
|
|
|
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
|
|
|
|
__be64 *entries)
|
|
|
|
{
|
|
|
|
struct mlx4_cmd_mailbox *mailbox;
|
|
|
|
u32 in_mod;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
|
|
|
if (IS_ERR(mailbox))
|
|
|
|
return PTR_ERR(mailbox);
|
|
|
|
|
|
|
|
memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
|
|
|
|
|
|
|
|
in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
|
2011-10-07 00:33:11 +08:00
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
|
|
|
|
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
|
|
|
|
|
|
|
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * __mlx4_register_mac - add @mac to the @port MAC table (native path).
 *
 * Under the table mutex: scans for a duplicate (rejected with -EEXIST)
 * while remembering the first free slot, checks capacity (-ENOSPC when
 * full), writes the new entry with the VALID bit set, and pushes the
 * table to firmware.  On firmware failure the slot is cleared again.
 *
 * Returns the table index of the new entry (>= 0) or a negative errno.
 */
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->entries[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, Must not have duplicates */
			err = -EEXIST;
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}

	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);
|
2008-10-23 02:44:46 +08:00
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * mlx4_register_mac - register @mac on @port, multi-function aware.
 *
 * On a multi-function device the request is forwarded to the resource
 * wrapper via ALLOC_RES (the allocated index comes back in the low half
 * of out_param); otherwise __mlx4_register_mac() is called directly.
 * Returns the MAC table index (>= 0) or a negative errno.
 */
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
|
|
|
|
|
2011-03-23 06:38:31 +08:00
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * __mlx4_unregister_mac - remove @mac from the @port MAC table (native path).
 *
 * Looks the MAC up, and under the table mutex validates the index,
 * clears the entry, pushes the updated table to firmware and decrements
 * the population count.  An unknown MAC is silently ignored (after the
 * warning emitted by validate_index()).
 */
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index;

	index = find_index(dev, table, mac);

	mutex_lock(&table->mutex);

	if (validate_index(dev, table, index))
		goto out;

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
|
|
|
|
|
|
|
|
/*
 * mlx4_unregister_mac - unregister @mac from @port, multi-function aware.
 *
 * On a multi-function device the request is forwarded through the
 * FREE_RES wrapper (the result is deliberately ignored — best effort);
 * otherwise __mlx4_unregister_mac() is called directly.
 */
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
				    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
|
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * mlx4_replace_mac - replace the MAC currently bound to @qpn with @new_mac.
 *
 * Non-A0 modes: the radix-tree entry for @qpn supplies the old MAC and
 * registration id; the old steering rule and MAC registration are torn
 * down, the entry is updated, and @new_mac is registered and re-attached
 * (reg_id is reset to 0 first, then refilled by mlx4_uc_steer_add()).
 *
 * A0 mode: the MAC table slot (qpn - base_qpn) is rewritten in place
 * under the table mutex and pushed to firmware; on failure the slot is
 * cleared.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	struct mlx4_mac_entry *entry;
	int index = qpn - info->base_qpn;
	int err = 0;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (!entry)
			return -EINVAL;
		mlx4_uc_steer_release(dev, port, entry->mac,
				      qpn, entry->reg_id);
		mlx4_unregister_mac(dev, port, entry->mac);
		entry->mac = new_mac;
		entry->reg_id = 0;
		mlx4_register_mac(dev, port, new_mac);
		err = mlx4_uc_steer_add(dev, port, entry->mac,
					&qpn, &entry->reg_id);
		return err;
	}

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
|
2011-12-13 12:16:21 +08:00
|
|
|
|
2008-10-23 02:44:46 +08:00
|
|
|
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
|
|
|
|
__be32 *entries)
|
|
|
|
{
|
|
|
|
struct mlx4_cmd_mailbox *mailbox;
|
|
|
|
u32 in_mod;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
|
|
|
if (IS_ERR(mailbox))
|
|
|
|
return PTR_ERR(mailbox);
|
|
|
|
|
|
|
|
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
|
|
|
|
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
|
|
|
|
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
|
2011-12-13 12:10:51 +08:00
|
|
|
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
|
2008-10-23 02:44:46 +08:00
|
|
|
|
|
|
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2010-08-26 22:19:22 +08:00
|
|
|
/*
 * Look up VLAN id @vid in the cached VLAN table of @port.
 * On a hit, store the table slot in *@idx and return 0;
 * return -ENOENT when the VLAN is not registered.
 */
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *tbl = &mlx4_priv(dev)->port[port].vlan_table;
	int slot;

	for (slot = 0; slot < MLX4_MAX_VLAN_NUM; ++slot) {
		if (!tbl->refs[slot])
			continue;
		if (vid != (be32_to_cpu(tbl->entries[slot]) & MLX4_VLAN_MASK))
			continue;
		/* entry is in use and its VLAN id matches */
		*idx = slot;
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
|
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * __mlx4_register_vlan() - add @vlan to the cached VLAN table of @port and
 * program the updated table into the firmware.
 *
 * On success, *@index is the table slot holding the VLAN: either an existing
 * entry whose reference count was incremented, or a newly written one.
 * Returns 0 on success, -ENOSPC when the table is full, -ENOMEM when no free
 * slot was found, or the error from mlx4_set_port_vlan_table().
 */
static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
				int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;		/* first unused slot seen during the scan */

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	/* Scan starts at MLX4_VLAN_REGULAR: slots below it are reserved
	 * (special indices) and never handed out here. */
	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase references count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		/* roll back the cached entry so cache and HW stay in sync */
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
|
2011-12-13 12:16:21 +08:00
|
|
|
|
|
|
|
/*
 * Register @vlan on @port, returning its table slot in *@index.
 * In multi-function mode the allocation is delegated to the master
 * through the ALLOC_RES command; otherwise the local table is updated.
 */
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param;
	int ret;

	if (!mlx4_is_mfunc(dev))
		return __mlx4_register_vlan(dev, port, vlan, index);

	set_param_l(&out_param, port);
	ret = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
			   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (!ret)
		*index = get_param_l(&out_param);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
|
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * __mlx4_unregister_vlan() - drop one reference on the VLAN at @index of
 * @port's cached table; when the last reference goes away, clear the slot
 * and reprogram the firmware table.
 *
 * Special (reserved) indices below MLX4_VLAN_REGULAR are refused with a
 * warning, as is an index that holds no entry.
 */
static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		return;
	}

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No vlan entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		/* Fixed format string: the original split literal
		 * concatenated to "index %d,no need" (missing space). */
		mlx4_dbg(dev, "Have more references for index %d, "
			 "no need to modify vlan table\n", index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
|
2011-12-13 12:16:21 +08:00
|
|
|
|
|
|
|
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
|
|
|
|
{
|
|
|
|
u64 in_param;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (mlx4_is_mfunc(dev)) {
|
|
|
|
set_param_l(&in_param, port);
|
|
|
|
err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
|
|
|
|
MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
|
|
|
|
MLX4_CMD_WRAPPED);
|
|
|
|
if (!err)
|
|
|
|
mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
|
|
|
|
index);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
__mlx4_unregister_vlan(dev, port, index);
|
|
|
|
}
|
2008-10-23 02:44:46 +08:00
|
|
|
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
|
2008-10-23 06:38:42 +08:00
|
|
|
|
2008-11-29 13:29:46 +08:00
|
|
|
/*
 * mlx4_get_port_ib_caps() - query the IB capability mask of @port via a
 * MAD_IFC firmware command and return it (big-endian) in *@caps.
 *
 * Builds a raw MAD request in the input mailbox byte-by-byte; the magic
 * offsets below follow the MAD header layout.  Returns 0 on success or a
 * mailbox/command error code; *@caps is written only on success.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	memset(inbuf, 0, 256);
	memset(outbuf, 0, 256);
	/* MAD header: presumably base_version / mgmt_class / class_version /
	 * method, each set to 1 — NOTE(review): confirm against the IB MAD
	 * header layout. */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	/* attribute id 0x0015 (PortInfo), attribute modifier = port number */
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		/* capability mask field of the returned PortInfo MAD */
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
|
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * mlx4_common_set_port() - master-side handling of a SET_PORT request
 * issued by @slave (which may be the PF itself).
 *
 * Ethernet (@op_mod != 0): only the PF may issue arbitrary SET_PORT
 * sub-commands; slaves are restricted to MLX4_SET_PORT_GENERAL (MTU).
 * The inbox contents are sanitized/merged with master state before the
 * command is forwarded to the firmware.
 *
 * IB (@op_mod == 0): only the capability mask (aggregated over all slave
 * functions) and the QKey violation counter reset bit are honoured.
 *
 * Returns 0 or a firmware/command error; on IB failure the slave's cached
 * capability mask is rolled back.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	int reset_qkey_viols;
	int port;
	int is_eth;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	/* in_mod packs the port in the low byte, sub-command above it */
	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
					slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Rewrite base QPN with the master's value while
			 * preserving the promisc bits the slave requested. */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* MTU is configured as the max MTU among all the
			 * functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
					master->max_mtu[port]) {
				/* This slave held the port maximum and just
				 * shrank it — recompute the max over all
				 * slaves. */
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	/* Old vs. new SET_PORT command layouts keep these fields at
	 * different offsets in the mailbox. */
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* Save the old per-slave mask for rollback, install the new one,
	 * then OR all slaves' masks together for the firmware. */
	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests. Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* restore the previous per-slave capability mask */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
|
|
|
|
|
|
|
|
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
|
struct mlx4_vhcr *vhcr,
|
|
|
|
struct mlx4_cmd_mailbox *inbox,
|
|
|
|
struct mlx4_cmd_mailbox *outbox,
|
|
|
|
struct mlx4_cmd_info *cmd)
|
|
|
|
{
|
|
|
|
return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
|
|
|
|
vhcr->op_modifier, inbox);
|
|
|
|
}
|
|
|
|
|
2012-01-12 01:02:17 +08:00
|
|
|
/* bit locations for set port command with zero op modifier
 * (IB port configuration).  The *_CAP names mark the start of multi-bit
 * value fields; the CHANGE_* names are single "field is valid" flag bits.
 */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, /* pkey table size field valid */
	MLX4_CHANGE_PORT_VL_CAP	 = 21, /* vl_cap field valid */
	MLX4_CHANGE_PORT_MTU_CAP = 22, /* mtu_cap field valid */
};
|
|
|
|
|
mlx4: Put physical GID and P_Key table sizes in mlx4_phys_caps struct and paravirtualize them
To allow easy paravirtualization of P_Key and GID table sizes, keep
paravirtualized sizes in mlx4_dev->caps, but save the actual physical
sizes from FW in struct: mlx4_dev->phys_cap.
In addition, in SR-IOV mode, do the following:
1. Reduce reported P_Key table size by 1.
This is done to reserve the highest P_Key index for internal use,
for declaring an invalid P_Key in P_Key paravirtualization.
We require a P_Key index which always contain an invalid P_Key
value for this purpose (i.e., one which cannot be modified by
the subnet manager). The way to do this is to reduce the
P_Key table size reported to the subnet manager by 1, so that
it will not attempt to access the P_Key at index #127.
2. Paravirtualize the GID table size to 1. Thus, each guest sees
only a single GID (at its paravirtualized index 0).
In addition, since we are paravirtualizing the GID table size to 1, we
add paravirtualization of the master GID event here (i.e., we do not
do ib_dispatch_event() for the GUID change event on the master, since
its (only) GUID never changes).
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <roland@purestorage.com>
2012-06-19 16:21:44 +08:00
|
|
|
/*
 * mlx4_SET_PORT() - program IB port capabilities (VL cap, MTU cap, and
 * optionally the P_Key table size) into the firmware.
 *
 * @pkey_tbl_sz: new P_Key table size, or a negative value to leave it
 * unchanged; only the master may set it.  No-op (returns 0) for Ethernet
 * ports.  Returns 0 or a mailbox/command error.
 */
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	/* word 1: default IB port capability mask */
	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		/* 16-bit slot 20 = byte offset 40 in the SET_PORT layout —
		 * NOTE(review): offset taken from firmware interface spec;
		 * confirm against the PRM. */
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values.
	 * Retry with progressively smaller VL counts (8, 4, 2, 1) while the
	 * firmware rejects the request with -ENOMEM. */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
				MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
|
2011-12-13 12:16:21 +08:00
|
|
|
|
2011-12-15 14:48:37 +08:00
|
|
|
/*
 * Program general Ethernet port parameters: MTU, global pause and
 * per-priority flow control, via SET_PORT with the GENERAL modifier.
 */
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_set_port_general_context *ctx;
	u32 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	int ret;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ctx = mbox->buf;
	memset(ctx, 0, sizeof(*ctx));

	ctx->flags = SET_PORT_GEN_ALL_VALID;
	ctx->mtu = cpu_to_be16(mtu);
	/* global pause (pptx/pprx, bit 7) is forced off whenever the
	 * corresponding per-priority flow control is enabled */
	ctx->pptx = (pptx * (!pfctx)) << 7;
	ctx->pfctx = pfctx;
	ctx->pprx = (pprx * (!pfcrx)) << 7;
	ctx->pfcrx = pfcrx;

	ret = mlx4_cmd(dev, mbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);
|
|
|
|
|
2011-12-15 14:48:37 +08:00
|
|
|
/*
 * Program the RX QP calculation parameters (base QPN, MAC/VLAN hashing,
 * promiscuous settings) for @port.  Only meaningful in A0 steering mode;
 * in any other steering mode this is a no-op returning 0.
 */
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_set_port_rqp_calc_context *ctx;
	u32 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	u32 m_promisc;
	int ret;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ctx = mbox->buf;
	memset(ctx, 0, sizeof(*ctx));

	ctx->base_qpn = cpu_to_be32(base_qpn);
	ctx->n_mac = dev->caps.log_num_macs;
	ctx->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				   base_qpn);
	ctx->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				 base_qpn);
	ctx->intra_no_vlan = 0;
	ctx->no_vlan = MLX4_NO_VLAN_IDX;
	ctx->intra_vlan_miss = 0;
	ctx->vlan_miss = MLX4_VLAN_MISS_IDX;

	ret = mlx4_cmd(dev, mbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
|
|
|
|
|
2012-04-05 05:33:25 +08:00
|
|
|
/*
 * Program the user-priority to traffic-class mapping of @port.
 * @prio2tc holds MLX4_NUM_UP entries; pairs of priorities are packed
 * into one byte each (even priority in the high nibble).
 */
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_set_port_prio2tc_context *ctx;
	u32 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
	int prio;
	int ret;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ctx = mbox->buf;
	memset(ctx, 0, sizeof(*ctx));

	for (prio = 0; prio < MLX4_NUM_UP; prio += 2)
		ctx->prio2tc[prio >> 1] =
			(prio2tc[prio] << 4) | prio2tc[prio + 1];

	ret = mlx4_cmd(dev, mbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
|
|
|
|
|
|
|
|
/*
 * Program the per-traffic-class scheduler of @port: priority group,
 * bandwidth percentage and rate limit for each of the MLX4_NUM_TC
 * traffic classes.  A zero (or absent) ratelimit entry falls back to
 * MLX4_RATELIMIT_DEFAULT.
 */
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
			    u8 *pg, u16 *ratelimit)
{
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_set_port_scheduler_context *ctx;
	u32 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
	int tc_idx;
	int ret;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ctx = mbox->buf;
	memset(ctx, 0, sizeof(*ctx));

	for (tc_idx = 0; tc_idx < MLX4_NUM_TC; tc_idx++) {
		struct mlx4_port_scheduler_tc_cfg_be *tc = &ctx->tc[tc_idx];
		u16 rl = MLX4_RATELIMIT_DEFAULT;

		if (ratelimit && ratelimit[tc_idx])
			rl = ratelimit[tc_idx];

		tc->pg = htons(pg[tc_idx]);
		tc->bw_precentage = htons(tc_tx_bw[tc_idx]);
		tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
		tc->max_bw_value = htons(rl);
	}

	ret = mlx4_cmd(dev, mbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
|
|
|
|
|
2011-12-13 12:16:21 +08:00
|
|
|
/*
 * Virtual-HCR wrapper for SET_MCAST_FLTR from a slave: the request is
 * accepted without any action (nothing to virtualize here).
 */
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Issue the SET_MCAST_FLTR command for @port: the MAC goes in the
 * immediate parameter with the "clear" flag in bit 63, and @mode is
 * passed as the opcode modifier.
 */
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	u64 in_param = mac | (clear << 63);

	return mlx4_cmd(dev, in_param, port, mode, MLX4_CMD_SET_MCAST_FLTR,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
|
|
|
|
|
|
|
|
/*
 * Virtual-HCR wrapper for SET_VLAN_FLTR from a slave: the request is
 * accepted without any action (nothing to virtualize here).
 */
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}
|
|
|
|
|
|
|
|
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
|
|
|
|
u32 in_mod, struct mlx4_cmd_mailbox *outbox)
|
|
|
|
{
|
|
|
|
return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
|
|
|
|
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
|
|
|
|
MLX4_CMD_NATIVE);
|
|
|
|
}
|
|
|
|
|
|
|
|
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
|
struct mlx4_vhcr *vhcr,
|
|
|
|
struct mlx4_cmd_mailbox *inbox,
|
|
|
|
struct mlx4_cmd_mailbox *outbox,
|
|
|
|
struct mlx4_cmd_info *cmd)
|
|
|
|
{
|
2012-01-19 17:44:37 +08:00
|
|
|
if (slave != dev->caps.function)
|
|
|
|
return 0;
|
2011-12-13 12:16:21 +08:00
|
|
|
return mlx4_common_dump_eth_stats(dev, slave,
|
|
|
|
vhcr->in_modifier, outbox);
|
|
|
|
}
|
2012-01-19 17:45:05 +08:00
|
|
|
|
|
|
|
/*
 * Compute the statistics-counter bitmap for this function.
 * Single-function devices report everything (bitmap 0); multi-function
 * devices get traffic/drops/port counters, with error counters added
 * only on the master.
 */
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
{
	u64 bitmap = 0;

	if (mlx4_is_mfunc(dev)) {
		bitmap = MLX4_STATS_TRAFFIC_COUNTERS_MASK |
			 MLX4_STATS_TRAFFIC_DROPS_MASK |
			 MLX4_STATS_PORT_COUNTERS_MASK;

		if (mlx4_is_master(dev))
			bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
	}

	*stats_bitmap = bitmap;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);
|