Merge branch 'mlx4'
Amir Vadai says:
====================
net/mlx4: Mellanox driver update 27-02-2014
This patchset contains fixes for small, trivial bugs and for
compilation/static-checker warnings.
The patchset was applied and tested on top of commit 750f679
("Merge branch '6lowpan'").
Changes from V1:
- patch 5/9: Replace mlx4_en_mac_to_u64() with mlx4_mac_to_u64()
  - Remove the unnecessary define of ETH_ALEN
Changes from V0:
- patch 3/9: net/mlx4_en: Pad ethernet packets smaller than 17 bytes
  - Make the condition more efficient
  - Didn't use the canonical padding helper, since a bounce buffer is used
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a811f2c038
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config MLX4_EN
-        tristate "Mellanox Technologies 10Gbit Ethernet support"
+        tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
         depends on PCI
         select MLX4_CORE
         select PTP_1588_CLOCK
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
         int has_ets_tc = 0;
 
         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-                if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+                if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
                         en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
                                i, ets->prio_tc[i]);
                         return -EINVAL;
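Note on this hunk: ets->prio_tc[] maps each traffic class to a user
priority (UP), and with MLX4_EN_NUM_UP priorities the valid values are
0 through MLX4_EN_NUM_UP - 1, so the old '>' comparison wrongly accepted
the out-of-range value MLX4_EN_NUM_UP itself. A minimal sketch of the
corrected bound check (names are illustrative, not from the driver):

    #define NUM_UP 8    /* stand-in for MLX4_EN_NUM_UP */

    /* Valid UP values are 0 .. NUM_UP - 1; reject anything >= NUM_UP. */
    static int up_in_range(unsigned int up)
    {
        return up < NUM_UP;
    }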
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
                  " Per priority bit mask");
 
+MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
+                 "Threshold for using inline data (range: 17-104, default: 104)");
+
+#define MAX_PFC_TX 0xff
+#define MAX_PFC_RX 0xff
+
 int en_print(const char *level, const struct mlx4_en_priv *priv,
              const char *format, ...)
 {
@@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
                 params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
                         MLX4_EN_NUM_UP;
                 params->prof[i].rss_rings = 0;
+                params->prof[i].inline_thold = inline_thold;
         }
 
         return 0;
@@ -325,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = {
         .protocol = MLX4_PROT_ETH,
 };
 
+void mlx4_en_verify_params(void)
+{
+        if (pfctx > MAX_PFC_TX) {
+                pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+                        pfctx, MAX_PFC_TX);
+                pfctx = 0;
+        }
+
+        if (pfcrx > MAX_PFC_RX) {
+                pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+                        pfcrx, MAX_PFC_RX);
+                pfcrx = 0;
+        }
+
+        if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
+                pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
+                        inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
+                inline_thold = MAX_INLINE;
+        }
+}
+
 static int __init mlx4_en_init(void)
 {
+        mlx4_en_verify_params();
+
         return mlx4_register_interface(&mlx4_en_interface);
 }
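Note on this hunk: module parameters registered with mode 0444 can only
be set at load time, so mlx4_en_verify_params() checks them once from
the init path and clamps out-of-range values to a default instead of
failing the load. A minimal, self-contained sketch of the same idiom,
under hypothetical "demo" names:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    #define DEMO_MIN_THOLD 17
    #define DEMO_MAX_THOLD 104

    static int demo_thold = DEMO_MAX_THOLD;
    module_param(demo_thold, int, 0444);
    MODULE_PARM_DESC(demo_thold, "inline threshold (range: 17-104)");

    static int __init demo_init(void)
    {
        /* Validate once, before first use; clamp rather than fail. */
        if (demo_thold < DEMO_MIN_THOLD || demo_thold > DEMO_MAX_THOLD) {
            pr_warn("demo: illegal demo_thold %d, using default %d\n",
                    demo_thold, DEMO_MAX_THOLD);
            demo_thold = DEMO_MAX_THOLD;
        }
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");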
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -603,7 +603,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
         int err = 0;
         u64 reg_id;
         int *qpn = &priv->base_qpn;
-        u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+        u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
 
         en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
                priv->dev->dev_addr);
@@ -672,7 +672,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
         u64 mac;
 
         if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
-                mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+                mac = mlx4_mac_to_u64(priv->dev->dev_addr);
                 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                        priv->dev->dev_addr);
                 mlx4_unregister_mac(dev, priv->port, mac);
@@ -685,7 +685,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
                 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                         bucket = &priv->mac_hash[i];
                         hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
-                                mac = mlx4_en_mac_to_u64(entry->mac);
+                                mac = mlx4_mac_to_u64(entry->mac);
                                 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                                        entry->mac);
                                 mlx4_en_uc_steer_release(priv, entry->mac,
@@ -715,14 +715,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
         struct mlx4_en_dev *mdev = priv->mdev;
         struct mlx4_dev *dev = mdev->dev;
         int err = 0;
-        u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+        u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
 
         if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
                 struct hlist_head *bucket;
                 unsigned int mac_hash;
                 struct mlx4_mac_entry *entry;
                 struct hlist_node *tmp;
-                u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+                u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
 
                 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
                 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -751,18 +751,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
         return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
 }
 
-u64 mlx4_en_mac_to_u64(u8 *addr)
-{
-        u64 mac = 0;
-        int i;
-
-        for (i = 0; i < ETH_ALEN; i++) {
-                mac <<= 8;
-                mac |= addr[i];
-        }
-        return mac;
-}
-
 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
 {
         int err = 0;
@@ -1081,7 +1069,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
                 mlx4_en_cache_mclist(dev);
                 netif_addr_unlock_bh(dev);
                 list_for_each_entry(mclist, &priv->mc_list, list) {
-                        mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
+                        mcast_addr = mlx4_mac_to_u64(mclist->addr);
                         mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                             mcast_addr, 0, MLX4_MCAST_CONFIG);
                 }
@@ -1173,7 +1161,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                                 found = true;
 
                         if (!found) {
-                                mac = mlx4_en_mac_to_u64(entry->mac);
+                                mac = mlx4_mac_to_u64(entry->mac);
                                 mlx4_en_uc_steer_release(priv, entry->mac,
                                                          priv->base_qpn,
                                                          entry->reg_id);
@@ -1216,7 +1204,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                                 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                 break;
                         }
-                        mac = mlx4_en_mac_to_u64(ha->addr);
+                        mac = mlx4_mac_to_u64(ha->addr);
                         memcpy(entry->mac, ha->addr, ETH_ALEN);
                         err = mlx4_register_mac(mdev->dev, priv->port, mac);
                         if (err < 0) {
@@ -2206,7 +2194,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
 {
         struct mlx4_en_priv *en_priv = netdev_priv(dev);
         struct mlx4_en_dev *mdev = en_priv->mdev;
-        u64 mac_u64 = mlx4_en_mac_to_u64(mac);
+        u64 mac_u64 = mlx4_mac_to_u64(mac);
 
         if (!is_valid_ether_addr(mac))
                 return -EINVAL;
@@ -2407,7 +2395,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                 if (mlx4_is_slave(priv->mdev->dev)) {
                         eth_hw_addr_random(dev);
                         en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
-                        mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+                        mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
                         mdev->dev->caps.def_mac[priv->port] = mac_u64;
                 } else {
                         en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
         stats->tx_packets = 0;
         stats->tx_bytes = 0;
         priv->port_stats.tx_chksum_offload = 0;
+        priv->port_stats.queue_stopped = 0;
+        priv->port_stats.wake_queue = 0;
+
         for (i = 0; i < priv->tx_ring_num; i++) {
                 stats->tx_packets += priv->tx_ring[i]->packets;
                 stats->tx_bytes += priv->tx_ring[i]->bytes;
                 priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
+                priv->port_stats.queue_stopped +=
+                        priv->tx_ring[i]->queue_stopped;
+                priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
         }
 
         stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
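Note on this hunk: with queue_stopped/wake_queue now counted per TX ring
(see the en_tx.c and mlx4_en.h hunks below), each ring updates its own
counter without sharing state, and the per-port totals are rebuilt here
by summing over the rings. A stripped-down sketch of the aggregation,
with simplified, hypothetical types:

    struct demo_tx_ring {
        unsigned long queue_stopped;
        unsigned long wake_queue;
    };

    struct demo_port_stats {
        unsigned long queue_stopped;
        unsigned long wake_queue;
    };

    /* Fold the per-ring event counters into the per-port totals. */
    static void demo_fold_tx_stats(struct demo_port_stats *port,
                                   struct demo_tx_ring **ring, int nrings)
    {
        int i;

        port->queue_stopped = 0;
        port->wake_queue = 0;
        for (i = 0; i < nrings; i++) {
            port->queue_stopped += ring[i]->queue_stopped;
            port->wake_queue += ring[i]->wake_queue;
        }
    }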
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
         if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
                 return -ENOMEM;
 
-        /* The device currently only supports 10G speed */
-        if (priv->port_state.link_speed != SPEED_10000)
+        /* The device supports 1G, 10G and 40G speeds */
+        if (priv->port_state.link_speed != 1000 &&
+            priv->port_state.link_speed != 10000 &&
+            priv->port_state.link_speed != 40000)
                 return priv->port_state.link_speed;
         return 0;
 }
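Note on this hunk: the self-test now accepts any of the three link
speeds the hardware supports instead of 10G only. An equivalent way to
express the check, as a sketch (the helper name is hypothetical):

    /* Return nonzero if the reported link speed passes the self-test. */
    static int demo_speed_ok(unsigned int link_speed)
    {
        switch (link_speed) {
        case 1000:      /* 1G */
        case 10000:     /* 10G */
        case 40000:     /* 40G */
            return 1;
        default:
            return 0;
        }
    }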
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,16 +44,6 @@
 
 #include "mlx4_en.h"
 
-enum {
-        MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
-        MAX_BF = 256,
-};
-
-static int inline_thold __read_mostly = MAX_INLINE;
-
-module_param_named(inline_thold, inline_thold, int, 0444);
-MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
-
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                            struct mlx4_en_tx_ring **pring, int qpn, u32 size,
                            u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
         ring->size = size;
         ring->size_mask = size - 1;
         ring->stride = stride;
-
-        inline_thold = min(inline_thold, MAX_INLINE);
+        ring->inline_thold = priv->prof->inline_thold;
 
         tmp = size * sizeof(struct mlx4_en_tx_info);
         ring->tx_info = vmalloc_node(tmp, node);
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
          */
         if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
                 netif_tx_wake_queue(ring->tx_queue);
-                priv->port_stats.wake_queue++;
+                ring->wake_queue++;
         }
         return done;
 }
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
         return ring->buf + index * TXBB_SIZE;
 }
 
-static int is_inline(struct sk_buff *skb, void **pfrag)
+static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
 {
         void *ptr;
 
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
                 }
         } else {
                 *lso_header_size = 0;
-                if (!is_inline(skb, NULL))
+                if (!is_inline(priv->prof->inline_thold, skb, NULL))
                         real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
                 else
                         real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
         int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
 
         if (skb->len <= spc) {
-                inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+                if (likely(skb->len >= MIN_PKT_LEN)) {
+                        inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+                } else {
+                        inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+                        memset(((void *)(inl + 1)) + skb->len, 0,
+                               MIN_PKT_LEN - skb->len);
+                }
                 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
                 if (skb_shinfo(skb)->nr_frags)
                         memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
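Note on this hunk (patch 3/9): frames shorter than MIN_PKT_LEN (17
bytes) are padded in place inside the inline WQE - the advertised byte
count is raised to MIN_PKT_LEN and the gap after the copied payload is
zeroed. As the cover letter notes, the canonical skb padding helper is
not used because the descriptor may live in a bounce buffer. A
stand-alone sketch of the rule, with an illustrative buffer argument:

    #include <string.h>

    #define MIN_PKT_LEN 17

    /* Zero-pad an inline payload shorter than MIN_PKT_LEN and return
     * the length to advertise in the WQE byte count. */
    static unsigned int demo_pad_inline(void *payload, unsigned int len)
    {
        if (len >= MIN_PKT_LEN)
            return len;

        memset((char *)payload + len, 0, MIN_PKT_LEN - len);
        return MIN_PKT_LEN;
    }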
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                      ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                 /* every full Tx ring stops queue */
                 netif_tx_stop_queue(ring->tx_queue);
-                priv->port_stats.queue_stopped++;
+                ring->queue_stopped++;
 
                 /* If queue was emptied after the if, and before the
                  * stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 if (unlikely(((int)(ring->prod - ring->cons)) <=
                              ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                         netif_tx_wake_queue(ring->tx_queue);
-                        priv->port_stats.wake_queue++;
+                        ring->wake_queue++;
                 } else {
                         return NETDEV_TX_BUSY;
                 }
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         tx_info->data_offset = (void *)data - (void *)tx_desc;
 
         tx_info->linear = (lso_header_size < skb_headlen(skb) &&
-                           !is_inline(skb, NULL)) ? 1 : 0;
+                           !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
 
         data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
 
-        if (is_inline(skb, &fragptr)) {
+        if (is_inline(ring->inline_thold, skb, &fragptr)) {
                 tx_info->inl = 1;
         } else {
                 /* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         skb_tx_timestamp(skb);
 
         if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
-                *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
+                tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
+
                 op_own |= htonl((bf_index & 0xffff) << 8);
                 /* Ensure new descirptor hits memory
                  * before setting ownership of this descriptor to HW */
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1890,7 +1890,8 @@ void mlx4_opreq_action(struct work_struct *work)
                         err = EINVAL;
                         break;
                 }
-                err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+                err = mlx4_cmd(dev, 0, ((u32) err |
+                                        (__force u32)cpu_to_be32(token) << 16),
                                1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
                                MLX4_CMD_NATIVE);
                 if (err) {
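Note on this hunk: this is one of the static-checker fixes.
cpu_to_be32() returns a bitwise __be32, and sparse warns when that type
is mixed into plain u32 arithmetic; the (__force u32) cast documents
that the byte-swapped value is intentionally treated as a plain 32-bit
quantity. Reduced illustration (the helper name is made up):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Pack an error code and a byte-swapped token into one u32 without
     * tripping sparse's endianness checking. */
    static u32 demo_pack_token(u32 err, u32 token)
    {
        return err | (__force u32)cpu_to_be32(token) << 16;
    }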
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -187,6 +187,13 @@ enum {
 #define GET_AVG_PERF_COUNTER(cnt) (0)
 #endif /* MLX4_EN_PERF_STAT */
 
+/* Constants for TX flow */
+enum {
+        MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+        MAX_BF = 256,
+        MIN_PKT_LEN = 17,
+};
+
 /*
  * Configurables
  */
@@ -267,10 +274,13 @@ struct mlx4_en_tx_ring {
         unsigned long bytes;
         unsigned long packets;
         unsigned long tx_csum;
+        unsigned long queue_stopped;
+        unsigned long wake_queue;
         struct mlx4_bf bf;
         bool bf_enabled;
         struct netdev_queue *tx_queue;
         int hwtstamp_tx_type;
+        int inline_thold;
 };
 
 struct mlx4_en_rx_desc {
@@ -346,6 +356,7 @@ struct mlx4_en_port_profile {
         u8 tx_pause;
         u8 tx_ppp;
         int rss_rings;
+        int inline_thold;
 };
 
 struct mlx4_en_profile {
@@ -786,7 +797,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
 
 #define MLX4_EN_NUM_SELF_TEST 5
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
-u64 mlx4_en_mac_to_u64(u8 *addr);
 void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
 
 /*
|
|
@ -64,4 +64,16 @@ void mlx4_unregister_interface(struct mlx4_interface *intf);
|
|||
|
||||
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
|
||||
|
||||
static inline u64 mlx4_mac_to_u64(u8 *addr)
|
||||
{
|
||||
u64 mac = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ETH_ALEN; i++) {
|
||||
mac <<= 8;
|
||||
mac |= addr[i];
|
||||
}
|
||||
return mac;
|
||||
}
|
||||
|
||||
#endif /* MLX4_DRIVER_H */
|
||||
|
|
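Note on this hunk (patch 5/9): the helper moves out of mlx4_en and is
renamed mlx4_mac_to_u64() so that any mlx4 consumer can share it. It
packs the six MAC bytes with addr[0] in the most significant used byte.
A small userspace check of that byte order, written as a sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    static uint64_t mac_to_u64(const uint8_t *addr)
    {
        uint64_t mac = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
            mac <<= 8;      /* make room for the next byte */
            mac |= addr[i];
        }
        return mac;
    }

    int main(void)
    {
        uint8_t mac[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0xaa, 0xbb, 0xcc };

        /* Prints 0x0002c9aabbcc: addr[0] lands in the top used byte. */
        printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
        return 0;
    }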
|
@ -270,9 +270,14 @@ enum {
|
|||
|
||||
struct mlx4_wqe_ctrl_seg {
|
||||
__be32 owner_opcode;
|
||||
__be16 vlan_tag;
|
||||
u8 ins_vlan;
|
||||
u8 fence_size;
|
||||
union {
|
||||
struct {
|
||||
__be16 vlan_tag;
|
||||
u8 ins_vlan;
|
||||
u8 fence_size;
|
||||
};
|
||||
__be32 bf_qpn;
|
||||
};
|
||||
/*
|
||||
* High 24 bits are SRC remote buffer; low 8 bits are flags:
|
||||
* [7] SO (strong ordering)
|
||||
|
|
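Note on this hunk: the anonymous union gives the same 32 bits two names,
so the BlueFlame path in en_tx.c can OR the doorbell QPN into bf_qpn
directly instead of casting &vlan_tag to __be32 * as the replaced
en_tx.c line did. A reduced illustration of the aliasing, with stand-in
types:

    #include <stdint.h>

    struct demo_ctrl_seg {
        uint32_t owner_opcode;
        union {                 /* both views name the same 4 bytes */
            struct {
                uint16_t vlan_tag;
                uint8_t  ins_vlan;
                uint8_t  fence_size;
            };
            uint32_t bf_qpn;
        };
    };

    /* The union adds no storage: sizeof(struct demo_ctrl_seg) == 8. */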