Merge branch 'mlx4-next'
Or Gerlitz says:

====================
Mellanox driver update 2014-05-12

This patchset introduces some small bug fixes:

Eyal fixed some compilation and syntactic-checker warnings. Ido fixed a
corruption in user priority mapping when changing the number of channels.
Shani fixed some other problems when modifying the MAC address. Yuval
fixed a problem when changing IRQ affinity during high traffic - IRQ
changes became effective only after the first pause in traffic.

This patchset was tested and applied over commit 93dccc5: "mdio_bus: fix
devm_mdiobus_alloc_size export"

Changes from V1:
- applied feedback from Dave to use true/false and not 0/1 in patch 1/9
- removed the patch from Noa which addressed a bug in the flow steering
  table when using a bond device, as the fix might need to be in the
  bonding driver; this is now discussed in the netdev thread "bonding
  directly changes underlying device address"

Changes from V0:
- Patch 1/9 - net/mlx4_core: Enforce irq affinity changes immediately
  - Moved the new members to a hot cache line, as Eric suggested
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
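The core of Yuval's fix is visible in the NAPI poll hunks below: under sustained traffic the poll routine keeps returning the full budget, NAPI never completes, the IRQ is never re-armed, and a new affinity mask written through /proc/irq/*/smp_affinity only takes effect at the next hardware interrupt. The following is a minimal sketch of the escape-hatch pattern the series adds, not the driver code itself; my_cq, my_poll, my_arm_cq and my_process_completions are hypothetical names:

#include <linux/netdevice.h>

struct my_cq {
	struct napi_struct napi;
	bool irq_affinity_change;	/* set by the IRQ affinity notifier */
};

static int my_process_completions(struct my_cq *cq, int budget);
static void my_arm_cq(struct my_cq *cq);	/* re-arm CQ, unmask IRQ */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_cq *cq = container_of(napi, struct my_cq, napi);
	int done = my_process_completions(cq, budget);

	/* Busy and no affinity change pending: stay in polling mode. */
	if (done == budget && !cq->irq_affinity_change)
		return budget;

	/* Ring drained, or the affinity notifier fired: complete NAPI
	 * and re-arm the CQ so the next hard interrupt is delivered on
	 * the newly selected CPU.
	 */
	cq->irq_affinity_change = false;
	napi_complete(napi);
	my_arm_cq(cq);
	return done < budget ? done : 0;
}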
commit 005e35f5f0
@@ -473,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 		goto out;
 	}
 
+	if (out_is_imm && !out_param) {
+		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+			 op);
+		err = -EINVAL;
+		goto out;
+	}
+
 	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
 	if (err)
@@ -551,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	cmd->free_head = context->next;
 	spin_unlock(&cmd->context_lock);
 
+	if (out_is_imm && !out_param) {
+		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+			 op);
+		err = -EINVAL;
+		goto out;
+	}
+
 	init_completion(&context->done);
 
 	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	atomic_set(&cq->refcount, 1);
 	init_completion(&cq->free);
 
+	cq->irq = priv->eq_table.eq[cq->vector].irq;
+	cq->irq_affinity_change = false;
+
 	return 0;
 
 err_radix:
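(Caching the EQ's Linux IRQ number on the CQ at allocation time, together with the irq_affinity_change flag, is what lets the affinity notifier added further down in eq.c match each CQ to the IRQ whose mask changed.)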
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-	mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+	if (dev->num_tc)
+		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
 
 	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
 	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
 	case IPPROTO_TCP:
 		return MLX4_NET_TRANS_RULE_ID_TCP;
 	default:
-		return -EPROTONOSUPPORT;
+		return MLX4_NET_TRANS_RULE_NUM;
 	}
 };
 
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	int rc;
 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
-	if (spec_tcp_udp.id < 0) {
+	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
 		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
 			filter->ip_proto);
 		goto ignore;
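These two hunks fix a bug class worth spelling out: mlx4_ip_proto_to_trans_rule_id() is declared to return enum mlx4_net_trans_rule_id, and an enum with no negative enumerators may be given an unsigned underlying type, so returning -EPROTONOSUPPORT and testing id < 0 in the caller is not reliable. The fix returns the in-range sentinel MLX4_NET_TRANS_RULE_NUM and tests with >=. A standalone illustration with hypothetical names:

enum rule_id { RULE_TCP, RULE_UDP, RULE_NUM };	/* RULE_NUM = sentinel */

static enum rule_id proto_to_rule(unsigned char proto)
{
	switch (proto) {
	case 6:	 return RULE_TCP;	/* IPPROTO_TCP */
	case 17: return RULE_UDP;	/* IPPROTO_UDP */
	default: return RULE_NUM;	/* unsupported protocol */
	}
}

/* Caller: "proto_to_rule(p) >= RULE_NUM" always works, whereas a
 * "< 0" test depends on the enum's underlying type. */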
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
 				       priv->dev->dev_addr, priv->prev_mac);
 		if (err)
 			en_err(priv, "Failed changing HW MAC address\n");
-		memcpy(priv->prev_mac, priv->dev->dev_addr,
-		       sizeof(priv->prev_mac));
 	} else
 		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
 
+	memcpy(priv->prev_mac, priv->dev->dev_addr,
+	       sizeof(priv->prev_mac));
+
 	return err;
 }
 
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
 	if (!is_valid_ether_addr(saddr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-
 	mutex_lock(&mdev->state_lock);
+	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
 	err = mlx4_en_do_set_mac(priv);
 	mutex_unlock(&mdev->state_lock);
 
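Shani's two MAC hunks address ordering: the copy of the new address into dev->dev_addr now happens inside mdev->state_lock, so it is serialized with other state_lock holders that call mlx4_en_do_set_mac(), and prev_mac is refreshed unconditionally (even when the port is down), so a later port-up does not re-register a stale previous address.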
@@ -895,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 	mlx4_en_cq_unlock_napi(cq);
 
 	/* If we used up all the quota - we're probably not done yet... */
-	if (done == budget)
+	if (done == budget) {
 		INC_PERF_COUNTER(priv->pstats.napi_quota);
-	else {
+		if (unlikely(cq->mcq.irq_affinity_change)) {
+			cq->mcq.irq_affinity_change = false;
+			napi_complete(napi);
+			mlx4_en_arm_cq(priv, cq);
+			return 0;
+		}
+	} else {
 		/* Done for now */
+		cq->mcq.irq_affinity_change = false;
 		napi_complete(napi);
 		mlx4_en_arm_cq(priv, cq);
 	}
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
 	/* If we used up all the quota - we're probably not done yet... */
 	if (done < budget) {
 		/* Done for now */
+		cq->mcq.irq_affinity_change = false;
 		napi_complete(napi);
 		mlx4_en_arm_cq(priv, cq);
 		return done;
+	} else if (unlikely(cq->mcq.irq_affinity_change)) {
+		cq->mcq.irq_affinity_change = false;
+		napi_complete(napi);
+		mlx4_en_arm_cq(priv, cq);
+		return 0;
 	}
 	return budget;
 }
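The TX poll hunk mirrors the RX one above: in both, a pending affinity change forces napi_complete() plus a CQ re-arm even when the full budget was consumed, and the poll returns 0 instead of budget so the NAPI core treats the round as finished.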
@@ -53,6 +53,11 @@ enum {
 	MLX4_EQ_ENTRY_SIZE	= 0x20
 };
 
+struct mlx4_irq_notify {
+	void *arg;
+	struct irq_affinity_notify notify;
+};
+
 #define MLX4_EQ_STATUS_OK	   ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW	   ( 0 << 24)
@@ -1083,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
+static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
+				     const cpumask_t *mask)
+{
+	struct mlx4_irq_notify *n = container_of(notify,
+						 struct mlx4_irq_notify,
+						 notify);
+	struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
+		struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
+
+		if (cq->irq == notify->irq)
+			cq->irq_affinity_change = true;
+	}
+}
+
+static void mlx4_release_irq_notifier(struct kref *ref)
+{
+	struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
+						 notify.kref);
+	kfree(n);
+}
+
+static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
+				     struct mlx4_dev *dev, int irq)
+{
+	struct mlx4_irq_notify *irq_notifier = NULL;
+	int err = 0;
+
+	irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
+	if (!irq_notifier) {
+		mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
+			  irq);
+		return;
+	}
+
+	irq_notifier->notify.irq = irq;
+	irq_notifier->notify.notify = mlx4_irq_notifier_notify;
+	irq_notifier->notify.release = mlx4_release_irq_notifier;
+	irq_notifier->arg = priv;
+	err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
+	if (err) {
+		kfree(irq_notifier);
+		irq_notifier = NULL;
+		mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
+	}
+}
+
+
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
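Note the lifecycle here: struct irq_affinity_notify embeds a kref, and the IRQ core drops its reference when the notifier is replaced or cleared, invoking the release callback. That is why kfree() lives in mlx4_release_irq_notifier() rather than next to the unregister call.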
@@ -1353,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 				continue;
 				/*we dont want to break here*/
 			}
+			mlx4_assign_irq_notifier(priv, dev,
+						 priv->eq_table.eq[vec].irq);
+
 			eq_set_ci(&priv->eq_table.eq[vec], 1);
 		}
 	}
@@ -1379,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 		  Belonging to a legacy EQ*/
 		mutex_lock(&priv->msix_ctl.pool_lock);
 		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			irq_set_affinity_notifier(
+				priv->eq_table.eq[vec].irq,
+				NULL);
 			free_irq(priv->eq_table.eq[vec].irq,
 				 &priv->eq_table.eq[vec]);
 			priv->msix_ctl.pool_bm &= ~(1ULL << i);
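The ordering in mlx4_release_eq() matters: the notifier is cleared with irq_set_affinity_notifier(irq, NULL) before free_irq(), so the notify callback cannot run for an IRQ the driver has already released.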
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
 MODULE_PARM_DESC(enable_64b_cqe_eqe,
 		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
 
-#define HCA_GLOBAL_CAP_MASK            0
-
 #define PF_CONTEXT_BEHAVIOUR_MASK	MLX4_FUNC_CAP_64B_EQE_CQE
 
 static char mlx4_version[] =
@@ -582,9 +580,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		return err;
 	}
 
-	/*fail if the hca has an unknown capability */
-	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
-	    HCA_GLOBAL_CAP_MASK) {
+	/* fail if the hca has an unknown global capability
+	 * at this time global_caps should be always zeroed
+	 */
+	if (hca_param.global_caps) {
 		mlx4_err(dev, "Unknown hca global capabilities\n");
 		return -ENOSYS;
 	}
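Since HCA_GLOBAL_CAP_MASK was defined as 0, (hca_param.global_caps | 0) != 0 reduces to hca_param.global_caps != 0; the rewrite drops the dead define (previous hunk) and states directly that no global capabilities are currently understood.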
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
 		if (ret < 0) {
 			mlx4_free_cmd_mailbox(dev, mailbox);
-			return -EINVAL;
+			return ret;
 		}
 		size += ret;
 	}
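Returning ret instead of a hard-coded -EINVAL propagates whatever errno parse_trans_rule() actually produced to mlx4_flow_attach()'s caller.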
@@ -577,6 +577,9 @@ struct mlx4_cq {
 
 	u32			cons_index;
 
+	u16			irq;
+	bool			irq_affinity_change;
+
 	__be32		       *set_ci_db;
 	__be32		       *arm_db;
 	int			arm_sn;