Merge branch 'mlx-next'

Or Gerlitz says:

==============================
Mellanox driver update for net-next

Some small fixes and small enhancements from the team.

Series applies over net-next commit acb4a6b
"tcp: ensure prior synack rtx behavior with small backlogs".
==============================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8cec75bd85
drivers/net/ethernet/mellanox/mlx4/en_netdev.c

@@ -573,10 +573,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 {
     struct mlx4_en_dev *mdev = priv->mdev;
     struct mlx4_dev *dev = mdev->dev;
-    struct mlx4_mac_entry *entry;
     int index = 0;
     int err = 0;
-    u64 reg_id = 0;
     int *qpn = &priv->base_qpn;
     u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

@@ -600,44 +598,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
     en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
     if (err) {
         en_err(priv, "Failed to reserve qp for mac registration\n");
-        goto qp_err;
+        mlx4_unregister_mac(dev, priv->port, mac);
+        return err;
     }

-    err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
-    if (err)
-        goto steer_err;
-
-    err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
-                                   &priv->tunnel_reg_id);
-    if (err)
-        goto tunnel_err;
-
-    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-    if (!entry) {
-        err = -ENOMEM;
-        goto alloc_err;
-    }
-    memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
-    memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
-    entry->reg_id = reg_id;
-
-    hlist_add_head_rcu(&entry->hlist,
-                       &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
-
     return 0;
-
-alloc_err:
-    if (priv->tunnel_reg_id)
-        mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-tunnel_err:
-    mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
-
-steer_err:
-    mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
-    mlx4_unregister_mac(dev, priv->port, mac);
-    return err;
 }

 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)

@@ -645,39 +610,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
     struct mlx4_en_dev *mdev = priv->mdev;
     struct mlx4_dev *dev = mdev->dev;
     int qpn = priv->base_qpn;
-    u64 mac;

     if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
-        mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+        u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
         en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                priv->dev->dev_addr);
         mlx4_unregister_mac(dev, priv->port, mac);
-    } else {
-        struct mlx4_mac_entry *entry;
-        struct hlist_node *tmp;
-        struct hlist_head *bucket;
-        unsigned int i;
-
-        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
-            bucket = &priv->mac_hash[i];
-            hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
-                mac = mlx4_mac_to_u64(entry->mac);
-                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
-                       entry->mac);
-                mlx4_en_uc_steer_release(priv, entry->mac,
-                                         qpn, entry->reg_id);
-
-                mlx4_unregister_mac(dev, priv->port, mac);
-                hlist_del_rcu(&entry->hlist);
-                kfree_rcu(entry, rcu);
-            }
-        }
-
-        if (priv->tunnel_reg_id) {
-            mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-            priv->tunnel_reg_id = 0;
-        }
     }

     en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
            priv->port, qpn);
     mlx4_qp_release_range(dev, qpn, 1);

@@ -1283,6 +1222,75 @@ static void mlx4_en_netpoll(struct net_device *dev)
 }
 #endif

+static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+    u64 reg_id;
+    int err = 0;
+    int *qpn = &priv->base_qpn;
+    struct mlx4_mac_entry *entry;
+
+    err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+    if (err)
+        return err;
+
+    err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+                                   &priv->tunnel_reg_id);
+    if (err)
+        goto tunnel_err;
+
+    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+    if (!entry) {
+        err = -ENOMEM;
+        goto alloc_err;
+    }
+
+    memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+    memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
+    entry->reg_id = reg_id;
+    hlist_add_head_rcu(&entry->hlist,
+                       &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+    return 0;
+
+alloc_err:
+    if (priv->tunnel_reg_id)
+        mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+
+tunnel_err:
+    mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+    return err;
+}
+
+static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+    u64 mac;
+    unsigned int i;
+    int qpn = priv->base_qpn;
+    struct hlist_head *bucket;
+    struct hlist_node *tmp;
+    struct mlx4_mac_entry *entry;
+
+    for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+        bucket = &priv->mac_hash[i];
+        hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+            mac = mlx4_mac_to_u64(entry->mac);
+            en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
+                   entry->mac);
+            mlx4_en_uc_steer_release(priv, entry->mac,
+                                     qpn, entry->reg_id);
+
+            mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
+            hlist_del_rcu(&entry->hlist);
+            kfree_rcu(entry, rcu);
+        }
+    }
+
+    if (priv->tunnel_reg_id) {
+        mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+        priv->tunnel_reg_id = 0;
+    }
+}
+
 static void mlx4_en_tx_timeout(struct net_device *dev)
 {
     struct mlx4_en_priv *priv = netdev_priv(dev);

@@ -1684,6 +1692,11 @@ int mlx4_en_start_port(struct net_device *dev)
         goto tx_err;
     }

+    /* Set Unicast and VXLAN steering rules */
+    if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
+        mlx4_en_set_rss_steer_rules(priv))
+        mlx4_warn(mdev, "Failed setting steering rules\n");
+
     /* Attach rx QP to broadcast address */
     eth_broadcast_addr(&mc_list[10]);
     mc_list[5] = priv->port; /* needed for B0 steering support */

@@ -1831,6 +1844,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
     for (i = 0; i < priv->tx_ring_num; i++)
         mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

+    if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+        mlx4_en_delete_rss_steer_rules(priv);
+
     /* Free RSS qps */
     mlx4_en_release_rss_steer(priv);
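Both the register and unregister paths above key their steering state off the u64 form of the port MAC (mlx4_mac_to_u64). A rough userspace sketch of that conversion; this standalone demo mirrors the driver's most-significant-byte-first packing but is an illustration, not the kernel helper itself:

#include <stdio.h>
#include <stdint.h>

/* Pack a 6-byte Ethernet address into a u64, most significant byte
 * first, matching the layout the driver hands to firmware. */
static uint64_t mac_to_u64(const uint8_t addr[6])
{
    uint64_t mac = 0;
    int i;

    for (i = 0; i < 6; i++) {
        mac <<= 8;
        mac |= addr[i];
    }
    return mac;
}

int main(void)
{
    const uint8_t addr[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };

    printf("mac = 0x%012llx\n", (unsigned long long)mac_to_u64(addr));
    return 0;
}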
drivers/net/ethernet/mellanox/mlx4/qp.c

@@ -422,15 +422,15 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
     u64 qp_mask = 0;
     int err = 0;

+    if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+        return -EINVAL;
+
     mailbox = mlx4_alloc_cmd_mailbox(dev);
     if (IS_ERR(mailbox))
         return PTR_ERR(mailbox);

     cmd = (struct mlx4_update_qp_context *)mailbox->buf;

-    if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
-        return -EINVAL;
-
     if (attr & MLX4_UPDATE_QP_SMAC) {
         pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
         cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
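This hunk moves the attribute sanity check ahead of mlx4_alloc_cmd_mailbox(): previously, an unsupported attr returned -EINVAL after the mailbox had been allocated, leaking it. A minimal sketch of the validate-before-allocate pattern, with hypothetical names rather than the driver API:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define SUPPORTED_ATTRS 0x7    /* hypothetical supported-attribute mask */

static int update_thing(unsigned int attr)
{
    void *mailbox;

    /* Validate before allocating: nothing to unwind on failure. */
    if (!attr || (attr & ~SUPPORTED_ATTRS))
        return -EINVAL;

    mailbox = malloc(64);
    if (!mailbox)
        return -ENOMEM;

    /* ... build and post the command ... */

    free(mailbox);
    return 0;
}

int main(void)
{
    printf("valid:   %d\n", update_thing(0x1));    /* 0 */
    printf("invalid: %d\n", update_thing(0x8));    /* -EINVAL, no leak */
    return 0;
}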
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c

@@ -1238,8 +1238,10 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
     return 0;

 undo:
-    for (--i; i >= base; --i)
+    for (--i; i >= 0; --i) {
         rb_erase(&res_arr[i]->node, root);
+        list_del_init(&res_arr[i]->list);
+    }

     spin_unlock_irq(mlx4_tlock(dev));
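The add_res_range fix matters because i indexes res_arr from 0 while base is a resource id, so unwinding with i >= base could skip part or all of the rollback; the corrected loop also unlinks each entry from its list. A standalone sketch of the fixed undo loop (names hypothetical):

#include <stdio.h>

#define COUNT 4

int main(void)
{
    int inserted[COUNT] = { 0 };
    int i;

    /* Insert entries until a simulated failure at element 2. */
    for (i = 0; i < COUNT; i++) {
        if (i == 2)
            goto undo;
        inserted[i] = 1;
    }
    return 0;

undo:
    /* Walk back over array indices, not resource ids: from i - 1
     * down to 0, releasing everything inserted so far. */
    for (--i; i >= 0; --i) {
        inserted[i] = 0;
        printf("rolled back index %d\n", i);
    }
    return 1;
}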
drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@@ -254,6 +254,10 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
     pr_debug("\n");
 }

+enum {
+    MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+};
+
 const char *mlx5_command_str(int command)
 {
     switch (command) {

@@ -473,6 +477,7 @@ static void cmd_work_handler(struct work_struct *work)
     struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
     struct mlx5_cmd_layout *lay;
     struct semaphore *sem;
+    unsigned long flags;

     sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
     down(sem);

@@ -485,6 +490,9 @@ static void cmd_work_handler(struct work_struct *work)
         }
     } else {
         ent->idx = cmd->max_reg_cmds;
+        spin_lock_irqsave(&cmd->alloc_lock, flags);
+        clear_bit(ent->idx, &cmd->bitmask);
+        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
     }

     ent->token = alloc_token(cmd);

@@ -1081,7 +1089,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
     }
 }

-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
 {
     struct mlx5_cmd *cmd = &dev->cmd;
     struct mlx5_cmd_work_ent *ent;

@@ -1092,7 +1100,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
     s64 ds;
     struct mlx5_cmd_stats *stats;
     unsigned long flags;
+    unsigned long vector;

+    /* there can be at most 32 command queues */
+    vector = vec & 0xffffffff;
     for (i = 0; i < (1 << cmd->log_sz); i++) {
         if (test_bit(i, &vector)) {
             struct semaphore *sem;

@@ -1110,11 +1121,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                     ent->ret = verify_signature(ent);
                 else
                     ent->ret = 0;
-                ent->status = ent->lay->status_own >> 1;
+                if (vec & MLX5_TRIGGERED_CMD_COMP)
+                    ent->status = MLX5_DRIVER_STATUS_ABORTED;
+                else
+                    ent->status = ent->lay->status_own >> 1;
+
                 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
                               ent->ret, deliv_status_to_str(ent->status), ent->status);
             }
             free_ent(cmd, ent->idx);

             if (ent->callback) {
                 ds = ent->ts2 - ent->ts1;
                 if (ent->op < ARRAY_SIZE(cmd->stats)) {
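With the u64 argument, the low 32 bits of vec still carry the completion bitmask (one bit per command queue), while bit 32, MLX5_TRIGGERED_CMD_COMP, marks completions forced by the driver rather than reported by hardware; those are surfaced as MLX5_DRIVER_STATUS_ABORTED. A small standalone sketch of that decoding, not the kernel handler:

#include <stdio.h>
#include <stdint.h>

#define MLX5_TRIGGERED_CMD_COMP ((uint64_t)1 << 32)

static void comp_handler(uint64_t vec)
{
    /* there can be at most 32 command queues */
    unsigned long vector = vec & 0xffffffff;
    int i;

    for (i = 0; i < 32; i++) {
        if (!(vector & (1UL << i)))
            continue;
        if (vec & MLX5_TRIGGERED_CMD_COMP)
            printf("cmd %d: aborted by driver\n", i);
        else
            printf("cmd %d: completed by hardware\n", i);
    }
}

int main(void)
{
    comp_handler(0x5);                             /* hardware completions */
    comp_handler(0x5 | MLX5_TRIGGERED_CMD_COMP);   /* forced completions */
    return 0;
}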
drivers/net/ethernet/mellanox/mlx5/core/health.c

@@ -57,31 +57,16 @@
     MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
 };

-static DEFINE_SPINLOCK(health_lock);
-static LIST_HEAD(health_list);
-static struct work_struct health_work;
-
 static void health_care(struct work_struct *work)
 {
-    struct mlx5_core_health *health, *n;
+    struct mlx5_core_health *health;
     struct mlx5_core_dev *dev;
     struct mlx5_priv *priv;
-    LIST_HEAD(tlist);
-
-    spin_lock_irq(&health_lock);
-    list_splice_init(&health_list, &tlist);
-
-    spin_unlock_irq(&health_lock);
-
-    list_for_each_entry_safe(health, n, &tlist, list) {
-        priv = container_of(health, struct mlx5_priv, health);
-        dev = container_of(priv, struct mlx5_core_dev, priv);
-        mlx5_core_warn(dev, "handling bad device here\n");
-        /* nothing yet */
-        spin_lock_irq(&health_lock);
-        list_del_init(&health->list);
-        spin_unlock_irq(&health_lock);
-    }
+
+    health = container_of(work, struct mlx5_core_health, work);
+    priv = container_of(health, struct mlx5_priv, health);
+    dev = container_of(priv, struct mlx5_core_dev, priv);
+    mlx5_core_warn(dev, "handling bad device here\n");
 }

 static const char *hsynd_str(u8 synd)

@@ -114,32 +99,41 @@ static const char *hsynd_str(u8 synd)
     }
 }

-static u16 read_be16(__be16 __iomem *p)
+static u16 get_maj(u32 fw)
 {
-    return swab16(readl((__force u16 __iomem *) p));
+    return fw >> 28;
 }

-static u32 read_be32(__be32 __iomem *p)
+static u16 get_min(u32 fw)
 {
-    return swab32(readl((__force u32 __iomem *) p));
+    return fw >> 16 & 0xfff;
 }

+static u16 get_sub(u32 fw)
+{
+    return fw & 0xffff;
+}
+
 static void print_health_info(struct mlx5_core_dev *dev)
 {
     struct mlx5_core_health *health = &dev->priv.health;
     struct health_buffer __iomem *h = health->health;
+    char fw_str[18];
+    u32 fw;
     int i;

     for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
-        pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+        dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));

-    pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
-    pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra));
-    pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver));
-    pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id));
-    pr_info("irisc_index %d\n", readb(&h->irisc_index));
-    pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
-    pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_synd));
+    dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+    dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+    fw = ioread32be(&h->fw_ver);
+    sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+    dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
+    dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+    dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+    dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+    dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
 }

 static void poll_health(unsigned long data)

@@ -159,11 +153,7 @@ static void poll_health(unsigned long data)
     if (health->miss_counter == MAX_MISSES) {
         mlx5_core_err(dev, "device's health compromised\n");
         print_health_info(dev);
-        spin_lock_irq(&health_lock);
-        list_add_tail(&health->list, &health_list);
-        spin_unlock_irq(&health_lock);
-
-        queue_work(mlx5_core_wq, &health_work);
+        queue_work(health->wq, &health->work);
     } else {
         get_random_bytes(&next, sizeof(next));
         next %= HZ;

@@ -176,7 +166,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 {
     struct mlx5_core_health *health = &dev->priv.health;

-    INIT_LIST_HEAD(&health->list);
     init_timer(&health->timer);
     health->health = &dev->iseg->health;
     health->health_counter = &dev->iseg->health_counter;

@@ -192,18 +181,33 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
     struct mlx5_core_health *health = &dev->priv.health;

     del_timer_sync(&health->timer);
-
-    spin_lock_irq(&health_lock);
-    if (!list_empty(&health->list))
-        list_del_init(&health->list);
-    spin_unlock_irq(&health_lock);
 }

-void mlx5_health_cleanup(void)
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
+    struct mlx5_core_health *health = &dev->priv.health;
+
+    destroy_workqueue(health->wq);
 }

-void __init mlx5_health_init(void)
+int mlx5_health_init(struct mlx5_core_dev *dev)
 {
-    INIT_WORK(&health_work, health_care);
+    struct mlx5_core_health *health;
+    char *name;
+
+    health = &dev->priv.health;
+    name = kmalloc(64, GFP_KERNEL);
+    if (!name)
+        return -ENOMEM;
+
+    strcpy(name, "mlx5_health");
+    strcat(name, dev_name(&dev->pdev->dev));
+    health->wq = create_singlethread_workqueue(name);
+    kfree(name);
+    if (!health->wq)
+        return -ENOMEM;
+
+    INIT_WORK(&health->work, health_care);
+
+    return 0;
 }
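mlx5_health_init builds the per-device workqueue name in a temporary 64-byte buffer: "mlx5_health" followed by the PCI device name. A userspace sketch of that name construction, with snprintf standing in for the strcpy/strcat pair and a made-up device name:

#include <stdio.h>

int main(void)
{
    char name[64];    /* same size the patch kmallocs */
    const char *pci_name = "0000:04:00.0";    /* example dev_name() output */

    /* Concatenate prefix and device name into one bounded buffer. */
    snprintf(name, sizeof(name), "mlx5_health%s", pci_name);
    printf("workqueue name: %s\n", name);
    return 0;
}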
drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -62,7 +62,6 @@ static int prof_sel = MLX5_DEFAULT_PROF;
 module_param_named(prof_sel, prof_sel, int, 0444);
 MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

-struct workqueue_struct *mlx5_core_wq;
 static LIST_HEAD(intf_list);
 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(intf_mutex);

@@ -1046,6 +1045,7 @@ err_pagealloc_cleanup:

 static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {
+    int err;

     mlx5_unregister_device(dev);
     mlx5_cleanup_mr_table(dev);

@@ -1060,9 +1060,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
     mlx5_eq_cleanup(dev);
     mlx5_disable_msix(dev);
     mlx5_stop_health_poll(dev);
-    if (mlx5_cmd_teardown_hca(dev)) {
+    err = mlx5_cmd_teardown_hca(dev);
+    if (err) {
         dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-        return 1;
+        goto out;
     }
     mlx5_pagealloc_stop(dev);
     mlx5_reclaim_startup_pages(dev);

@@ -1070,11 +1071,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
     mlx5_pagealloc_cleanup(dev);
     mlx5_cmd_cleanup(dev);

-    return 0;
+out:
+    return err;
 }

 static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                             unsigned long param)
 {
     struct mlx5_priv *priv = &dev->priv;
     struct mlx5_device_context *dev_ctx;

@@ -1129,14 +1131,22 @@ static int init_one(struct pci_dev *pdev,
         goto clean_dev;
     }

+    err = mlx5_health_init(dev);
+    if (err) {
+        dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
+        goto close_pci;
+    }
+
     err = mlx5_load_one(dev, priv);
     if (err) {
         dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
-        goto close_pci;
+        goto clean_health;
     }

     return 0;

+clean_health:
+    mlx5_health_cleanup(dev);
 close_pci:
     mlx5_pci_close(dev, priv);
 clean_dev:

@@ -1153,8 +1163,10 @@ static void remove_one(struct pci_dev *pdev)

     if (mlx5_unload_one(dev, priv)) {
         dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+        mlx5_health_cleanup(dev);
         return;
     }
+
+    mlx5_health_cleanup(dev);
     mlx5_pci_close(dev, priv);
     pci_set_drvdata(pdev, NULL);
     kfree(dev);

@@ -1184,16 +1196,10 @@ static int __init init(void)
     int err;

     mlx5_register_debugfs();
-    mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
-    if (!mlx5_core_wq) {
-        err = -ENOMEM;
-        goto err_debug;
-    }
-    mlx5_health_init();

     err = pci_register_driver(&mlx5_core_driver);
     if (err)
-        goto err_health;
+        goto err_debug;

 #ifdef CONFIG_MLX5_CORE_EN
     mlx5e_init();

@@ -1201,9 +1207,6 @@ static int __init init(void)

     return 0;

-err_health:
-    mlx5_health_cleanup();
-    destroy_workqueue(mlx5_core_wq);
 err_debug:
     mlx5_unregister_debugfs();
     return err;

@@ -1215,8 +1218,6 @@ static void __exit cleanup(void)
     mlx5e_cleanup();
 #endif
     pci_unregister_driver(&mlx5_core_driver);
-    mlx5_health_cleanup();
-    destroy_workqueue(mlx5_core_wq);
     mlx5_unregister_debugfs();
 }
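init_one now unwinds in two stages: a mlx5_load_one failure branches to clean_health, which falls through to close_pci, keeping teardown in exact reverse order of setup. A compact sketch of that goto ladder with hypothetical stage names:

#include <stdio.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return -1; } /* simulate failure */
static void undo_a(void) { puts("undo a"); }

static int init_one(void)
{
    int err;

    err = setup_a();
    if (err)
        goto out;

    err = setup_b();
    if (err)
        goto clean_a;    /* unwind in reverse order of setup */

    return 0;

clean_a:
    undo_a();
out:
    return err;
}

int main(void)
{
    printf("init_one: %d\n", init_one());
    return 0;
}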
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h

@@ -43,25 +43,25 @@

 extern int mlx5_core_debug_mask;

-#define mlx5_core_dbg(dev, format, ...)                                \
-    pr_debug("%s:%s:%d:(pid %d): " format,                             \
-             (dev)->priv.name, __func__, __LINE__, current->pid,       \
+#define mlx5_core_dbg(__dev, format, ...)                              \
+    dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,         \
+            (__dev)->priv.name, __func__, __LINE__, current->pid,      \
             ##__VA_ARGS__)

-#define mlx5_core_dbg_mask(dev, mask, format, ...)                     \
+#define mlx5_core_dbg_mask(__dev, mask, format, ...)                   \
     do {                                                               \
         if ((mask) & mlx5_core_debug_mask)                             \
-            mlx5_core_dbg(dev, format, ##__VA_ARGS__);                 \
+            mlx5_core_dbg(__dev, format, ##__VA_ARGS__);               \
     } while (0)

-#define mlx5_core_err(dev, format, ...)                                \
-    pr_err("%s:%s:%d:(pid %d): " format,                               \
-           (dev)->priv.name, __func__, __LINE__, current->pid,         \
+#define mlx5_core_err(__dev, format, ...)                              \
+    dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,         \
+            (__dev)->priv.name, __func__, __LINE__, current->pid,      \
            ##__VA_ARGS__)

-#define mlx5_core_warn(dev, format, ...)                               \
-    pr_warn("%s:%s:%d:(pid %d): " format,                              \
-            (dev)->priv.name, __func__, __LINE__, current->pid,        \
+#define mlx5_core_warn(__dev, format, ...)                             \
+    dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,        \
+             (__dev)->priv.name, __func__, __LINE__, current->pid,     \
             ##__VA_ARGS__)

 enum {
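Moving the mlx5_core_* macros from pr_* to dev_* keeps the same name:function:line:(pid) prefix while letting the driver core prepend the device identity. A userspace analogue of such a variadic logging macro (fprintf standing in for dev_err; names hypothetical; ##__VA_ARGS__ is the gcc/clang extension the kernel relies on):

#include <stdio.h>

/* Userspace stand-in for a dev_err()-based mlx5_core_err(). */
#define core_err(devname, format, ...) \
    fprintf(stderr, "%s: %s:%d: " format, \
            (devname), __func__, __LINE__, ##__VA_ARGS__)

int main(void)
{
    core_err("mlx5_core 0000:04:00.0", "cmd failed, err %d\n", -5);
    return 0;
}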
include/linux/mlx5/driver.h

@@ -391,9 +391,10 @@ struct mlx5_core_health {
     struct health_buffer __iomem *health;
     __be32 __iomem *health_counter;
     struct timer_list timer;
-    struct list_head list;
     u32 prev;
     int miss_counter;
+    struct workqueue_struct *wq;
+    struct work_struct work;
 };

 struct mlx5_cq_table {

@@ -676,8 +677,8 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
-void mlx5_health_cleanup(void);
-void __init mlx5_health_init(void);
+void mlx5_health_cleanup(struct mlx5_core_dev *dev);
+int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,

@@ -731,7 +732,7 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
 #endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                        int nent, u64 mask, const char *name, struct mlx5_uar *uar);

@@ -865,4 +866,8 @@ static inline int mlx5_get_gid_table_len(u16 param)
     return 8 * (1 << param);
 }

+enum {
+    MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
+};
+
 #endif /* MLX5_DRIVER_H */
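The cast in MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32 is load-bearing: shifting a plain int constant by 32 is undefined behavior on a 32-bit int, so the value must be widened before the shift. A two-line demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t ok = (uint64_t)1 << 32;    /* widen first, then shift */

    /* A bare "1 << 32" would shift a 32-bit int past its width: UB. */
    printf("0x%llx\n", (unsigned long long)ok);
    return 0;
}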