net/mlx5: Separate and generalize dma device from pci device
The mlx5 Sub-Function (SF) sub-device will be introduced in subsequent patches. It will be created as a mediated device and will belong to the mdev bus. DMA operations on PF, VF and SF need to be handled uniformly, so reduce the dependency on the pdev PCI device struct and work directly with the 'struct device' introduced in the previous patch.

This patch does not change any functionality.

Signed-off-by: Vu Pham <vuhuong@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit c42260f195
parent 27b942fbbd
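For orientation before the diff: a minimal sketch of the pattern this patch relies on, assuming, per the commit message, that the previous patch (27b942fbbd) added the 'struct device *device' member to mlx5_core_dev and pointed it at the PCI device during probe. The type and helper names below are illustrative, not the kernel's:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch only: a cached generic device pointer lets DMA call sites stop
 * caring whether the underlying bus is PCI (PF/VF) or mdev (SF).
 */
struct mlx5_core_dev_sketch {
        struct pci_dev *pdev;   /* valid for PF/VF only */
        struct device *device;  /* generic DMA device, set once at init */
};

/* Assumed init for PF/VF, following the previous patch in the series:
 * the DMA device is the PCI device's embedded struct device. An SF would
 * instead point 'device' at its mediated device.
 */
static void sketch_init(struct mlx5_core_dev_sketch *dev, struct pci_dev *pdev)
{
        dev->pdev = pdev;
        dev->device = &pdev->dev;
}

/* Every call site converted below then takes the same shape: */
static void *sketch_alloc(struct mlx5_core_dev_sketch *dev, size_t size,
                          dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(dev->device, size, dma_handle, GFP_KERNEL);
}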
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
                                         ibdev->rep->vport);
                if (rep_ndev == ndev)
                        roce->netdev = ndev;
-       } else if (ndev->dev.parent == &mdev->pdev->dev) {
+       } else if (ndev->dev.parent == mdev->device) {
                roce->netdev = ndev;
        }
        write_unlock(&roce->netdev_lock);
@@ -5666,7 +5666,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
                }

                if (bound) {
-                       dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+                       dev_dbg(mpi->mdev->device,
+                               "removing port from unaffiliated list.\n");
                        mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
                        list_del(&mpi->list);
                        break;
@@ -5865,7 +5866,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
        dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
        dev->ib_dev.phys_port_cnt       = dev->num_ports;
        dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
-       dev->ib_dev.dev.parent          = &mdev->pdev->dev;
+       dev->ib_dev.dev.parent          = mdev->device;

        mutex_init(&dev->cap_mask_mutex);
        INIT_LIST_HEAD(&dev->qp_list);
@@ -6554,7 +6555,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)

        if (!bound) {
                list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
-               dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+               dev_dbg(mdev->device,
+                       "no suitable IB device found to bind to, added to unaffiliated list.\n");
        }
        mutex_unlock(&mlx5_ib_multiport_mutex);

--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
                                           int node)
 {
        struct mlx5_priv *priv = &dev->priv;
+       struct device *device = dev->device;
        int original_node;
        void *cpu_handle;

        mutex_lock(&priv->alloc_mutex);
-       original_node = dev_to_node(&dev->pdev->dev);
-       set_dev_node(&dev->pdev->dev, node);
-       cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+       original_node = dev_to_node(device);
+       set_dev_node(device, node);
+       cpu_handle = dma_alloc_coherent(device, size, dma_handle,
                                        GFP_KERNEL);
-       set_dev_node(&dev->pdev->dev, original_node);
+       set_dev_node(device, original_node);
        mutex_unlock(&priv->alloc_mutex);
        return cpu_handle;
 }
@@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);

 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 {
-       dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
+       dma_free_coherent(dev->device, buf->size, buf->frags->buf,
                          buf->frags->map);

        kfree(buf->frags);
@@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                if (!frag->buf)
                        goto err_free_buf;
                if (frag->map & ((1 << buf->page_shift) - 1)) {
-                       dma_free_coherent(&dev->pdev->dev, frag_sz,
+                       dma_free_coherent(dev->device, frag_sz,
                                          buf->frags[i].buf, buf->frags[i].map);
                        mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
                                       &frag->map, buf->page_shift);
@@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,

 err_free_buf:
        while (i--)
-               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+               dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
                                  buf->frags[i].map);
        kfree(buf->frags);
 err_out:
@@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
        for (i = 0; i < buf->npages; i++) {
                int frag_sz = min_t(int, size, PAGE_SIZE);

-               dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+               dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
                                  buf->frags[i].map);
                size -= frag_sz;
        }
@@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
        __set_bit(db->index, db->u.pgdir->bitmap);

        if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
-               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+               dma_free_coherent(dev->device, PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                bitmap_free(db->u.pgdir->bitmap);

--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1852,7 +1852,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)

 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 {
-       struct device *ddev = &dev->pdev->dev;
+       struct device *ddev = dev->device;

        cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
                                                &cmd->alloc_dma, GFP_KERNEL);
@@ -1883,7 +1883,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)

 static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 {
-       struct device *ddev = &dev->pdev->dev;
+       struct device *ddev = dev->device;

        dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
                          cmd->alloc_dma);
@@ -1908,8 +1908,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                return -EINVAL;
        }

-       cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
-                                   0);
+       cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
        if (!cmd->pool)
                return -ENOMEM;

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1891,7 +1891,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->tstamp   = &priv->tstamp;
        c->ix       = ix;
        c->cpu      = cpu;
-       c->pdev     = &priv->mdev->pdev->dev;
+       c->pdev     = priv->mdev->device;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc   = params->num_tc;
@@ -2137,7 +2137,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
        MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
        MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

-       param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(mdev->device);
 }

 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@@ -2152,7 +2152,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
                 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
        MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);

-       param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(mdev->device);
 }

 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2164,7 +2164,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

-       param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
 }

 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@@ -3046,8 +3046,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
                               struct mlx5e_cq *cq,
                               struct mlx5e_cq_param *param)
 {
-       param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
-       param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(mdev->device);
+       param->wq.db_numa_node  = dev_to_node(mdev->device);

        return mlx5e_alloc_cq_common(mdev, param, cq);
 }
@@ -4639,7 +4639,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        bool fcs_supported;
        bool fcs_enabled;

-       SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+       SET_NETDEV_DEV(netdev, mdev->device);

        netdev->netdev_ops = &mlx5e_netdev_ops;

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1389,7 +1389,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
        struct mlx5_core_dev *mdev = priv->mdev;

        if (rep->vport == MLX5_VPORT_UPLINK) {
-               SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
+               SET_NETDEV_DEV(netdev, mdev->device);
                netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
                /* we want a persistent mac for the uplink rep */
                mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);

--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -200,7 +200,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
        rb_erase(&fwp->rb_node, &dev->priv.page_root);
        if (fwp->free_count != 1)
                list_del(&fwp->list);
-       dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
+       dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
                       PAGE_SIZE, DMA_BIDIRECTIONAL);
        __free_page(fwp->page);
        kfree(fwp);
@@ -211,11 +211,12 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)

 static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
 {
+       struct device *device = dev->device;
+       int nid = dev_to_node(device);
        struct page *page;
        u64 zero_addr = 1;
        u64 addr;
        int err;
-       int nid = dev_to_node(&dev->pdev->dev);

        page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
@@ -223,9 +224,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
                return -ENOMEM;
        }
 map:
-       addr = dma_map_page(&dev->pdev->dev, page, 0,
-                           PAGE_SIZE, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(&dev->pdev->dev, addr)) {
+       addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(device, addr)) {
                mlx5_core_warn(dev, "failed dma mapping page\n");
                err = -ENOMEM;
                goto err_mapping;
@@ -240,8 +240,7 @@ map:
        err = insert_page(dev, addr, page, func_id);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
-               dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
-                              DMA_BIDIRECTIONAL);
+               dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
        }

 err_mapping:
@@ -249,7 +248,7 @@ err_mapping:
                __free_page(page);

        if (zero_addr == 0)
-               dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+               dma_unmap_page(device, zero_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);

        return err;
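One detail in the alloc.c hunk above is worth flagging: mlx5_dma_zalloc_coherent_node() steers coherent DMA memory to a chosen NUMA node by temporarily overriding the device's node. A condensed sketch of that pattern, with an illustrative standalone signature (the body mirrors the function above; the caller-supplied mutex stands in for priv->alloc_mutex, which keeps the override race-free):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>

/* Illustrative helper: allocate coherent DMA memory on a specific NUMA
 * node by swapping the device's node around the allocation, then
 * restoring it. The mutex prevents concurrent callers from seeing each
 * other's temporary node setting.
 */
static void *dma_alloc_coherent_on_node(struct device *device,
                                        struct mutex *alloc_mutex,
                                        size_t size, dma_addr_t *dma_handle,
                                        int node)
{
        int original_node;
        void *cpu_handle;

        mutex_lock(alloc_mutex);
        original_node = dev_to_node(device);
        set_dev_node(device, node);
        cpu_handle = dma_alloc_coherent(device, size, dma_handle, GFP_KERNEL);
        set_dev_node(device, original_node);
        mutex_unlock(alloc_mutex);
        return cpu_handle;
}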