net/mlx4_core: ICM pages are allocated on device NUMA node
ICM pages, as well as the icm and chunk metadata structures, are now allocated
on the NUMA node the device is attached to, falling back to the default
allocation when node-local memory is unavailable. This is done to optimize
FW/HW access to host memory.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.com>
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Reviewed-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6e7136ed77
parent 41d942d56c
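For context, the patch boils down to a "NUMA-local first, then fall back" allocation pattern, applied both to the ICM pages themselves and to the icm/chunk metadata. Below is a minimal sketch of that pattern, not code from this patch: mlx4_numa_alloc_demo() is an illustrative name, and the real mlx4_alloc_icm_pages() fills a scatterlist entry rather than returning the page.

/* Sketch only: prefer the device's NUMA node, fall back to any node. */
#include <linux/gfp.h>
#include <linux/mm_types.h>

static struct page *mlx4_numa_alloc_demo(int node, gfp_t gfp_mask, int order)
{
	struct page *page;

	/* Try memory local to the HCA's NUMA node first. */
	page = alloc_pages_node(node, gfp_mask, order);
	if (!page)
		/* Node-local memory exhausted: take pages from any node. */
		page = alloc_pages(gfp_mask, order);

	return page;	/* NULL only if both attempts fail */
}

The fallback keeps behaviour unchanged on machines where the device's node has no free memory of the requested order.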
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
 	kfree(icm);
 }
 
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+				gfp_t gfp_mask, int node)
 {
 	struct page *page;
 
-	page = alloc_pages(gfp_mask, order);
-	if (!page)
-		return -ENOMEM;
+	page = alloc_pages_node(node, gfp_mask, order);
+	if (!page) {
+		page = alloc_pages(gfp_mask, order);
+		if (!page)
+			return -ENOMEM;
+	}
 
 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
 	return 0;
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	/* We use sg_set_buf for coherent allocs, which assumes low memory */
 	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
 
-	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-	if (!icm)
-		return NULL;
+	icm = kmalloc_node(sizeof(*icm),
+			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+			   dev->numa_node);
+	if (!icm) {
+		icm = kmalloc(sizeof(*icm),
+			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+		if (!icm)
+			return NULL;
+	}
 
 	icm->refcount = 0;
 	INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 
 	while (npages > 0) {
 		if (!chunk) {
-			chunk = kmalloc(sizeof *chunk,
-					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-			if (!chunk)
-				goto fail;
+			chunk = kmalloc_node(sizeof(*chunk),
+					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
+					     dev->numa_node);
+			if (!chunk) {
+				chunk = kmalloc(sizeof(*chunk),
+						gfp_mask & ~(__GFP_HIGHMEM |
+							     __GFP_NOWARN));
+				if (!chunk)
+					goto fail;
+			}
 
 			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
 			chunk->npages = 0;
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 					      cur_order, gfp_mask);
 		else
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-						   cur_order, gfp_mask);
+						   cur_order, gfp_mask,
+						   dev->numa_node);
 
 		if (ret) {
 			if (--cur_order < 0)
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2191,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	mutex_init(&priv->bf_mutex);
 
 	dev->rev_id = pdev->revision;
+	dev->numa_node = dev_to_node(&pdev->dev);
 	/* Detect if this device is a virtual function */
 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
 		/* When acting as pf, we normally skip vfs unless explicitly
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -662,6 +662,7 @@ struct mlx4_dev {
 	u8			rev_id;
 	char			board_id[MLX4_BOARD_ID_LEN];
 	int			num_vfs;
+	int			numa_node;
 	int			oper_log_mgm_entry_size;
 	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];
 	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];