drm/amdkfd: Store xcp partition id to amdgpu bo
For memory accounting per compute partition, and to export a drm amdgpu bo
and then import it to KFD, we need the xcp id to account the memory usage
or to find the KFD node of the original amdgpu bo, so that the KFD bo is
created on the correct adev KFD node.

Set xcp_id_plus1 in amdgpu_bo_param when creating the bo and store xcp_id
in the amdgpu bo. Add a helper macro to get the mem_id from adev and xcp_id.

v2: squash in fix ("drm/amdgpu: Fix BO creation failure on GFX 9.4.3 dGPU")

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 3ebfd221c1
parent 6cfba94a77
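As background for the diff below: the bo_param carries the partition id in a
plus-one encoding so that a zero-initialized param naturally means "any
partition", and amdgpu_bo_create() decodes it back. A minimal standalone
sketch of the convention, using simplified stand-in structs rather than the
real amdgpu types:

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified stand-ins for amdgpu_bo_param / amdgpu_bo, illustration only. */
  struct bo_param {
  	int8_t xcp_id_plus1;	/* xcp partition number plus 1, 0 means any */
  };

  struct bo {
  	int8_t xcp_id;		/* xcp partition number, -1 means any */
  };

  int main(void)
  {
  	struct bo_param bp = {0};	/* zero-initialized: "any partition" */
  	struct bo b;

  	b.xcp_id = bp.xcp_id_plus1 - 1;	/* decode: -1, any partition */
  	printf("default: xcp_id = %d\n", b.xcp_id);

  	bp.xcp_id_plus1 = 3 + 1;	/* caller targets XCP 3 */
  	b.xcp_id = bp.xcp_id_plus1 - 1;
  	printf("explicit: xcp_id = %d\n", b.xcp_id);	/* 3 */
  	return 0;
  }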
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -330,6 +330,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
 		uint64_t size, u32 alloc_flag);
 
+#define KFD_XCP_MEM_ID(adev, xcp_id) \
+		((adev)->xcp_mgr && (xcp_id) >= 0 ?\
+		(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)
+
 #define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\
 		(n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\
 		(n)->adev->xcp_mgr->num_xcp_per_mem_partition :\
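KFD_XCP_MEM_ID() resolves an xcp id to the memory partition backing that XCP;
when several XCPs share one memory partition, several xcp_ids map to the same
mem_id. A toy sketch of that mapping under an assumed topology (8 XCPs, 2 XCPs
per memory partition; the real values depend on the partition mode):

  #include <stdio.h>

  /* Toy model of adev->xcp_mgr->xcp[]; the topology values are assumptions. */
  struct xcp { int mem_id; };

  int main(void)
  {
  	struct xcp xcp[8];
  	int num_xcp_per_mem_partition = 2;	/* assumed partition mode */
  	int i;

  	for (i = 0; i < 8; i++)
  		xcp[i].mem_id = i / num_xcp_per_mem_partition;

  	/* Mirrors KFD_XCP_MEM_ID(adev, xcp_id) when xcp_mgr is set and xcp_id >= 0. */
  	for (i = 0; i < 8; i++)
  		printf("xcp_id %d -> mem_id %d\n", i, xcp[i].mem_id);
  	return 0;
  }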
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1634,6 +1634,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		uint64_t *offset, uint32_t flags, bool criu_resume)
 {
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+	struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
 	enum ttm_bo_type bo_type = ttm_bo_type_device;
 	struct sg_table *sg = NULL;
 	uint64_t user_addr = 0;
@@ -1641,7 +1642,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	struct drm_gem_object *gobj = NULL;
 	u32 domain, alloc_domain;
 	uint64_t aligned_size;
-	int8_t mem_id = -1;
+	int8_t xcp_id = -1;
 	u64 alloc_flags;
 	int ret;
 
@@ -1660,7 +1661,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 			alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
 		}
-		mem_id = avm->mem_id;
+		xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;
@@ -1718,12 +1719,12 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		goto err_reserve_limit;
 	}
 
-	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s mem_id %d\n",
+	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
 	       va, (*mem)->aql_queue ? size << 1 : size,
-	       domain_string(alloc_domain), mem_id);
+	       domain_string(alloc_domain), xcp_id);
 
 	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
-				       bo_type, NULL, &gobj, mem_id + 1);
+				       bo_type, NULL, &gobj, xcp_id + 1);
 	if (ret) {
 		pr_debug("Failed to create BO on domain %s. ret %d\n",
 			 domain_string(alloc_domain), ret);
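fpriv->xcp_id uses ~0 as a "no partition assigned" sentinel, which this path
folds to XCP 0 before the plus-one encoding. A small standalone sketch of the
fold (the real field lives in struct amdgpu_fpriv):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	uint32_t fpriv_xcp_id = ~0;	/* sentinel: process not bound to an XCP */
  	int8_t xcp_id;

  	/* Mirrors: xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id; */
  	xcp_id = fpriv_xcp_id == ~0 ? 0 : fpriv_xcp_id;
  	printf("unbound process -> xcp_id %d\n", xcp_id);	/* 0 */

  	fpriv_xcp_id = 2;
  	xcp_id = fpriv_xcp_id == ~0 ? 0 : fpriv_xcp_id;
  	printf("bound process   -> xcp_id %d\n", xcp_id);	/* 2 */
  	return 0;
  }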
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -98,7 +98,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
 			     struct dma_resv *resv,
-			     struct drm_gem_object **obj, int8_t mem_id_plus1)
+			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
 {
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo_user *ubo;
@@ -116,7 +116,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 	bp.flags = flags;
 	bp.domain = initial_domain;
 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-	bp.mem_id_plus1 = mem_id_plus1;
+	bp.xcp_id_plus1 = xcp_id_plus1;
 
 	r = amdgpu_bo_create_user(adev, &bp, &ubo);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -43,7 +43,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
 			     struct dma_resv *resv,
-			     struct drm_gem_object **obj, int8_t mem_id_plus1);
+			     struct drm_gem_object **obj, int8_t xcp_id_plus1);
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 			    struct drm_device *dev,
 			    struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -131,14 +131,15 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
 
-		if (adev->gmc.mem_partitions && abo->mem_id >= 0) {
-			places[c].fpfn = adev->gmc.mem_partitions[abo->mem_id].range.fpfn;
+		if (adev->gmc.mem_partitions && mem_id >= 0) {
+			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
 			/*
 			 * memory partition range lpfn is inclusive start + size - 1
 			 * TTM place lpfn is exclusive start + size
 			 */
-			places[c].lpfn = adev->gmc.mem_partitions[abo->mem_id].range.lpfn + 1;
+			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
 		} else {
 			places[c].fpfn = 0;
 			places[c].lpfn = 0;
@@ -583,8 +584,12 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
 	bo->flags = bp->flags;
 
-	/* bo->mem_id -1 means any partition */
-	bo->mem_id = bp->mem_id_plus1 - 1;
+	if (adev->gmc.mem_partitions)
+		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
+		bo->xcp_id = bp->xcp_id_plus1 - 1;
+	else
+		/* For GPUs without spatial partitioning */
+		bo->xcp_id = 0;
 
 	if (!amdgpu_bo_support_uswc(bo->flags))
 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
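The placement hunk converts the partition range's inclusive end pfn into TTM's
exclusive lpfn by adding 1; an off-by-one here would either leak a page into
the next partition or drop the partition's last page. A worked sketch with an
assumed 1 GiB partition:

  #include <stdio.h>

  /* Toy partition range, pfns inclusive on both ends (assumed values). */
  struct range { unsigned long fpfn, lpfn; };

  int main(void)
  {
  	struct range part = { 0x40000, 0x7ffff };	/* 1 GiB of 4 KiB pages */
  	unsigned long place_fpfn = part.fpfn;
  	unsigned long place_lpfn = part.lpfn + 1;	/* TTM lpfn is exclusive */

  	printf("TTM place: fpfn 0x%lx lpfn 0x%lx (%lu pages)\n",
  	       place_fpfn, place_lpfn, place_lpfn - place_fpfn);
  	return 0;
  }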
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -56,8 +56,8 @@ struct amdgpu_bo_param {
 	bool				no_wait_gpu;
 	struct dma_resv			*resv;
 	void				(*destroy)(struct ttm_buffer_object *bo);
-	/* memory partition number plus 1, 0 means any partition */
-	int8_t				mem_id_plus1;
+	/* xcp partition number plus 1, 0 means any partition */
+	int8_t				xcp_id_plus1;
 };
 
 /* bo virtual addresses in a vm */
@@ -111,8 +111,12 @@ struct amdgpu_bo {
 #endif
 	struct kgd_mem			*kfd_bo;
 
-	/* memory partition number, -1 means any partition */
-	int8_t				mem_id;
+	/*
+	 * For GPUs with spatial partitioning, xcp partition number, -1 means
+	 * any partition. For other ASICs without spatial partition, always 0
+	 * for memory accounting.
+	 */
+	int8_t				xcp_id;
 };
 
 struct amdgpu_bo_user {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1051,6 +1051,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 					   uint32_t page_flags)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	struct amdgpu_ttm_tt *gtt;
 	enum ttm_caching caching;
@@ -1060,7 +1061,10 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 		return NULL;
 	}
 	gtt->gobj = &bo->base;
-	gtt->pool_id = abo->mem_id;
+	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
+		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
+	else
+		gtt->pool_id = abo->xcp_id;
 
 	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 		caching = ttm_write_combined;
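With this change the GTT pool is chosen per partition at tt_create time: on a
partitioned GPU a valid xcp_id is translated to its mem_id, otherwise xcp_id
(0, or -1 for "any") is used directly. A condensed sketch of the selection,
with the mem_id translation stubbed out under the same 2-XCPs-per-partition
assumption as above:

  #include <stdio.h>

  /* Stand-in for KFD_XCP_MEM_ID() under an assumed 2-XCPs-per-partition mode. */
  static int xcp_to_mem_id(int xcp_id)
  {
  	return xcp_id >= 0 ? xcp_id / 2 : -1;
  }

  static int pick_pool_id(int has_mem_partitions, int xcp_id)
  {
  	/* Mirrors the amdgpu_ttm_tt_create() hunk above. */
  	if (has_mem_partitions && xcp_id >= 0)
  		return xcp_to_mem_id(xcp_id);
  	return xcp_id;
  }

  int main(void)
  {
  	printf("partitioned, xcp 5 -> pool %d\n", pick_pool_id(1, 5));	/* 2 */
  	printf("partitioned, any   -> pool %d\n", pick_pool_id(1, -1));	/* -1 */
  	printf("unpartitioned      -> pool %d\n", pick_pool_id(0, 0));	/* 0 */
  	return 0;
  }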
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -502,6 +502,7 @@ exit:
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			int level, bool immediate, struct amdgpu_bo_vm **vmbo)
 {
+	struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);
 	struct amdgpu_bo_param bp;
 	struct amdgpu_bo *bo;
 	struct dma_resv *resv;
@@ -534,7 +535,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	bp.type = ttm_bo_type_kernel;
 	bp.no_wait_gpu = immediate;
-	bp.mem_id_plus1 = vm->mem_id + 1;
+	bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
 
 	if (vm->root.bo)
 		bp.resv = vm->root.bo->tbo.base.resv;
@@ -560,7 +561,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	bp.type = ttm_bo_type_kernel;
 	bp.resv = bo->tbo.base.resv;
 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-	bp.mem_id_plus1 = vm->mem_id + 1;
+	bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
 
 	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
 
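Note that the page-table path encodes the sentinel differently from the KFD
allocation path above: here ~0 yields xcp_id_plus1 = 0 ("any partition"),
while amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() folds ~0 to XCP 0 and then
adds 1. A sketch contrasting the two encodings:

  #include <stdint.h>
  #include <stdio.h>

  /* Both encoders take the raw fpriv->xcp_id (with ~0 meaning "unbound"). */
  static int8_t encode_pt(uint32_t xcp_id)	/* amdgpu_vm_pt_create() style */
  {
  	return xcp_id == ~0 ? 0 : xcp_id + 1;	/* 0 == any partition */
  }

  static int8_t encode_kfd_alloc(uint32_t xcp_id)	/* gpuvm alloc style */
  {
  	return (xcp_id == ~0 ? 0 : xcp_id) + 1;	/* unbound falls back to XCP 0 */
  }

  int main(void)
  {
  	printf("unbound: pt %d, kfd alloc %d\n",
  	       encode_pt(~0), encode_kfd_alloc(~0));	/* 0 vs 1 */
  	printf("xcp 3:   pt %d, kfd alloc %d\n",
  	       encode_pt(3), encode_kfd_alloc(3));	/* 4 vs 4 */
  	return 0;
  }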
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1248,7 +1248,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 	is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
 		    num_possible_nodes() <= 1) ||
 		   (is_vram && adev == bo_adev &&
-		    bo->mem_id == vm->mem_id);
+		    KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
 	snoop = true;
 	if (uncached) {
 		mtype = MTYPE_UC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -556,7 +556,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 	bp.type = ttm_bo_type_device;
 	bp.resv = NULL;
 	if (node->xcp)
-		bp.mem_id_plus1 = node->xcp->mem_id + 1;
+		bp.xcp_id_plus1 = node->xcp->id + 1;
 
 	r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
 	if (r) {
@@ -567,7 +567,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 
 	pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
 		 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
-		 bp.mem_id_plus1 - 1);
+		 bp.xcp_id_plus1 - 1);
 
 	r = amdgpu_bo_reserve(bo, true);
 	if (r) {