drm/amdkfd: Move pgmap to amdgpu_kfd_dev structure

The VRAM pgmap resource is allocated every time compute partitions are
switched, because kfd_dev is re-initialized by post_partition_switch.
As a result, the memory region resource is leaked and the system
memory usage accounting becomes unbalanced.

The pgmap resource should be allocated and registered only once at
driver load and freed at driver unload, so move it from kfd_dev to
amdgpu_kfd_dev.
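
To make the lifetime problem concrete, below is a minimal userspace C model of
the bug and of the fix. All names are hypothetical stand-ins, not driver code:
a mapping owned by the short-lived kfd_dev is re-registered on every partition
switch and the old one orphaned, while a mapping hoisted into the long-lived
amdgpu_kfd_dev is registered exactly once.

/*
 * Minimal userspace model of the lifetime bug (hypothetical names,
 * not driver code). pgmap_register() stands in for memremap_pages();
 * partition_switch_old() models post_partition_switch re-creating
 * kfd_dev while the previously registered region is never released.
 */
#include <stdio.h>
#include <stdlib.h>

struct pgmap { void *region; };           /* stands in for dev_pagemap */
struct kfd_dev { struct pgmap pgmap; };   /* re-created on each switch */
struct amdgpu_kfd_dev {                   /* lives as long as the driver */
        struct kfd_dev *dev;
        struct pgmap pgmap;               /* after the patch: owned here */
};

static int regions_registered;

static void pgmap_register(struct pgmap *p)
{
        p->region = malloc(4096);         /* models the region resource */
        regions_registered++;
}

/* Before the patch: the pgmap travels with the short-lived kfd_dev. */
static void partition_switch_old(struct amdgpu_kfd_dev *kfd)
{
        free(kfd->dev);                   /* old kfd_dev goes away ...  */
        kfd->dev = calloc(1, sizeof(*kfd->dev));
        pgmap_register(&kfd->dev->pgmap); /* ... but its region leaked  */
}

int main(void)
{
        struct amdgpu_kfd_dev kfd = { .dev = calloc(1, sizeof(struct kfd_dev)) };

        pgmap_register(&kfd.dev->pgmap);  /* driver load */
        partition_switch_old(&kfd);
        partition_switch_old(&kfd);
        printf("regions registered: %d, leaked: %d\n",
               regions_registered, regions_registered - 1);

        /* After the patch: pgmap_register(&kfd.pgmap) runs once at load;
         * partition switches rebuild kfd.dev without touching kfd.pgmap. */
        return 0;
}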

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 610dab118f (parent 00e1ab02c2)
Philip Yang, 2023-03-31 11:13:40 -04:00; committed by Alex Deucher
6 files changed, 14 insertions(+), 13 deletions(-)

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h

@@ -30,6 +30,7 @@
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
 #include <linux/mmu_notifier.h>
+#include <linux/memremap.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
@@ -101,6 +102,9 @@ struct amdgpu_kfd_dev {
 	uint64_t vram_used_aligned;
 	bool init_complete;
 	struct work_struct reset_work;
+
+	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
+	struct dev_pagemap pgmap;
 };
 
 enum kgd_engine_type {

drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

@@ -206,7 +206,7 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
 {
-	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
+	return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
 }
 
 static void
@@ -236,7 +236,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
 	unsigned long addr;
 
 	addr = page_to_pfn(page) << PAGE_SHIFT;
-	return (addr - adev->kfd.dev->pgmap.range.start);
+	return (addr - adev->kfd.pgmap.range.start);
 }
 
 static struct page *
@@ -990,14 +990,14 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
 int svm_migrate_init(struct amdgpu_device *adev)
 {
-	struct kfd_dev *kfddev = adev->kfd.dev;
+	struct amdgpu_kfd_dev *kfddev = &adev->kfd;
 	struct dev_pagemap *pgmap;
 	struct resource *res = NULL;
 	unsigned long size;
 	void *r;
 
 	/* Page migration works on Vega10 or newer */
-	if (!KFD_IS_SOC15(kfddev))
+	if (!KFD_IS_SOC15(kfddev->dev))
 		return -EINVAL;
 
 	pgmap = &kfddev->pgmap;

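For context, svm_migrate_init() goes on to register the region through the
device-managed API. The sketch below paraphrases from memory how the function
continues in mainline around this time; it is not part of this diff, and
details may differ. The point is that devm_memremap_pages() binds the
registration to adev->dev, which is why a single registration per driver load
is sufficient once the pgmap lives in amdgpu_kfd_dev:

	/* Paraphrased continuation of svm_migrate_init() (not in this
	 * diff; reconstructed from memory, details may differ). The
	 * devm_* calls bind the region to adev->dev, so it is released
	 * exactly once, when the driver detaches from the device.
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
	if (IS_ERR(res))
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->nr_range = 1;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);

	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r))
		return PTR_ERR(r);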
drivers/gpu/drm/amd/amdkfd/kfd_priv.h

@@ -378,9 +378,6 @@ struct kfd_dev {
 	int noretry;
 
-	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
-	struct dev_pagemap pgmap;
-
 	struct kfd_node *nodes[MAX_KFD_NODES];
 	unsigned int num_nodes;
 };

drivers/gpu/drm/amd/amdkfd/kfd_svm.c

@@ -174,7 +174,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
 				   bo_adev->vm_manager.vram_base_offset -
-				   bo_adev->kfd.dev->pgmap.range.start;
+				   bo_adev->kfd.pgmap.range.start;
 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
 			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
 			continue;
@@ -2827,7 +2827,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	bool migration = false;
 	int r = 0;
 
-	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
+	if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
 		pr_debug("device does not support SVM\n");
 		return -EFAULT;
 	}
@@ -3112,7 +3112,7 @@ int svm_range_list_init(struct kfd_process *p)
 	spin_lock_init(&svms->deferred_list_lock);
 
 	for (i = 0; i < p->n_pdds; i++)
-		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->kfd))
+		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
 			bitmap_set(svms->bitmap_supported, i, 1);
 
 	return 0;

drivers/gpu/drm/amd/amdkfd/kfd_svm.h

@@ -200,8 +200,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
 /* SVM API and HMM page migration work together, device memory type
  * is initialized to not 0 when page migration register device memory.
  */
-#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0 ||\
-					(dev)->adev->gmc.is_app_apu)
+#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
+					(adev)->gmc.is_app_apu)
 
 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);

drivers/gpu/drm/amd/amdkfd/kfd_topology.c

@@ -2021,7 +2021,7 @@ int kfd_topology_add_device(struct kfd_node *gpu)
 	dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
 		HSA_CAP_RASEVENTNOTIFY : 0;
 
-	if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
+	if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev))
 		dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
 
 	kfd_debug_print_topology();