drm/amdkfd: Refactor migrate init to support partition switch
Rename svm_migrate_init to a better name, kgd2kfd_init_zone_device, because it sets up the zone device pgmap for page migration; keep it in kfd_migrate.c so it can access the static svm_migrate_pgmap_ops. Call it only once in amdgpu_device_ip_init, after the adev IP blocks are initialized but before amdgpu_amdkfd_device_init initializes the kfd nodes, which enable SVM support based on the pgmap. svm_range_set_max_pages is called by kgd2kfd_device_init every time after switching the compute partition mode. Signed-off-by: Philip Yang <Philip.Yang@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
44a9766555
commit
84b4dd3f84
|
@ -372,6 +372,17 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
|
|||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
|
||||
int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
|
||||
#else
|
||||
static inline
|
||||
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* KGD2KFD callbacks */
|
||||
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
|
||||
int kgd2kfd_resume_mm(struct mm_struct *mm);
|
||||
|
|
|
@ -2633,8 +2633,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
|
|||
goto init_failed;
|
||||
|
||||
/* Don't init kfd if whole hive need to be reset during init */
|
||||
if (!adev->gmc.xgmi.pending_reset)
|
||||
if (!adev->gmc.xgmi.pending_reset) {
|
||||
kgd2kfd_init_zone_device(adev);
|
||||
amdgpu_amdkfd_device_init(adev);
|
||||
}
|
||||
|
||||
amdgpu_fru_get_product_info(adev);
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
#include "kfd_iommu.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "kfd_smi_events.h"
|
||||
#include "kfd_svm.h"
|
||||
#include "kfd_migrate.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_xcp.h"
|
||||
|
@ -791,7 +792,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
|
|||
kfd->nodes[i] = node;
|
||||
}
|
||||
|
||||
svm_migrate_init(kfd->adev);
|
||||
svm_range_set_max_pages(kfd->adev);
|
||||
|
||||
if (kfd_resume_iommu(kfd))
|
||||
goto kfd_resume_iommu_error;
|
||||
|
|
|
@ -988,7 +988,7 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
|
|||
/* Each VRAM page uses sizeof(struct page) on system memory */
|
||||
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
|
||||
|
||||
int svm_migrate_init(struct amdgpu_device *adev)
|
||||
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_kfd_dev *kfddev = &adev->kfd;
|
||||
struct dev_pagemap *pgmap;
|
||||
|
@ -996,12 +996,10 @@ int svm_migrate_init(struct amdgpu_device *adev)
|
|||
unsigned long size;
|
||||
void *r;
|
||||
|
||||
/* Page migration works on Vega10 or newer */
|
||||
if (!KFD_IS_SOC15(kfddev->dev))
|
||||
/* Page migration works on gfx9 or newer */
|
||||
if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
|
||||
return -EINVAL;
|
||||
|
||||
svm_range_set_max_pages(adev);
|
||||
|
||||
if (adev->gmc.is_app_apu)
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -47,15 +47,6 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
|
|||
unsigned long
|
||||
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
|
||||
|
||||
int svm_migrate_init(struct amdgpu_device *adev);
|
||||
|
||||
#else
|
||||
|
||||
static inline int svm_migrate_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
|
||||
|
||||
#endif /* KFD_MIGRATE_H_ */
|
||||
|
|
|
@ -265,6 +265,10 @@ static inline int kfd_criu_resume_svm(struct kfd_process *p)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void svm_range_set_max_pages(struct amdgpu_device *adev)
|
||||
{
|
||||
}
|
||||
|
||||
#define KFD_IS_SVM_API_SUPPORTED(dev) false
|
||||
|
||||
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
|
||||
|
|
Loading…
Reference in New Issue