drm/amd/amdkfd: adjust dummy functions' placement
Move all the dummy functions in amdgpu_amdkfd.c to amdgpu_amdkfd.h
as inline functions.

Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Suggested-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit cd63989e0e
parent 33cf440d59
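In short, the patch trades per-call-site #ifdef CONFIG_HSA_AMD guards for one guarded block in the header. A minimal sketch of the pattern, using a hypothetical CONFIG_FOO and foo_init()/foo_exit() rather than anything from the patch itself:

    /* foo.h -- illustrative only, not from this commit */
    #include <linux/kconfig.h>	/* IS_ENABLED() */
    #include <linux/errno.h>	/* -ENOENT */

    #if IS_ENABLED(CONFIG_FOO)
    int foo_init(void);		/* real version implemented in foo.c */
    void foo_exit(void);
    #else
    static inline int foo_init(void)
    {
    	return -ENOENT;		/* same fallback the kfd stub uses */
    }

    static inline void foo_exit(void)
    {
    }
    #endif

Because the stubs are static inline, every translation unit that includes the header gets its own copy, which the compiler folds away; there is no out-of-line dummy object to build and no multiple-definition conflict at link time.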
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -47,12 +47,8 @@ int amdgpu_amdkfd_init(void)
 	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
 	amdgpu_amdkfd_total_mem_size *= si.mem_unit;
 
-#ifdef CONFIG_HSA_AMD
 	ret = kgd2kfd_init();
 	amdgpu_amdkfd_gpuvm_init_mem_limits();
-#else
-	ret = -ENOENT;
-#endif
 	kfd_initialized = !ret;
 
 	return ret;
@@ -696,86 +692,3 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
 
 	return adev->have_atomics_support;
 }
-
-#ifndef CONFIG_HSA_AMD
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-{
-	return false;
-}
-
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
-{
-}
-
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
-{
-	return 0;
-}
-
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-					struct amdgpu_vm *vm)
-{
-}
-
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
-{
-	return NULL;
-}
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
-{
-	return 0;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf)
-{
-	return NULL;
-}
-
-bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
-			 const struct kgd2kfd_shared_resources *gpu_resources)
-{
-	return false;
-}
-
-void kgd2kfd_device_exit(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_exit(void)
-{
-}
-
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
-{
-}
-
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
-{
-	return 0;
-}
-
-int kgd2kfd_pre_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-int kgd2kfd_post_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
-}
-
-void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -94,11 +94,6 @@ enum kgd_engine_type {
 	KGD_ENGINE_MAX
 };
 
-struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
-						struct mm_struct *mm);
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
-
 struct amdkfd_process_info {
 	/* List head of all VMs that belong to a KFD process */
@@ -132,8 +127,6 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len);
@@ -153,6 +146,38 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
 					int queue_bit);
 
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+						struct mm_struct *mm);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+#else
+static inline
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+	return false;
+}
+
+static inline
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+	return NULL;
+}
+
+static inline
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	return 0;
+}
+
+static inline
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+{
+	return 0;
+}
+#endif
 /* Shared API */
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
@@ -215,8 +240,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
 					struct file *filp, u32 pasid,
 					void **vm, void **process_info,
 					struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-				struct amdgpu_vm *vm);
 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
@@ -236,23 +259,43 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 		struct kgd_mem *mem, void **kptr, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 					    struct dma_fence **ef);
-
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 					  struct kfd_vm_fault_info *info);
-
 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 				      struct dma_buf *dmabuf,
 				      uint64_t va, void *vm,
 				      struct kgd_mem **mem, uint64_t *size,
 				      uint64_t *mmap_offset);
-
-void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
-
 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 				  struct tile_config *config);
-
+#if IS_ENABLED(CONFIG_HSA_AMD)
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+#else
+static inline
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+}
+
+static inline
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm)
+{
+}
+
+static inline
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+{
+}
+#endif
 /* KGD2KFD callbacks */
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+					       struct dma_fence *fence);
+#if IS_ENABLED(CONFIG_HSA_AMD)
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
@@ -266,11 +309,68 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-int kgd2kfd_quiesce_mm(struct mm_struct *mm);
-int kgd2kfd_resume_mm(struct mm_struct *mm);
-int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
-					       struct dma_fence *fence);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
+#else
+static inline int kgd2kfd_init(void)
+{
+	return -ENOENT;
+}
+
+static inline void kgd2kfd_exit(void)
+{
+}
+
+static inline
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+			      unsigned int asic_type, bool vf)
+{
+	return NULL;
+}
+
+static inline
+bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
+			 const struct kgd2kfd_shared_resources *gpu_resources)
+{
+	return false;
+}
+
+static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
+
+static inline
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
+
+static inline
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
+#endif
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
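With the stubs living in the header, call sites compile identically whether or not KFD support is built in, which is exactly what lets amdgpu_amdkfd_init() above drop its #ifdef. A sketch of such a caller, reusing the hypothetical foo.h from the note above:

    #include <linux/types.h>	/* bool */
    #include "foo.h"		/* hypothetical header sketched earlier */

    static bool foo_ready;

    static int start_foo(void)
    {
    	int ret = foo_init();	/* real call, or inline stub returning -ENOENT */

    	foo_ready = !ret;	/* mirrors kfd_initialized = !ret in the patch */
    	return ret;
    }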