drm/amdgpu: Add AMDGPU_GPU_PAGES_IN_CPU_PAGE define
To hopefully make the code dealing with GPU vs CPU pages a little clearer.

Suggested-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 463d2fe85b (parent 9735bf1930)
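For context, AMDGPU_GPU_PAGE_SHIFT is 12 (visible in the header hunk below), so a GPU page is always 4 KiB, while PAGE_SIZE is whatever page size the kernel was built with; the new define is simply their ratio. A standalone sketch of that arithmetic, with the 64 KiB case assumed purely for illustration (e.g. a 64K-page arm64 or ppc64 build):

    /* Standalone illustration, not part of the patch: the ratio the new
     * define captures, for two common CPU page sizes. */
    #include <stdio.h>

    #define AMDGPU_GPU_PAGE_SIZE 4096UL     /* GPU pages are always 4 KiB */

    int main(void)
    {
            unsigned long cpu_page_sizes[] = { 4096UL, 65536UL };   /* e.g. x86-64 vs. 64K arm64/ppc64 */

            for (int i = 0; i < 2; i++)
                    printf("PAGE_SIZE=%lu -> %lu GPU pages per CPU page\n",
                           cpu_page_sizes[i],
                           cpu_page_sizes[i] / AMDGPU_GPU_PAGE_SIZE);
            return 0;
    }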
@@ -234,7 +234,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	}
 
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 	for (i = 0; i < pages; i++, p++) {
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = NULL;
@@ -243,7 +243,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 		if (!adev->gart.ptr)
 			continue;
 
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
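The loops above show why the ratio keeps appearing: the GART bookkeeping works per CPU page, but the page table holds one entry per 4 KiB GPU page, so each CPU page expands into AMDGPU_GPU_PAGES_IN_CPU_PAGE consecutive PTEs. A standalone sketch of that inner loop, with write_gart_pte() as a hypothetical stand-in for amdgpu_gmc_set_pte_pde() and a 64 KiB CPU page size assumed:

    /* Standalone sketch, not driver code: one DMA address per CPU page is
     * expanded into PAGES_IN_CPU_PAGE consecutive 4 KiB GART entries. */
    #include <stdint.h>
    #include <stddef.h>

    #define GPU_PAGE_SIZE       4096ULL
    #define CPU_PAGE_SIZE       65536ULL    /* assumed 64 KiB kernel page size */
    #define PAGES_IN_CPU_PAGE   (CPU_PAGE_SIZE / GPU_PAGE_SIZE)

    /* hypothetical helper: write one page table entry (address plus flag bits) */
    static void write_gart_pte(uint64_t *gart, size_t t, uint64_t addr, uint64_t flags)
    {
            gart[t] = addr | flags;
    }

    /* map the CPU page whose DMA address is page_base, starting at GART entry t */
    static void map_one_cpu_page(uint64_t *gart, size_t t,
                                 uint64_t page_base, uint64_t flags)
    {
            for (size_t j = 0; j < PAGES_IN_CPU_PAGE; j++, t++) {
                    write_gart_pte(gart, t, page_base, flags);
                    page_base += GPU_PAGE_SIZE;
            }
    }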
@@ -282,7 +282,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 
 	for (i = 0; i < pages; i++) {
 		page_base = dma_addr[i];
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
@@ -319,7 +319,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 	for (i = 0; i < pages; i++, p++)
 		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
 #endif
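amdgpu_gart_map() and amdgpu_gart_bind() use the same two indices as the unbind path: t counts 4 KiB GART entries, while p counts CPU-page-sized slots in the CONFIG_DRM_AMDGPU_GART_DEBUGFS bookkeeping array. A small worked example of that index arithmetic, again assuming 64 KiB CPU pages:

    /* Standalone worked example, not driver code: the index arithmetic from
     * amdgpu_gart_bind()/amdgpu_gart_unbind(), with 64 KiB CPU pages assumed. */
    #include <stdio.h>
    #include <stdint.h>

    #define GPU_PAGE_SIZE       4096ULL
    #define CPU_PAGE_SIZE       65536ULL
    #define PAGES_IN_CPU_PAGE   (CPU_PAGE_SIZE / GPU_PAGE_SIZE)    /* 16 */

    int main(void)
    {
            uint64_t offset = 3 * CPU_PAGE_SIZE;    /* byte offset into the GART */
            uint64_t t = offset / GPU_PAGE_SIZE;    /* first 4 KiB GART entry: 48 */
            uint64_t p = t / PAGES_IN_CPU_PAGE;     /* first CPU-page bookkeeping slot: 3 */

            printf("offset=%llu -> t=%llu, p=%llu\n",
                   (unsigned long long)offset,
                   (unsigned long long)t,
                   (unsigned long long)p);
            return 0;
    }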
@@ -37,6 +37,8 @@ struct amdgpu_bo;
 #define AMDGPU_GPU_PAGE_SHIFT 12
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
 
+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
 struct amdgpu_gart {
 	u64				table_addr;
 	struct amdgpu_bo		*robj;
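The new define assumes that PAGE_SIZE is a whole multiple of AMDGPU_GPU_PAGE_SIZE, which holds for the power-of-two page sizes the kernel supports. Not part of the patch, but as an illustration that assumption could be spelled out with a compile-time check; in-kernel this would be something like BUILD_BUG_ON(), shown here as a standalone C11 snippet with an assumed 64 KiB page size:

    /* Illustration only, not in the patch: assert the assumption the define
     * relies on, i.e. the CPU page size is a whole multiple of the GPU page
     * size. */
    #define GPU_PAGE_SIZE 4096UL
    #define CPU_PAGE_SIZE 65536UL   /* stand-in for the kernel's PAGE_SIZE */

    _Static_assert(CPU_PAGE_SIZE % GPU_PAGE_SIZE == 0,
                   "CPU page size must be a multiple of the GPU page size");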
@@ -1567,7 +1567,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (nodes) {
 			addr = nodes->start << PAGE_SHIFT;
 			max_entries = (nodes->size - pfn) *
-				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 		} else {
 			addr = 0;
 			max_entries = S64_MAX;
@@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 
 			max_entries = min(max_entries, 16ull * 1024ull);
 			for (count = 1;
-			     count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 			     ++count) {
 				uint64_t idx = pfn + count;
 
@@ -1592,7 +1592,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				dma_addr = pages_addr;
 			} else {
 				addr = pages_addr[pfn];
-				max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 			}
 
 		} else if (flags & AMDGPU_PTE_VALID) {
@@ -1607,7 +1607,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (r)
 			return r;
 
-		pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 		if (nodes && nodes->size == pfn) {
 			pfn = 0;
 			++nodes;
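In amdgpu_vm_bo_split_mapping() the conversion runs in both directions: pfn and nodes->size appear to count CPU pages, while max_entries and (last - start + 1) count 4 KiB GPU pages, so the define multiplies when going from CPU pages to page table entries and divides on the way back. A standalone sketch of that unit conversion with assumed example values:

    /* Standalone sketch, not driver code: unit conversions as in
     * amdgpu_vm_bo_split_mapping(), with assumed values and 64 KiB CPU pages. */
    #include <stdio.h>
    #include <stdint.h>

    #define GPU_PAGE_SIZE       4096ULL
    #define CPU_PAGE_SIZE       65536ULL
    #define PAGES_IN_CPU_PAGE   (CPU_PAGE_SIZE / GPU_PAGE_SIZE)    /* 16 */

    int main(void)
    {
            uint64_t node_size = 8;     /* CPU pages in the current node (assumed) */
            uint64_t pfn = 2;           /* CPU pages already consumed (assumed) */

            /* CPU pages remaining -> GPU page table entries available */
            uint64_t max_entries = (node_size - pfn) * PAGES_IN_CPU_PAGE;   /* 96 */

            /* after mapping some GPU pages, convert back to CPU pages */
            uint64_t mapped_gpu_pages = 32;                                 /* assumed */
            pfn += mapped_gpu_pages / PAGES_IN_CPU_PAGE;                    /* 2 + 2 = 4 */

            printf("max_entries=%llu, new pfn=%llu\n",
                   (unsigned long long)max_entries,
                   (unsigned long long)pfn);
            return 0;
    }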