/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
|
2016-10-25 20:00:45 +08:00
|
|
|
#include <linux/dma-fence-array.h>
|
2017-03-30 20:03:59 +08:00
|
|
|
#include <linux/interval_tree_generic.h>
|
2017-08-26 08:40:26 +08:00
|
|
|
#include <linux/idr.h>
|
2015-04-21 04:55:21 +08:00
|
|
|
#include <drm/drmP.h>
|
|
|
|
#include <drm/amdgpu_drm.h>
|
|
|
|
#include "amdgpu.h"
|
|
|
|
#include "amdgpu_trace.h"
|
2018-03-16 05:27:43 +08:00
|
|
|
#include "amdgpu_amdkfd.h"
|
2018-06-13 02:28:20 +08:00
|
|
|
#include "amdgpu_gmc.h"
|
2015-04-21 04:55:21 +08:00
|
|
|
|
/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
                     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

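/*
 * Note: the INTERVAL_TREE_DEFINE() instantiation above generates the static
 * helpers amdgpu_vm_it_insert(), amdgpu_vm_it_remove(),
 * amdgpu_vm_it_iter_first() and amdgpu_vm_it_iter_next(), which are used to
 * look up struct amdgpu_bo_va_mapping entries by their [start, last] GPU VA
 * interval.
 */
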
/**
 * struct amdgpu_pte_update_params - Local structure
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_pte_update_params {

        /**
         * @adev: amdgpu device we do this update for
         */
        struct amdgpu_device *adev;

        /**
         * @vm: optional amdgpu_vm we do this update for
         */
        struct amdgpu_vm *vm;

        /**
         * @src: address where to copy page table entries from
         */
        uint64_t src;

        /**
         * @ib: indirect buffer to fill with commands
         */
        struct amdgpu_ib *ib;

        /**
         * @func: Function which actually does the update
         */
        void (*func)(struct amdgpu_pte_update_params *params,
                     struct amdgpu_bo *bo, uint64_t pe,
                     uint64_t addr, unsigned count, uint32_t incr,
                     uint64_t flags);

        /**
         * @pages_addr:
         *
         * DMA addresses to use for mapping, used during VM update by CPU
         */
        dma_addr_t *pages_addr;

        /**
         * @kptr:
         *
         * Kernel pointer of PD/PT BO that needs to be updated,
         * used during VM update by CPU
         */
        void *kptr;
};

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

        /**
         * @adev: amdgpu device
         */
        struct amdgpu_device *adev;

        /**
         * @cb: callback
         */
        struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo *bo)
{
        base->vm = vm;
        base->bo = bo;
        INIT_LIST_HEAD(&base->bo_list);
        INIT_LIST_HEAD(&base->vm_status);

        if (!bo)
                return;
        list_add_tail(&base->bo_list, &bo->va);

        if (bo->tbo.type == ttm_bo_type_kernel)
                list_move(&base->vm_status, &vm->relocated);

        if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
                return;

        if (bo->preferred_domains &
            amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
                return;

        /*
         * we checked all the prerequisites, but it looks like this per vm bo
         * is currently evicted. add the bo to the evicted list to make sure it
         * is validated on next vm use to avoid fault.
         */
        list_move_tail(&base->vm_status, &vm->evicted);
        base->moved = true;
}

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
                                      unsigned level)
{
        unsigned shift = 0xff;

        switch (level) {
        case AMDGPU_VM_PDB2:
        case AMDGPU_VM_PDB1:
        case AMDGPU_VM_PDB0:
                shift = 9 * (AMDGPU_VM_PDB0 - level) +
                        adev->vm_manager.block_size;
                break;
        case AMDGPU_VM_PTB:
                shift = 0;
                break;
        default:
                dev_err(adev->dev, "the level%d isn't supported.\n", level);
        }

        return shift;
}

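/*
 * Worked example for amdgpu_vm_level_shift(), assuming the common 9-bit
 * block_size: the leaf page tables (AMDGPU_VM_PTB) need no shift, PDB0
 * entries are selected by pfn bits starting at 9, PDB1 at 18 and PDB2 at 27,
 * i.e. each directory level covers another 9 bits (512 entries) of the page
 * frame number.
 */
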
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                      unsigned level)
{
        unsigned shift = amdgpu_vm_level_shift(adev,
                                               adev->vm_manager.root_level);

        if (level == adev->vm_manager.root_level)
                /* For the root directory */
                return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
        else if (level != AMDGPU_VM_PTB)
                /* Everything in between */
                return 512;
        else
                /* For the page tables on the leaves */
                return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
        return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

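/*
 * Worked example for amdgpu_vm_bo_size(): an intermediate directory level
 * holds 512 entries and every PDE/PTE is 8 bytes, so its backing BO is
 * 512 * 8 = 4096 bytes, i.e. exactly one GPU page.
 */
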
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
{
        entry->robj = vm->root.base.bo;
        entry->priority = 0;
        entry->tv.bo = &entry->robj->tbo;
        entry->tv.shared = true;
        entry->user_pages = NULL;
        list_add(&entry->tv.head, validated);
}

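/*
 * Illustrative use of amdgpu_vm_get_pd_bo() (a sketch, not taken from a real
 * caller): a command submission path typically keeps a local
 * struct amdgpu_bo_list_entry for the page directory and does
 *
 *      struct list_head list;
 *      struct amdgpu_bo_list_entry vm_pd;
 *
 *      INIT_LIST_HEAD(&list);
 *      amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 *
 * before handing the list to the TTM reservation helpers.
 */
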
/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
{
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;

        if (vm->bulk_moveable) {
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }

        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;

                if (!bo->parent)
                        continue;

                ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
                if (bo->shadow)
                        ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
                                                &vm->lru_bulk_move);
        }
        spin_unlock(&glob->lru_lock);

        vm->bulk_moveable = true;
}

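/*
 * Design note (carried over from the bulk-move rework): moving the PD/PT and
 * per-VM BOs to the LRU tail one by one after every submission was a
 * measurable overhead, so their positions are remembered in
 * vm->lru_bulk_move and, as long as nothing had to be validated
 * (vm->bulk_moveable), the whole block is moved in a single
 * ttm_bo_bulk_move_lru_tail() call.  This is expected to run after command
 * submission, when the BOs are back on the idle list.
 */
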
/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*validate)(void *p, struct amdgpu_bo *bo),
                              void *param)
{
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;

        vm->bulk_moveable &= list_empty(&vm->evicted);

        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;

                r = validate(param, bo);
                if (r)
                        break;

                if (bo->tbo.type != ttm_bo_type_kernel) {
                        spin_lock(&vm->moved_lock);
                        list_move(&bo_base->vm_status, &vm->moved);
                        spin_unlock(&vm->moved_lock);
                } else {
                        r = amdgpu_ttm_alloc_gart(&bo->tbo);
                        if (r)
                                break;
                        list_move(&bo_base->vm_status, &vm->relocated);
                }
        }

        return r;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
        return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 * @level: level this BO is at
 * @pte_support_ats: indicate ATS support from PTE
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm, struct amdgpu_bo *bo,
                              unsigned level, bool pte_support_ats)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct dma_fence *fence = NULL;
        unsigned entries, ats_entries;
        struct amdgpu_ring *ring;
        struct amdgpu_job *job;
        uint64_t addr;
        int r;

        entries = amdgpu_bo_size(bo) / 8;

        if (pte_support_ats) {
                if (level == adev->vm_manager.root_level) {
                        ats_entries = amdgpu_vm_level_shift(adev, level);
                        ats_entries += AMDGPU_GPU_PAGE_SHIFT;
                        ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
                        ats_entries = min(ats_entries, entries);
                        entries -= ats_entries;
                } else {
                        ats_entries = entries;
                        entries = 0;
                }
        } else {
                ats_entries = 0;
        }

        ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                return r;

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                goto error;

        r = amdgpu_ttm_alloc_gart(&bo->tbo);
        if (r)
                return r;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto error;

        addr = amdgpu_bo_gpu_offset(bo);
        if (ats_entries) {
                uint64_t ats_value;

                ats_value = AMDGPU_PTE_DEFAULT_ATC;
                if (level != AMDGPU_VM_PTB)
                        ats_value |= AMDGPU_PDE_PTE;

                amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
                                      ats_entries, 0, ats_value);
                addr += ats_entries * 8;
        }

        if (entries)
                amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
                                      entries, 0, 0);

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);

        WARN_ON(job->ibs[0].length_dw > 64);
        r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
                             AMDGPU_FENCE_OWNER_UNDEFINED, false);
        if (r)
                goto error_free;

        r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
                              &fence);
        if (r)
                goto error_free;

        amdgpu_bo_fence(bo, fence, true);
        dma_fence_put(fence);

        if (bo->shadow)
                return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
                                          level, pte_support_ats);

        return 0;

error_free:
        amdgpu_job_free(job);

error:
        return r;
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: level of the page directory or page table to allocate
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                               int level, struct amdgpu_bo_param *bp)
{
        memset(bp, 0, sizeof(*bp));

        bp->size = amdgpu_vm_bo_size(adev, level);
        bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
        bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
        if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
            adev->flags & AMD_IS_APU)
                bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
        bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
        bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        if (vm->use_cpu_for_update)
                bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else
                bp->flags |= AMDGPU_GEM_CREATE_SHADOW |
                        AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp->type = ttm_bo_type_kernel;
        if (vm->root.base.bo)
                bp->resv = vm->root.base.bo->tbo.resv;
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent PT
 * @saddr: start of the address range
 * @eaddr: end of the address range
 * @level: VMPT level
 * @ats: indicate ATS support from PTE
 *
 * Make sure the page directories and page tables are allocated
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm,
                                  struct amdgpu_vm_pt *parent,
                                  uint64_t saddr, uint64_t eaddr,
                                  unsigned level, bool ats)
{
        unsigned shift = amdgpu_vm_level_shift(adev, level);
        struct amdgpu_bo_param bp;
        unsigned pt_idx, from, to;
        int r;

        if (!parent->entries) {
                unsigned num_entries = amdgpu_vm_num_entries(adev, level);

                parent->entries = kvmalloc_array(num_entries,
                                                 sizeof(struct amdgpu_vm_pt),
                                                 GFP_KERNEL | __GFP_ZERO);
                if (!parent->entries)
                        return -ENOMEM;
                memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
        }

        from = saddr >> shift;
        to = eaddr >> shift;
        if (from >= amdgpu_vm_num_entries(adev, level) ||
            to >= amdgpu_vm_num_entries(adev, level))
                return -EINVAL;

        ++level;
        saddr = saddr & ((1 << shift) - 1);
        eaddr = eaddr & ((1 << shift) - 1);

        amdgpu_vm_bo_param(adev, vm, level, &bp);

        /* walk over the address space and allocate the page tables */
        for (pt_idx = from; pt_idx <= to; ++pt_idx) {
                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
                struct amdgpu_bo *pt;

                if (!entry->base.bo) {
                        r = amdgpu_bo_create(adev, &bp, &pt);
                        if (r)
                                return r;

                        r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
                        if (r) {
                                amdgpu_bo_unref(&pt->shadow);
                                amdgpu_bo_unref(&pt);
                                return r;
                        }

                        if (vm->use_cpu_for_update) {
                                r = amdgpu_bo_kmap(pt, NULL);
                                if (r) {
                                        amdgpu_bo_unref(&pt->shadow);
                                        amdgpu_bo_unref(&pt);
                                        return r;
                                }
                        }

                        /* Keep a reference to the root directory to avoid
                         * freeing them up in the wrong order.
                         */
                        pt->parent = amdgpu_bo_ref(parent->base.bo);

                        amdgpu_vm_bo_base_init(&entry->base, vm, pt);
                }

                if (level < AMDGPU_VM_PTB) {
                        uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
                        uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
                                ((1 << shift) - 1);
                        r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
                                                   sub_eaddr, level, ats);
                        if (r)
                                return r;
                }
        }

        return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                        struct amdgpu_vm *vm,
                        uint64_t saddr, uint64_t size)
{
        uint64_t eaddr;
        bool ats = false;

        /* validate the parameters */
        if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
                return -EINVAL;

        eaddr = saddr + size - 1;

        if (vm->pte_support_ats)
                ats = saddr < AMDGPU_VA_HOLE_START;

        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;

        if (eaddr >= adev->vm_manager.max_pfn) {
                dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
                        eaddr, adev->vm_manager.max_pfn);
                return -EINVAL;
        }

        return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
                                      adev->vm_manager.root_level, ats);
}

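/*
 * Illustrative call (a sketch, assuming the usual 4 KiB GPU page size):
 * reserving page tables for a 1 MiB window at GPU VA 0x100000 would be
 *
 *      r = amdgpu_vm_alloc_pts(adev, vm, 0x100000, 0x100000);
 *
 * Both the start address and the size must be GPU-page aligned, otherwise
 * the function returns -EINVAL before touching the page table tree.
 */
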
/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
        const struct amdgpu_ip_block *ip_block;
        bool has_compute_vm_bug;
        struct amdgpu_ring *ring;
        int i;

        has_compute_vm_bug = false;

        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
        if (ip_block) {
                /* Compute has a VM bug for GFX version < 7.
                   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
                if (ip_block->version->major <= 7)
                        has_compute_vm_bug = true;
                else if (ip_block->version->major == 8)
                        if (adev->gfx.mec_fw_version < 673)
                                has_compute_vm_bug = true;
        }

        for (i = 0; i < adev->num_rings; i++) {
                ring = adev->rings[i];
                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                        /* only compute rings */
                        ring->has_compute_vm_bug = has_compute_vm_bug;
                else
                        ring->has_compute_vm_bug = false;
        }
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id;
        bool gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

        if (job->vmid == 0)
                return false;
        id = &id_mgr->ids[job->vmid];
        gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
                id->gws_base != job->gws_base ||
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);

        if (amdgpu_vmid_had_gpu_reset(adev, id))
                return true;

        return vm_flush_needed || gds_switch_needed;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
        bool gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
                id->gws_base != job->gws_base ||
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);
        bool vm_flush_needed = job->vm_needs_flush;
        bool pasid_mapping_needed = id->pasid != job->pasid ||
                !id->pasid_mapping ||
                !dma_fence_is_signaled(id->pasid_mapping);
        struct dma_fence *fence = NULL;
        unsigned patch_offset = 0;
        int r;

        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
                vm_flush_needed = true;
                pasid_mapping_needed = true;
        }

        gds_switch_needed &= !!ring->funcs->emit_gds_switch;
        vm_flush_needed &= !!ring->funcs->emit_vm_flush;
        pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
                ring->funcs->emit_wreg;

        if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
                return 0;

        if (ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

        if (need_pipe_sync)
                amdgpu_ring_emit_pipeline_sync(ring);

        if (vm_flush_needed) {
                trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
                amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
        }

        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

        if (vm_flush_needed || pasid_mapping_needed) {
                r = amdgpu_fence_emit(ring, &fence, 0);
                if (r)
                        return r;
        }

        if (vm_flush_needed) {
                mutex_lock(&id_mgr->lock);
                dma_fence_put(id->last_flush);
                id->last_flush = dma_fence_get(fence);
                id->current_gpu_reset_count =
                        atomic_read(&adev->gpu_reset_counter);
                mutex_unlock(&id_mgr->lock);
        }

        if (pasid_mapping_needed) {
                id->pasid = job->pasid;
                dma_fence_put(id->pasid_mapping);
                id->pasid_mapping = dma_fence_get(fence);
        }
        dma_fence_put(fence);

        if (ring->funcs->emit_gds_switch && gds_switch_needed) {
                id->gds_base = job->gds_base;
                id->gds_size = job->gds_size;
                id->gws_base = job->gws_base;
                id->gws_size = job->gws_size;
                id->oa_base = job->oa_base;
                id->oa_size = job->oa_size;
                amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
                                            job->gds_size, job->gws_base,
                                            job->gws_size, job->oa_base,
                                            job->oa_size);
        }

        if (ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
        if (ring->funcs->emit_switch_buffer) {
                amdgpu_ring_emit_switch_buffer(ring);
                amdgpu_ring_emit_switch_buffer(ring);
        }
        return 0;
}

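/*
 * Design note (carried over from the gfx DMA-frame rework): the two
 * back-to-back SWITCH_BUFFER packets at the end of the flush replaced the
 * older 128-NOP padding workaround, which no longer fit once userspace could
 * submit many IBs per DMA frame, and they are emitted after the COND_EXEC
 * patching so that preemption cannot skip them.
 */
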
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, base.bo_list) {
                if (bo_va->base.vm == vm) {
                        return bo_va;
                }
        }
        return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
                                  struct amdgpu_bo *bo,
                                  uint64_t pe, uint64_t addr,
                                  unsigned count, uint32_t incr,
                                  uint64_t flags)
{
        pe += amdgpu_bo_gpu_offset(bo);
        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

        if (count < 3) {
                amdgpu_vm_write_pte(params->adev, params->ib, pe,
                                    addr | flags, count, incr);

        } else {
                amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
                                      count, incr, flags);
        }
}

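/*
 * Note on the count < 3 split above: for just one or two entries it is
 * cheaper to write the values straight into the IB with
 * amdgpu_vm_write_pte(), while longer runs use amdgpu_vm_set_pte_pde(), which
 * lets the DMA engine generate the incrementing addresses itself and keeps
 * the command stream small for large updates.
 */
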
/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
                                   struct amdgpu_bo *bo,
                                   uint64_t pe, uint64_t addr,
                                   unsigned count, uint32_t incr,
                                   uint64_t flags)
{
        uint64_t src = (params->src + (addr >> 12) * 8);

        pe += amdgpu_bo_gpu_offset(bo);
        trace_amdgpu_vm_copy_ptes(pe, src, count);

        amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size*/
        result |= addr & (~PAGE_MASK);

        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}

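/*
 * Worked example for amdgpu_vm_map_gart(), assuming 64 KiB CPU pages and
 * 4 KiB GPU pages: for addr 0x12345 the lookup uses pages_addr[0x1]
 * (addr >> 16), the OR keeps the in-CPU-page offset 0x2345, and the final
 * mask clears the low 12 bits, leaving the DMA address of the 4 KiB GPU page
 * at offset 0x2000 inside that CPU page.  With 4 KiB CPU pages the OR and
 * the mask cancel out and the looked-up address is returned unchanged.
 */
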
/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
                                   struct amdgpu_bo *bo,
                                   uint64_t pe, uint64_t addr,
                                   unsigned count, uint32_t incr,
                                   uint64_t flags)
{
        unsigned int i;
        uint64_t value;

        pe += (unsigned long)amdgpu_bo_kptr(bo);

        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

        for (i = 0; i < count; i++) {
                value = params->pages_addr ?
                        amdgpu_vm_map_gart(params->pages_addr, addr) :
                        addr;
                amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
                                       i, value, flags);
                addr += incr;
        }
}

/**
 * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 * @owner: fence owner
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             void *owner)
{
        struct amdgpu_sync sync;
        int r;

        amdgpu_sync_create(&sync);
        amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
        r = amdgpu_sync_wait(&sync, true);
        amdgpu_sync_free(&sync);

        return r;
}

2016-09-16 21:36:49 +08:00
|
|
|
/*
|
2017-12-01 02:08:05 +08:00
|
|
|
* amdgpu_vm_update_pde - update a single level in the hierarchy
|
2016-09-16 21:36:49 +08:00
|
|
|
*
|
2017-12-01 02:08:05 +08:00
|
|
|
* @param: parameters for the update
|
2016-09-16 21:36:49 +08:00
|
|
|
* @vm: requested vm
|
2016-10-12 21:13:52 +08:00
|
|
|
* @parent: parent directory
|
2017-12-01 02:08:05 +08:00
|
|
|
* @entry: entry to update
|
2016-09-16 21:36:49 +08:00
|
|
|
*
|
2017-12-01 02:08:05 +08:00
|
|
|
* Makes sure the requested entry in parent is up to date.
|
2016-09-16 21:36:49 +08:00
|
|
|
*/
|
2017-12-01 02:08:05 +08:00
|
|
|
static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
|
|
|
|
struct amdgpu_vm *vm,
|
|
|
|
struct amdgpu_vm_pt *parent,
|
|
|
|
struct amdgpu_vm_pt *entry)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
2018-01-16 23:54:25 +08:00
|
|
|
struct amdgpu_bo *bo = parent->base.bo, *pbo;
|
2017-11-29 20:27:26 +08:00
|
|
|
uint64_t pde, pt, flags;
|
|
|
|
unsigned level;
|
2015-07-21 16:52:10 +08:00
|
|
|
|
2017-12-01 02:08:05 +08:00
|
|
|
/* Don't update huge pages here */
|
|
|
|
if (entry->huge)
|
|
|
|
return;
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2018-01-16 23:54:25 +08:00
|
|
|
for (level = 0, pbo = bo->parent; pbo; ++level)
|
2017-11-29 20:27:26 +08:00
|
|
|
pbo = pbo->parent;
|
|
|
|
|
2017-12-13 14:22:54 +08:00
|
|
|
level += params->adev->vm_manager.root_level;
|
2018-08-22 20:11:19 +08:00
|
|
|
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
|
2018-01-16 23:54:25 +08:00
|
|
|
pde = (entry - parent->entries) * 8;
|
|
|
|
if (bo->shadow)
|
|
|
|
params->func(params, bo->shadow, pde, pt, 1, 0, flags);
|
|
|
|
params->func(params, bo, pde, pt, 1, 0, flags);
|
2015-04-21 04:55:21 +08:00
|
|
|
}
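
/*
 * Note on the PDE offset above (illustrative, numbers are an example only):
 * each directory entry is 8 bytes wide, so "pde = (entry - parent->entries)
 * * 8" turns the index of @entry within its parent into a byte offset inside
 * the parent PD BO; e.g. the entry at index 2 is written at byte offset 16
 * with a single params->func() call of count 1.
 */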

/*
 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 * @parent: parent PD
 * @level: VMPT level
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_vm_pt *parent,
				       unsigned level)
{
	unsigned pt_idx, num_entries;

	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
	num_entries = amdgpu_vm_num_entries(adev, level);
	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

		if (!entry->base.bo)
			continue;

		if (!entry->base.moved)
			list_move(&entry->base.vm_status, &vm->relocated);
		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
	}
}

/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	struct amdgpu_pte_update_params params;
	struct amdgpu_job *job;
	unsigned ndw = 0;
	int r = 0;

	if (list_empty(&vm->relocated))
		return 0;

restart:
	memset(&params, 0, sizeof(params));
	params.adev = adev;

	if (vm->use_cpu_for_update) {
		struct amdgpu_vm_bo_base *bo_base;

		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
			r = amdgpu_bo_kmap(bo_base->bo, NULL);
			if (unlikely(r))
				return r;
		}

		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
	} else {
		ndw = 512 * 8;
		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
		if (r)
			return r;

		params.ib = &job->ibs[0];
		params.func = amdgpu_vm_do_set_ptes;
	}

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_bo_base *bo_base, *parent;
		struct amdgpu_vm_pt *pt, *entry;
		struct amdgpu_bo *bo;

		bo_base = list_first_entry(&vm->relocated,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		bo_base->moved = false;
		list_move(&bo_base->vm_status, &vm->idle);

		bo = bo_base->bo->parent;
		if (!bo)
			continue;

		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
					  bo_list);
		pt = container_of(parent, struct amdgpu_vm_pt, base);
		entry = container_of(bo_base, struct amdgpu_vm_pt, base);

		amdgpu_vm_update_pde(&params, vm, pt, entry);

		if (!vm->use_cpu_for_update &&
		    (ndw - params.ib->length_dw) < 32)
			break;
	}

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_asic_flush_hdp(adev, NULL);
	} else if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
	} else {
		struct amdgpu_bo *root = vm->root.base.bo;
		struct amdgpu_ring *ring;
		struct dma_fence *fence;

		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
				    sched);

		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM, false);
		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
				      &fence);
		if (r)
			goto error;

		amdgpu_bo_fence(root, fence, true);
		dma_fence_put(vm->last_update);
		vm->last_update = fence;
	}

	if (!list_empty(&vm->relocated))
		goto restart;

	return 0;

error:
	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
				   adev->vm_manager.root_level);
	amdgpu_job_free(job);
	return r;
}
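
/*
 * Illustrative note (not original documentation): on the SDMA path above all
 * PDE writes share one IB sized at ndw = 512 * 8 dwords; once fewer than 32
 * dwords remain the loop breaks, the partial job is submitted and the
 * "restart" label allocates a fresh job, so arbitrarily long relocation
 * lists are handled in several passes.
 */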

/**
 * amdgpu_vm_get_entry - find the entry for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 * @entry: resulting entry or NULL
 * @parent: parent entry
 *
 * Find the vm_pt entry and its parent for the given address.
 */
void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
			 struct amdgpu_vm_pt **entry,
			 struct amdgpu_vm_pt **parent)
{
	unsigned level = p->adev->vm_manager.root_level;

	*parent = NULL;
	*entry = &p->vm->root;
	while ((*entry)->entries) {
		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);

		*parent = *entry;
		*entry = &(*entry)->entries[addr >> shift];
		addr &= (1ULL << shift) - 1;
	}

	if (level != AMDGPU_VM_PTB)
		*entry = NULL;
}
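
/*
 * Page walk sketch (illustrative, the shift per level depends on the ASIC
 * configuration): starting at root_level, each iteration peels
 * amdgpu_vm_level_shift() bits off @addr to select the child entry and masks
 * them away before descending; if the walk stops before reaching
 * AMDGPU_VM_PTB the address is not backed by a page table and *entry is
 * returned as NULL.
 */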

/**
 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
 *
 * @p: see amdgpu_pte_update_params definition
 * @entry: vm_pt entry to check
 * @parent: parent entry
 * @nptes: number of PTEs updated with this operation
 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
 *
 * Check if we can update the PD with a huge page.
 */
static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
					struct amdgpu_vm_pt *entry,
					struct amdgpu_vm_pt *parent,
					unsigned nptes, uint64_t dst,
					uint64_t flags)
{
	uint64_t pde;

	/* In the case of a mixed PT the PDE must point to it */
	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
		/* Set the huge page flag to stop scanning at this PDE */
		flags |= AMDGPU_PDE_PTE;
	}

	if (!(flags & AMDGPU_PDE_PTE)) {
		if (entry->huge) {
			/* Add the entry to the relocated list to update it. */
			entry->huge = false;
			list_move(&entry->base.vm_status, &p->vm->relocated);
		}
		return;
	}

	entry->huge = true;
	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);

	pde = (entry - parent->entries) * 8;
	if (parent->base.bo->shadow)
		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
}
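
/*
 * Illustrative summary (not original documentation): a "huge page" here means
 * the whole range normally covered by one page table - AMDGPU_VM_PTE_COUNT
 * PTEs, typically 512 with a 9 bit block size - is physically contiguous, so
 * the PDE itself gets AMDGPU_PDE_PTE set and points directly at the memory
 * instead of at a page table.
 */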

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -ENOENT if a page table is missing.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;

	uint64_t addr, pe_start;
	struct amdgpu_bo *pt;
	unsigned nptes;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; addr += nptes,
	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
		struct amdgpu_vm_pt *entry, *parent;

		amdgpu_vm_get_entry(params, addr, &entry, &parent);
		if (!entry)
			return -ENOENT;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);

		amdgpu_vm_handle_huge_pages(params, entry, parent,
					    nptes, dst, flags);
		/* We don't need to update PTEs for huge pages */
		if (entry->huge)
			continue;

		pt = entry->base.bo;
		pe_start = (addr & mask) * 8;
		if (pt->shadow)
			params->func(params, pt->shadow, pe_start, dst, nptes,
				     AMDGPU_GPU_PAGE_SIZE, flags);
		params->func(params, pt, pe_start, dst, nptes,
			     AMDGPU_GPU_PAGE_SIZE, flags);
	}

	return 0;
}
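
/*
 * Walk note (illustrative): @mask covers the index bits of a single page
 * table, so "AMDGPU_VM_PTE_COUNT(adev) - (addr & mask)" advances at most to
 * the next page table boundary per iteration, and the final step is clamped
 * to @end; a single params->func() call therefore never crosses a PT BO.
 */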

/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Returns:
 * 0 for success, errno otherwise.
 */
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
				uint64_t start, uint64_t end,
				uint64_t dst, uint64_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
	unsigned max_frag = params->adev->vm_manager.fragment_size;
	int r;

	/* system pages are not contiguous */
	if (params->src || !(flags & AMDGPU_PTE_VALID))
		return amdgpu_vm_update_ptes(params, start, end, dst, flags);

	while (start != end) {
		uint64_t frag_flags, frag_end;
		unsigned frag;

		/* This intentionally wraps around if no bit is set */
		frag = min((unsigned)ffs(start) - 1,
			   (unsigned)fls64(end - start) - 1);
		if (frag >= max_frag) {
			frag_flags = AMDGPU_PTE_FRAG(max_frag);
			frag_end = end & ~((1ULL << max_frag) - 1);
		} else {
			frag_flags = AMDGPU_PTE_FRAG(frag);
			frag_end = start + (1 << frag);
		}

		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
					  flags | frag_flags);
		if (r)
			return r;

		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
		start = frag_end;
	}

	return 0;
}
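
/*
 * Worked example for the fragment selection above (illustrative, made-up
 * numbers; the real fragment_size is per ASIC): for start = 8 and end = 24
 * (page indices), ffs(8) - 1 = 3 and fls64(16) - 1 = 4, so frag = 3 and the
 * first chunk maps pages 8..15 with AMDGPU_PTE_FRAG(3), i.e. a 32KB fragment
 * (1 << (12 + 3)); the next iteration then covers pages 16..23 the same way.
 */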

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct dma_fence *exclusive,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint64_t flags, uint64_t addr,
				       struct dma_fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *f = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	if (vm->use_cpu_for_update) {
		/* params.src is used as flag to indicate system memory */
		if (pages_addr)
			params.src = ~0;

		/* Wait for PT BOs to be free. PTs share the same resv. object
		 * as the root PD BO
		 */
		r = amdgpu_vm_wait_pd(adev, vm, owner);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
		params.pages_addr = pages_addr;
		return amdgpu_vm_frag_ptes(&params, start, last + 1,
					   addr, flags);
	}

	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	nptes = last - start + 1;

	/*
	 * reserve space for two commands every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 *
	 * The second command is for the shadow pagetables.
	 */
	if (vm->root.base.bo->shadow)
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
	else
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);

	/* padding, etc. */
	ndw = 64;

	if (pages_addr) {
		/* copy commands needed */
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;

		/* and also PTEs */
		ndw += nptes * 2;

		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* extra commands for begin/end fragments */
		if (vm->root.base.bo->shadow)
			ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
		else
			ndw += 2 * 10 * adev->vm_manager.fragment_size;

		params.func = amdgpu_vm_do_set_ptes;
	}
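
	/*
	 * Sizing sketch (illustrative, made-up numbers): for a 1024 page
	 * update without pages_addr, block_size 9 and no shadow PD,
	 * ncmds = (1024 >> 9) + 1 = 3 and ndw = 64 + 3 * 10 + 2 * 10 *
	 * fragment_size dwords; the pages_addr path instead reserves
	 * nptes * 2 extra dwords for the PTE data placed at the end of the
	 * IB below.
	 */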

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	if (pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
	}

	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
			     owner, false);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
	if (r)
		goto error_free;

	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_free;

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->root.base.bo, f, true);
	dma_fence_put(*fence);
	*fence = f;
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_fence *exclusive,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
	uint64_t pfn, start = mapping->start;
	int r;

	/* normally, bo_va->flags only contains READABLE and WRITEABLE bits,
	 * but just in case we filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		dma_addr_t *dma_addr = NULL;
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			uint64_t count;

			max_entries = min(max_entries, 16ull * 1024ull);
			for (count = 1;
			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			     ++count) {
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
			addr += pfn << PAGE_SHIFT;
		}

		last = min((uint64_t)mapping->last, start + max_entries - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
		start = last + 1;

	} while (unlikely(start != mapping->last + 1));

	return 0;
}
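
/*
 * Illustrative note on the system memory path above: the loop over
 * pages_addr[] counts how many CPU pages are physically contiguous; runs of
 * at least min_linear_pages (one full fragment) are mapped directly, shorter
 * runs fall back to per page translation through dma_addr, and in that path
 * a chunk is additionally capped at 16K entries before
 * amdgpu_vm_bo_update_mapping() is called for it.
 */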

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_mem_reg *mem;
	struct drm_mm_node *nodes;
	struct dma_fence *exclusive, **last_update;
	uint64_t flags;
	int r;

	if (clear || !bo) {
		mem = NULL;
		nodes = NULL;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo->tbo.mem;
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
		}
		exclusive = reservation_object_get_excl(bo->tbo.resv);
	}

	if (bo)
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
	else
		flags = 0x0;

	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		bo_va->base.moved = false;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
					       mapping, flags, nodes,
					       last_update);
		if (r)
			return r;
	}

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	spin_lock(&vm->moved_lock);
	list_del_init(&bo_va->base.vm_status);
	spin_unlock(&vm->moved_lock);

	/* If the BO is not in its preferred location add it back to
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
		else
			list_add(&bo_va->base.vm_status, &vm->idle);
	}

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	return 0;
}
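
/*
 * Illustrative summary (not original documentation): everything that needs
 * (re)writing is first gathered on bo_va->invalids - either because the BO
 * moved or because the cleared state changed - then updated per mapping and
 * finally spliced back to bo_va->valids, with bo_va->cleared recording
 * whether the entries were written as cleared.
 */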

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gmc.gmc_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}
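
/*
 * Illustrative note: PRT support stays enabled as long as
 * vm_manager.num_prt_users is non zero; every PRT mapping takes a reference
 * via amdgpu_vm_prt_get() and the matching put is deferred through the fence
 * callback above, so the hardware state is only switched off once the last
 * unmap has actually completed on the GPU.
 */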

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	uint64_t init_pte_value = 0;
	struct dma_fence *f = NULL;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;

		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
						mapping->start, mapping->last,
						init_pte_value, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;

}

/**
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all BOs which are moved are updated in the PTs.
 *
 * Returns:
 * 0 for success.
 *
 * PTs have to be reserved!
 */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *bo_va, *tmp;
	struct list_head moved;
	bool clear;
	int r;

	INIT_LIST_HEAD(&moved);
	spin_lock(&vm->moved_lock);
	list_splice_init(&vm->moved, &moved);
	spin_unlock(&vm->moved_lock);

	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
		struct reservation_object *resv = bo_va->base.bo->tbo.resv;

		/* Per VM BOs never need to be cleared in the page tables */
		if (resv == vm->root.base.bo->tbo.resv)
			clear = false;
		/* Try to reserve the BO to avoid clearing its ptes */
		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
			clear = false;
		/* Somebody else is using the BO right now */
		else
			clear = true;

		r = amdgpu_vm_bo_update(adev, bo_va, clear);
		if (r) {
			spin_lock(&vm->moved_lock);
			list_splice(&moved, &vm->moved);
			spin_unlock(&vm->moved_lock);
			return r;
		}

		if (!clear && resv != vm->root.base.bo->tbo.resv)
			reservation_object_unlock(resv);

	}

	return 0;
}
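
/*
 * Illustrative note on the clear decision above: BOs sharing the root PD
 * reservation are always updated in place; other BOs are updated in place
 * only if their reservation can be taken without blocking, otherwise their
 * PTEs are cleared for now and the full update presumably happens the next
 * time the BO is validated with its reservation held.
 */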

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm and to the list of bos associated with it.
 *
 * Returns:
 * Newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);

	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	return bo_va;
}

/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
	    !bo_va->base.moved) {
		spin_lock(&vm->moved_lock);
		list_move(&bo_va->base.vm_status, &vm->moved);
		spin_unlock(&vm->moved_lock);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
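
/*
 * Illustrative example (made-up numbers): mapping 1 MiB of a BO at GPU VA
 * 0x100000 gives eaddr = 0x1fffff; after dividing by AMDGPU_GPU_PAGE_SIZE the
 * interval tree is queried for pages 0x100..0x1ff, and any overlap with an
 * existing mapping fails with -EINVAL instead of silently replacing it (see
 * amdgpu_vm_bo_replace_map() below for the replacing variant).
 */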

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			before->bo_va = tmp->bo_va;
			list_add(&before->list, &tmp->bo_va->invalids);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			after->bo_va = tmp->bo_va;
			list_add(&after->list, &tmp->bo_va->invalids);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
|
|
|
|
|
2017-09-06 22:55:16 +08:00
|
|
|
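/*
 * Illustrative example (not from the driver; addresses are made up): assume
 * a single mapping currently covers GPU VA 0x000000-0x3fffff (4 MiB) and a
 * caller clears the middle megabyte:
 *
 *	r = amdgpu_vm_bo_clear_mappings(adev, vm, 0x100000, 0x100000);
 *
 * saddr/size are byte based and converted to GPU page numbers above.  The
 * original mapping ends up on vm->freed so its PTEs can be cleared later,
 * while "before" keeps the pages for 0x000000-0x0fffff and "after" keeps the
 * pages for 0x200000-0x3fffff.
 */
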
/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address
 *
 * Find a mapping by its address.
 *
 * Returns:
 * The amdgpu_bo_va_mapping matching addr, or NULL if none is found.
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}

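/*
 * Illustrative usage (sketch only): the interval tree is indexed in GPU page
 * numbers, so unlike amdgpu_vm_bo_clear_mappings() a byte address has to be
 * converted by the caller first, roughly:
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (mapping && mapping->bo_va && mapping->bo_va->base.bo)
 *		bo = mapping->bo_va->base.bo;
 */
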
/**
 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
 *
 * @vm: the requested vm
 * @ticket: CS ticket
 *
 * Trace all mappings of BOs reserved during a command submission.
 */
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va_mapping *mapping;

	if (!trace_amdgpu_vm_bo_cs_enabled())
		return;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
		if (mapping->bo_va && mapping->bo_va->base.bo) {
			struct amdgpu_bo *bo;

			bo = mapping->bo_va->base.bo;
			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
				continue;
		}

		trace_amdgpu_vm_bo_cs(mapping);
	}
}

/**
 * amdgpu_vm_bo_rmv - remove a BO from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->base.vm;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->moved_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->moved_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	/* shadow bo doesn't have bo base, its validation needs its parent */
	if (bo->parent && bo->parent->shadow == bo)
		bo = bo->parent;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		struct amdgpu_vm *vm = bo_base->vm;
		bool was_moved = bo_base->moved;

		bo_base->moved = true;
		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			if (bo->tbo.type == ttm_bo_type_kernel)
				list_move(&bo_base->vm_status, &vm->evicted);
			else
				list_move_tail(&bo_base->vm_status,
					       &vm->evicted);
			continue;
		}

		if (was_moved)
			continue;

		if (bo->tbo.type == ttm_bo_type_kernel) {
			list_move(&bo_base->vm_status, &vm->relocated);
		} else {
			spin_lock(&bo_base->vm->moved_lock);
			list_move(&bo_base->vm_status, &vm->moved);
			spin_unlock(&bo_base->vm->moved_lock);
		}
	}
}

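/*
 * Summary of the transitions above (descriptive comment only):
 *
 *	evicted and sharing the root PD reservation -> vm->evicted
 *	    (page table BOs at the head, per-VM user BOs at the tail)
 *	already marked as moved                     -> no list change
 *	page table BO (ttm_bo_type_kernel)          -> vm->relocated
 *	everything else                             -> vm->moved (under moved_lock)
 */
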
/**
 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
 *
 * @vm_size: VM size
 *
 * Returns:
 * VM page table size as a power of two
 */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that split equally between PD and PTs.
	 */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}

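/*
 * Worked example (illustrative only): for vm_size = 64 GB the space covers
 * ilog2(64) + 18 = 24 bits worth of 4 KiB pages.  Since 64 > 8 the bits are
 * split roughly evenly and the function returns (24 + 3) / 2 = 13, i.e. one
 * page table covers 2^13 pages and the directory handles the remaining 11
 * bits.  For vm_size = 8 GB it returns 21 - 9 = 12, which keeps the PD at
 * exactly 512 entries (4 KiB).
 */
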
/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @min_vm_size: the minimum vm size in GB if it's set auto
 * @fragment_size_default: Default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 *
 * Determine the VM size, number of page table levels, block size and
 * fragment size from the module parameters and the amount of system memory.
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	unsigned int max_size = 1 << (max_bits - 30);
	unsigned int vm_size;
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	} else {
		struct sysinfo si;
		unsigned int phys_ram_gb;

		/* Optimal VM size depends on the amount of physical
		 * RAM available. Underlying requirements and
		 * assumptions:
		 *
		 *  - Need to map system memory and VRAM from all GPUs
		 *     - VRAM from other GPUs not known here
		 *     - Assume VRAM <= system memory
		 *  - On GFX8 and older, VM space can be segmented for
		 *    different MTYPEs
		 *  - Need to allow room for fragmentation, guard pages etc.
		 *
		 * This adds up to a rough guess of system memory x3.
		 * Round up to power of two to maximize the available
		 * VM size with the given page table size.
		 */
		si_meminfo(&si);
		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
			       (1 << 30) - 1) >> 30;
		vm_size = roundup_pow_of_two(
			min(max(phys_ram_gb * 3, min_vm_size), max_size));
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup */
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}

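/*
 * Worked example (illustrative, with made-up parameters): on a machine with
 * 16 GB of system RAM, amdgpu_vm_size/_block_size/_fragment_size left at -1,
 * min_vm_size = 8, max_level = 3 and max_bits = 40:
 *
 *	max_size   = 1 << (40 - 30)             = 1024 GB
 *	vm_size    = roundup_pow_of_two(16 * 3) = 64 GB
 *	max_pfn    = 64ULL << 18                = 2^24 pages
 *	tmp        = DIV_ROUND_UP(24, 9) - 1    = 2  -> num_level = 2,
 *						       root_level = AMDGPU_VM_PDB1
 *	block_size = 9 (num_level > 1), fragment_size = fragment_size_default
 *
 * i.e. a three level page table walk for a 64 GB per-process VM.
 */
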
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates whether it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	spin_lock_init(&vm->moved_lock);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
				  adev->vm_manager.vm_pte_num_rqs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;
	vm->bulk_moveable = true;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
	r = amdgpu_bo_create(adev, &bp, &root);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_free_root;

	r = amdgpu_vm_clear_bo(adev, vm, root,
			       adev->vm_manager.root_level,
			       vm->pte_support_ats);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);
	vm->fault_credit = 16;

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_destroy(&vm->entity);

	return r;
}

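/*
 * Illustrative call site (sketch only): a GFX VM for a new DRM file
 * descriptor is typically created along the lines of
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *
 * while compute users either pass AMDGPU_VM_CONTEXT_COMPUTE or convert an
 * existing GFX VM with amdgpu_vm_make_compute() below.
 */
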
/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_support_ats
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		return r;

	/* Sanity checks */
	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
		r = -EINVAL;
		goto error;
	}

	/* Check if PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
				       adev->vm_manager.root_level,
				       pte_support_ats);
		if (r)
			goto error;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	vm->pte_support_ats = pte_support_ats;
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		vm->pasid = 0;
	}

	/* Free the shadow bo for compute VM */
	amdgpu_bo_unref(&vm->root.base.bo->shadow);

error:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
}

/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @parent: PD/PT starting level to free
 * @level: level of parent structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

	if (parent->base.bo) {
		list_del(&parent->base.bo_list);
		list_del(&parent->base.vm_status);
		amdgpu_bo_unref(&parent->base.bo->shadow);
		amdgpu_bo_unref(&parent->base.bo);
	}

	if (parent->entries)
		for (i = 0; i < num_entries; i++)
			amdgpu_vm_free_levels(adev, &parent->entries[i],
					      level + 1);

	kvfree(parent->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_ih_clear_fault(adev, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_destroy(&vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_levels(adev, &vm->root,
				      adev->vm_manager.root_level);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
 *
 * This function is expected to be called in interrupt context.
 *
 * Returns:
 * True if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, can't track fault credit */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	/* No lock needed. only accessed by IRQ handler */
	if (!vm->fault_credit) {
		/* Too many faults in this VM */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}

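/*
 * Illustrative usage (sketch only): an interrupt handler that decodes a VM
 * fault can consult the credit, which amdgpu_vm_init() starts at 16, to
 * rate limit the more expensive fault processing, roughly:
 *
 *	if (!amdgpu_vm_pasid_fault_credit(adev, entry->pasid))
 *		return 1;	(this VM is out of credit, handle silently)
 */
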
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}

/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only need to reserve a VMID from the gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

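/*
 * Illustrative userspace sketch (not part of the driver): reserving and
 * later releasing a VMID through this ioctl looks roughly like
 *
 *	union drm_amdgpu_vm args = { .in.op = AMDGPU_VM_OP_RESERVE_VMID };
 *
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *	...
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *
 * libdrm wraps this in amdgpu_vm_reserve_vmid()/amdgpu_vm_unreserve_vmid().
 */
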
/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			 struct amdgpu_task_info *task_info)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);

	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		*task_info = vm->task_info;

	spin_unlock(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_set_task_info - Sets VMs task info.
 *
 * @vm: vm for which to set the info
 */
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
	if (!vm->task_info.pid) {
		vm->task_info.pid = current->pid;
		get_task_comm(vm->task_info.task_name, current);

		if (current->group_leader->mm == current->mm) {
			vm->task_info.tgid = current->group_leader->pid;
			get_task_comm(vm->task_info.process_name, current->group_leader);
		}
	}
}