/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
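
/*
 * The interval tree below (amdgpu_vm_it_*) is generated by
 * INTERVAL_TREE_DEFINE() and indexes struct amdgpu_bo_va_mapping by the
 * inclusive [start, last] page range of each mapping.
 */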

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* indicate update pt or its shadow */
	bool shadow;
};

/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: PD/PT level in the hierarchy
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	if (level == 0)
		/* For the root directory */
		return adev->vm_manager.max_pfn >>
			(adev->vm_manager.block_size *
			 adev->vm_manager.num_level);
	else if (level == adev->vm_manager.num_level)
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
	else
		/* Everything in between */
		return 1 << adev->vm_manager.block_size;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: PD/PT level in the hierarchy
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.bo;
	entry->priority = 0;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_validate_level - validate a single page table level
 *
 * @parent: parent page table level
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
				    int (*validate)(void *, struct amdgpu_bo *),
				    void *param)
{
	unsigned i;
	int r;

	if (!parent->entries)
		return 0;

	for (i = 0; i <= parent->last_entry_used; ++i) {
		struct amdgpu_vm_pt *entry = &parent->entries[i];

		if (!entry->bo)
			continue;

		r = validate(param, entry->bo);
		if (r)
			return r;

		/*
		 * Recurse into the sub directory. This is harmless because we
		 * have only a maximum of 5 layers.
		 */
		r = amdgpu_vm_validate_level(entry, validate, param);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	uint64_t num_evictions;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return 0;

	return amdgpu_vm_validate_level(&vm->root, validate, param);
}

/**
 * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
 *
 * @parent: parent PD/PT level whose entries are moved
 *
 * Move the PT BOs of this level (and all levels below it) to the tail
 * of the LRU.
 */
static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
{
	unsigned i;

	if (!parent->entries)
		return;

	for (i = 0; i <= parent->last_entry_used; ++i) {
		struct amdgpu_vm_pt *entry = &parent->entries[i];

		if (!entry->bo)
			continue;

		ttm_bo_move_to_lru_tail(&entry->bo->tbo);
		amdgpu_vm_move_level_in_lru(entry);
	}
}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	amdgpu_vm_move_level_in_lru(&vm->root);
	spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent PD/PT to allocate entries for
 * @saddr: start of the address range
 * @eaddr: end of the address range
 * @level: level of @parent in the page table hierarchy
 *
 * Make sure the page directories and page tables are allocated
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
				  unsigned level)
{
	unsigned shift = (adev->vm_manager.num_level - level) *
		adev->vm_manager.block_size;
	unsigned pt_idx, from, to;
	int r;

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

		parent->entries = drm_calloc_large(num_entries,
						   sizeof(struct amdgpu_vm_pt));
		if (!parent->entries)
			return -ENOMEM;
		memset(parent->entries, 0, num_entries *
		       sizeof(struct amdgpu_vm_pt));
	}

	from = saddr >> shift;
	to = eaddr >> shift;
	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))
		return -EINVAL;

	if (to > parent->last_entry_used)
		parent->last_entry_used = to;

	++level;
	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
		struct reservation_object *resv = vm->root.bo->tbo.resv;
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;

		if (!entry->bo) {
			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
					     AMDGPU_GPU_PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
					     AMDGPU_GEM_CREATE_SHADOW |
					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
					     AMDGPU_GEM_CREATE_VRAM_CLEARED,
					     NULL, resv, &pt);
			if (r)
				return r;

			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
			pt->parent = amdgpu_bo_ref(vm->root.bo);

			entry->bo = pt;
			entry->addr = 0;
		}

		if (level < adev->vm_manager.num_level) {
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);
			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
						   sub_eaddr, level);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	uint64_t last_pfn;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;
	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}

/**
 * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
				    struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
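
/**
 * amdgpu_vm_reserved_vmid_ready - check if a reserved VMID is available
 *
 * @vm: vm to check
 * @vmhub: VMHUB the VMID would be used on
 *
 * Returns true if @vm already has a VMID reserved for @vmhub.
 */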
static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
{
	return !!vm->reserved_vmid[vmhub];
}

/* id_mgr->lock must be held */
static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
					       struct amdgpu_ring *ring,
					       struct amdgpu_sync *sync,
					       struct dma_fence *fence,
					       struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence *updates = sync->last_vm_update;
	int r = 0;
	struct dma_fence *flushed, *tmp;
	bool needs_flush = false;

	flushed = id->flushed_updates;
	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
	    (atomic64_read(&id->owner) != vm->client_id) ||
	    (job->vm_pd_addr != id->pd_gpu_addr) ||
	    (updates && (!flushed || updates->context != flushed->context ||
			 dma_fence_is_later(updates, flushed))) ||
	    (!id->last_flush || (id->last_flush->context != fence_context &&
				 !dma_fence_is_signaled(id->last_flush)))) {
		needs_flush = true;
		/* to prevent one context starved by another context */
		id->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&id->active, ring);
		if (tmp) {
			r = amdgpu_sync_fence(adev, sync, tmp);
			return r;
		}
	}

	/* Good we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto out;

	if (updates && (!flushed || updates->context != flushed->context ||
			dma_fence_is_later(updates, flushed))) {
		dma_fence_put(id->flushed_updates);
		id->flushed_updates = dma_fence_get(updates);
	}
	id->pd_gpu_addr = job->vm_pd_addr;
	atomic64_set(&id->owner, vm->client_id);
	job->vm_needs_flush = needs_flush;
	if (needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vm_id = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);
out:
	return r;
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job which will use the allocated VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct dma_fence **fences;
	unsigned i;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}
	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences) {
		mutex_unlock(&id_mgr->lock);
		return -ENOMEM;
	}
	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		dma_fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&id_mgr->lock);
		return 0;
	}
	kfree(fences);
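
	/*
	 * Try to reuse a VMID this client already owns: skip IDs touched by
	 * a GPU reset, owned by another client or pointing at a different
	 * page directory, and on pre-Vega10 ASICs skip IDs that would still
	 * need a flush (concurrent flushes are only possible on Vega10+).
	 */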

	job->vm_needs_flush = false;
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
		struct dma_fence *flushed;
		bool needs_flush = false;

		/* Check all the prerequisites to using this VMID */
		if (amdgpu_vm_had_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush ||
		    (id->last_flush->context != fence_context &&
		     !dma_fence_is_signaled(id->last_flush)))
			needs_flush = true;

		flushed = id->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 */
		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
			continue;

		/* Good we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
		}

		if (needs_flush)
			goto needs_flush;
		else
			goto no_flush_needed;
	}

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	id->pd_gpu_addr = job->vm_pd_addr;
	dma_fence_put(id->flushed_updates);
	id->flushed_updates = dma_fence_get(updates);
	atomic64_set(&id->owner, vm->client_id);

needs_flush:
	job->vm_needs_flush = true;
	dma_fence_put(id->last_flush);
	id->last_flush = NULL;

no_flush_needed:
	list_move_tail(&id->list, &id_mgr->ids_lru);

	job->vm_id = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
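
/**
 * amdgpu_vm_free_reserved_vmid - give a reserved VMID back to the ID manager
 *
 * @adev: amdgpu_device pointer
 * @vm: vm that owns the reserved VMID
 * @vmhub: VMHUB the VMID belongs to
 *
 * Put the reserved VMID back on the LRU list and drop the reservation count.
 */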
static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}
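
/**
 * amdgpu_vm_alloc_reserved_vmid - reserve a VMID exclusively for this VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm requesting the reservation
 * @vmhub: VMHUB to reserve the VMID on
 *
 * Take the first idle VMID off the LRU list and dedicate it to @vm.
 * Returns 0 on success, -EINVAL if the reservation limit is exceeded.
 */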
static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr;
	struct amdgpu_vm_id *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limitation of reserved vmid\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}
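
/*
 * amdgpu_vm_ring_has_compute_vm_bug - check if a compute ring needs the
 * VM flush workaround: gfx7 always does, gfx8 only with MEC firmware
 * older than version 673.
 */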
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct amdgpu_ip_block *ip_block;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		/* only compute rings */
		return false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (!ip_block)
		return false;

	if (ip_block->version->major <= 7) {
		/* gfx7 has no workaround */
		return true;
	} else if (ip_block->version->major == 8) {
		if (adev->gfx.mec_fw_version >= 673)
			/* gfx8 is fixed in MEC firmware 673 */
			return false;
		else
			return true;
	}
	return false;
}
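
/* Let the GART backend translate an MC address if the ASIC requires it */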
static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
	u64 addr = mc_addr;

	if (adev->gart.gart_funcs->adjust_mc_addr)
		addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);

	return addr;
}
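
/**
 * amdgpu_vm_need_pipeline_sync - check if a pipeline sync is needed for a job
 *
 * @ring: ring the job will be submitted to
 * @job: job which will use the VMID
 *
 * Returns true if the job needs a VM flush or a GDS switch, or if a GPU
 * reset happened since the VMID was last used.
 */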
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush ||
		amdgpu_vm_ring_has_compute_vm_bug(ring);

	if (job->vm_id == 0)
		return false;
	id = &id_mgr->ids[job->vm_id];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vm_had_gpu_reset(adev, id))
		return true;
	if (!vm_flush_needed && !gds_switch_needed)
		return false;
	return true;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job carrying the VMID and flush state
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vm_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
	}

	if (!vm_flush_needed && !gds_switch_needed)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
		struct dma_fence *fence;

		trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = fence;
		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: VMHUB the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
			unsigned vmid)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];

	atomic64_set(&id->owner, 0);
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset the VMIDs to force a flush on next use.
 */
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vm_reset_id(adev, i, j);
	}
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);
	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	uint64_t src = (params->src + (addr >> 12) * 8);

	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}

/*
 * amdgpu_vm_update_level - update a single level in the hierarchy
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent directory
 * @level: level of @parent in the page table hierarchy
 *
 * Makes sure all entries in @parent are up to date.
 * Returns 0 for success, error for failure.
 */
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	struct amdgpu_bo *shadow;
	struct amdgpu_ring *ring;
	uint64_t pd_addr, shadow_addr;
	uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *fence = NULL;
	int r;

	if (!parent->entries)
		return 0;
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += parent->last_entry_used * 6;

	pd_addr = amdgpu_bo_gpu_offset(parent->bo);

	shadow = parent->bo->shadow;
	if (shadow) {
		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
		if (r)
			return r;
		shadow_addr = amdgpu_bo_gpu_offset(shadow);
		ndw *= 2;
	} else {
		shadow_addr = 0;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.ib = &job->ibs[0];

	/* walk over the address space and update the directory */
	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
		struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		if (bo->shadow) {
			struct amdgpu_bo *pt_shadow = bo->shadow;

			r = amdgpu_ttm_bind(&pt_shadow->tbo,
					    &pt_shadow->tbo.mem);
			if (r)
				return r;
		}

		pt = amdgpu_bo_gpu_offset(bo);
		if (parent->entries[pt_idx].addr == pt)
			continue;

		parent->entries[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt) ||
		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {

			if (count) {
				uint64_t pt_addr =
					amdgpu_vm_adjust_mc_addr(adev, last_pt);

				if (shadow)
					amdgpu_vm_do_set_ptes(&params,
							      last_shadow,
							      pt_addr, count,
							      incr,
							      AMDGPU_PTE_VALID);

				amdgpu_vm_do_set_ptes(&params, last_pde,
						      pt_addr, count, incr,
						      AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_shadow = shadow_addr + pt_idx * 8;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count) {
		uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);

		if (vm->root.bo->shadow)
			amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
					      count, incr, AMDGPU_PTE_VALID);

		amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
				      count, incr, AMDGPU_PTE_VALID);
	}

	if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
	} else {
		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		if (shadow)
			amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
					 AMDGPU_FENCE_OWNER_VM);

		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(parent->bo, fence, true);
		dma_fence_put(vm->last_dir_update);
		vm->last_dir_update = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

		if (!entry->bo)
			continue;

		r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
		if (r)
			return r;
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
}

/**
 * amdgpu_vm_get_pt - find the page table for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 *
 * Find the page table BO for a virtual address, return NULL when none found.
 */
static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
					  uint64_t addr)
{
	struct amdgpu_vm_pt *entry = &p->vm->root;
	unsigned idx, level = p->adev->vm_manager.num_level;

	while (entry->entries) {
		idx = addr >> (p->adev->vm_manager.block_size * level--);
		idx %= amdgpu_bo_size(entry->bo) / 8;
		entry = &entry->entries[idx];
	}

	if (level)
		return NULL;

	return entry->bo;
}
/**
|
|
|
|
* amdgpu_vm_update_ptes - make sure that page tables are valid
|
|
|
|
*
|
2016-08-04 20:52:50 +08:00
|
|
|
* @params: see amdgpu_pte_update_params definition
|
2015-04-21 04:55:21 +08:00
|
|
|
* @vm: requested vm
|
|
|
|
* @start: start of GPU address range
|
|
|
|
* @end: end of GPU address range
|
2016-06-07 06:13:26 +08:00
|
|
|
* @dst: destination address to map to, the next dst inside the function
|
2015-04-21 04:55:21 +08:00
|
|
|
* @flags: mapping flags
|
|
|
|
*
|
2016-01-26 19:17:11 +08:00
|
|
|
* Update the page tables in the range @start - @end.
|
2015-04-21 04:55:21 +08:00
|
|
|
*/
|
2016-08-04 21:02:49 +08:00
|
|
|
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
|
2016-01-26 18:40:46 +08:00
|
|
|
uint64_t start, uint64_t end,
|
2016-09-21 16:19:19 +08:00
|
|
|
uint64_t dst, uint64_t flags)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
2017-03-29 16:08:32 +08:00
|
|
|
struct amdgpu_device *adev = params->adev;
|
|
|
|
const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
|
2016-01-26 19:37:49 +08:00
|
|
|
|
2016-08-05 19:56:35 +08:00
|
|
|
uint64_t cur_pe_start, cur_nptes, cur_dst;
|
2016-06-07 06:13:26 +08:00
|
|
|
uint64_t addr; /* next GPU address to be updated */
|
2016-06-07 06:21:09 +08:00
|
|
|
struct amdgpu_bo *pt;
|
|
|
|
unsigned nptes; /* next number of ptes to be updated */
|
|
|
|
uint64_t next_pe_start;
|
|
|
|
|
|
|
|
/* initialize the variables */
|
|
|
|
addr = start;
|
2016-10-25 21:52:28 +08:00
|
|
|
pt = amdgpu_vm_get_pt(params, addr);
|
2017-03-29 08:36:12 +08:00
|
|
|
if (!pt) {
|
|
|
|
pr_err("PT not found, aborting update_ptes\n");
|
2016-10-25 21:52:28 +08:00
|
|
|
return;
|
2017-03-29 08:36:12 +08:00
|
|
|
}
|
2016-10-25 21:52:28 +08:00
|
|
|
|
2016-08-15 11:46:21 +08:00
|
|
|
if (params->shadow) {
|
|
|
|
if (!pt->shadow)
|
|
|
|
return;
|
2016-09-28 18:27:37 +08:00
|
|
|
pt = pt->shadow;
|
2016-08-15 11:46:21 +08:00
|
|
|
}
|
2016-06-07 06:21:09 +08:00
|
|
|
if ((addr & ~mask) == (end & ~mask))
|
|
|
|
nptes = end - addr;
|
|
|
|
else
|
2017-03-29 16:08:32 +08:00
|
|
|
nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
|
2016-06-07 06:21:09 +08:00
|
|
|
|
|
|
|
cur_pe_start = amdgpu_bo_gpu_offset(pt);
|
|
|
|
cur_pe_start += (addr & mask) * 8;
|
2016-08-05 19:56:35 +08:00
|
|
|
cur_nptes = nptes;
|
2016-06-07 06:21:09 +08:00
|
|
|
cur_dst = dst;
|
|
|
|
|
|
|
|
/* for next ptb*/
|
|
|
|
addr += nptes;
|
|
|
|
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
/* walk over the address space and update the page tables */
|
2016-06-07 06:21:09 +08:00
|
|
|
while (addr < end) {
|
2016-10-25 21:52:28 +08:00
|
|
|
pt = amdgpu_vm_get_pt(params, addr);
|
2017-03-29 08:36:12 +08:00
|
|
|
if (!pt) {
|
|
|
|
pr_err("PT not found, aborting update_ptes\n");
|
2016-10-25 21:52:28 +08:00
|
|
|
return;
|
2017-03-29 08:36:12 +08:00
|
|
|
}
|
2016-10-25 21:52:28 +08:00
|
|
|
|
2016-08-15 11:46:21 +08:00
|
|
|
if (params->shadow) {
|
|
|
|
if (!pt->shadow)
|
|
|
|
return;
|
2016-09-28 18:27:37 +08:00
|
|
|
pt = pt->shadow;
|
2016-08-15 11:46:21 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
if ((addr & ~mask) == (end & ~mask))
|
|
|
|
nptes = end - addr;
|
|
|
|
else
|
2017-03-29 16:08:32 +08:00
|
|
|
nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2016-06-07 06:13:26 +08:00
|
|
|
next_pe_start = amdgpu_bo_gpu_offset(pt);
|
|
|
|
next_pe_start += (addr & mask) * 8;
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2016-08-12 18:59:59 +08:00
|
|
|
if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
|
|
|
|
((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
|
2016-06-07 06:14:57 +08:00
|
|
|
/* The next ptb is consecutive to current ptb.
|
2016-08-12 19:29:18 +08:00
|
|
|
* Don't call the update function now.
|
2016-06-07 06:14:57 +08:00
|
|
|
* Will update two ptbs together in future.
|
|
|
|
*/
|
2016-08-05 19:56:35 +08:00
|
|
|
cur_nptes += nptes;
|
2016-06-07 06:14:57 +08:00
|
|
|
} else {
|
2016-08-12 19:29:18 +08:00
|
|
|
params->func(params, cur_pe_start, cur_dst, cur_nptes,
|
|
|
|
AMDGPU_GPU_PAGE_SIZE, flags);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2016-06-07 06:13:26 +08:00
|
|
|
cur_pe_start = next_pe_start;
|
2016-08-05 19:56:35 +08:00
|
|
|
cur_nptes = nptes;
|
2016-06-07 06:13:26 +08:00
|
|
|
cur_dst = dst;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
2016-06-07 06:21:09 +08:00
|
|
|
/* for next ptb*/
|
2015-04-21 04:55:21 +08:00
|
|
|
addr += nptes;
|
|
|
|
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
2016-08-12 19:29:18 +08:00
|
|
|
params->func(params, cur_pe_start, cur_dst, cur_nptes,
|
|
|
|
AMDGPU_GPU_PAGE_SIZE, flags);
|
2016-08-05 19:56:35 +08:00
|
|
|
}
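
/*
 * Worked example of the chunking above (values for illustration only):
 * with 512 PTEs per page table, mask = 511. For start = 1000 and end = 5000
 * the first chunk covers nptes = 512 - (1000 & 511) = 24 entries up to the
 * page table boundary at 1024; subsequent chunks are 512 entries each until
 * the final partial chunk ending at 5000. Chunks whose PTE addresses are
 * consecutive are merged into a single params->func() call as long as the
 * merged size stays within AMDGPU_VM_MAX_UPDATE_SIZE.
 */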

/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
				uint64_t start, uint64_t end,
				uint64_t dst, uint64_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;

	uint64_t frag_start = ALIGN(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	/* system pages are not continuous */
	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		amdgpu_vm_update_ptes(params, start, end, dst, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (start != frag_start) {
		amdgpu_vm_update_ptes(params, start, frag_start,
				      dst, flags);
		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
	}

	/* handle the area in the middle */
	amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
			      flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != end) {
		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
		amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
	}
}
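
/*
 * Worked example of the fragment split above: with 64KB fragments, i.e.
 * 16 GPU pages of 4KB each (illustrative, derived from the "SI and newer
 * are optimized for 64KB" note), frag_align = 16. A request covering PTEs
 * [5, 100) gives frag_start = ALIGN(5, 16) = 16 and
 * frag_end = 100 & ~15 = 96, so [5, 16) and [96, 100) are written with
 * plain 4KB flags while [16, 96) gets the additional fragment flags.
 */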

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct dma_fence *exclusive,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint64_t flags, uint64_t addr,
				       struct dma_fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *f = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.src = src;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;

	/* padding, etc. */
	ndw = 64;

	if (src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

		params.func = amdgpu_vm_do_copy_ptes;

	} else if (pages_addr) {
		/* copy commands needed */
		ndw += ncmds * 7;

		/* and also PTEs */
		ndw += nptes * 2;

		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;

		params.func = amdgpu_vm_do_set_ptes;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	if (!src && pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
	}

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
	if (r)
		goto error_free;

	params.shadow = true;
	amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	params.shadow = false;
	amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->root.bo, f, true);
	dma_fence_put(*fence);
	*fence = f;
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
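
/*
 * Worked example of the command-buffer sizing above (values illustrative):
 * updating nptes = 2048 entries with a 9-bit block size gives
 * ncmds = (2048 >> 9) + 1 = 5. The "set page" path then reserves
 * ndw = 64 + 5 * 10 + 2 * 10 = 134 dwords, while the copy path with
 * pages_addr additionally reserves nptes * 2 dwords at the end of the IB
 * to hold the GART-translated PTE values themselves.
 */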

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_fence *exclusive,
				      uint64_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	uint64_t pfn, src = 0, start = mapping->start;
	int r;

	/* normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case the flags are filtered here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			if (flags == gtt_flags)
				src = adev->gart.table_addr +
					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
			else
				max_entries = min(max_entries, 16ull * 1024ull);
			addr = 0;
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
		}
		addr += pfn << PAGE_SHIFT;

		last = min((uint64_t)mapping->last, start + max_entries - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		pfn += last - start + 1;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
		start = last + 1;

	} while (unlikely(start != mapping->last + 1));

	return 0;
}
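
/*
 * Example of the splitting above (node sizes illustrative): a VRAM mapping
 * whose backing drm_mm nodes are 256 and 512 pages long is flushed in two
 * amdgpu_vm_bo_update_mapping() calls, one per physically contiguous node,
 * because max_entries is limited to the remainder of the current node. GTT
 * mappings that cannot reuse the GART table (flags != gtt_flags) are
 * additionally capped at 16K entries per chunk so each update still fits
 * into one SDMA IB.
 */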

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	uint64_t gtt_flags, flags;
	struct ttm_mem_reg *mem;
	struct drm_mm_node *nodes;
	struct dma_fence *exclusive;
	int r;

	if (clear || !bo_va->bo) {
		mem = NULL;
		nodes = NULL;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo_va->bo->tbo.mem;
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
			ttm = container_of(bo_va->bo->tbo.ttm, struct
					   ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
		}
		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
	}

	if (bo_va->bo) {
		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
			adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
			flags : 0;
	} else {
		flags = 0x0;
		gtt_flags = ~0x0;
	}

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
					       gtt_flags, pages_addr, vm,
					       mapping, flags, nodes,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (clear)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gart.gart_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gart.gart_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gart.gart_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct reservation_object *resv = vm->root.bo->tbo.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct dma_fence *f = NULL;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
						mapping->start, mapping->last,
						0, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the page table update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, true);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	if (bo)
		list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_vm *vm = bo_va->vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->list);
	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	return 0;
}
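
/*
 * Example of the address conversion above: mapping 1MB at GPU VA 0x100000
 * gives eaddr = 0x100000 + 0x100000 - 1 = 0x1fffff in bytes; after dividing
 * by the 4KB GPU page size the interval tree stores the mapping as GPU page
 * numbers [0x100, 0x1ff], which is also the unit used for mapping->start
 * and mapping->last throughout the rest of this file.
 */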

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			list_add(&before->list, &tmp->list);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			list_add(&after->list, &tmp->list);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
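
/*
 * Example of the range split above (page numbers illustrative): clearing
 * pages [100, 199] while a single mapping covers [50, 299] produces a
 * "before" remainder [50, 99] keeping the original offset, an "after"
 * remainder [200, 299] whose offset is advanced by
 * after->start - tmp->start = 150, and the clipped middle piece [100, 199]
 * which is queued on vm->freed so that amdgpu_vm_clear_freed() can
 * invalidate its PTEs.
 */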

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
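
/*
 * Worked example of the block size computation above: for vm_size = 4 (GB),
 * bits = ilog2(4) + 18 = 20 and the block size is 20 - 9 = 11, keeping the
 * page directory at 512 eight-byte entries, i.e. one 4K page. For
 * vm_size = 64 (GB), bits = 6 + 18 = 24 and the block size becomes
 * (24 + 3) / 2 = 13, splitting the page-index bits roughly evenly between
 * the page directory and the page tables.
 */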

/**
 * amdgpu_vm_adjust_size - adjust vm size and block size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size to use if the module parameter is set to auto
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
{
	/* adjust vm size first */
	if (amdgpu_vm_size == -1)
		adev->vm_manager.vm_size = vm_size;
	else
		adev->vm_manager.vm_size = amdgpu_vm_size;

	/* block size depends on vm size */
	if (amdgpu_vm_block_size == -1)
		adev->vm_manager.block_size =
			amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
	else
		adev->vm_manager.block_size = amdgpu_vm_block_size;

	DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
		 adev->vm_manager.vm_size, adev->vm_manager.block_size);
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r, i;

	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */

	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;

	vm->last_dir_update = NULL;

	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			     AMDGPU_GEM_CREATE_SHADOW |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
			     NULL, NULL, &vm->root.bo);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->root.bo, false);
	if (r)
		goto error_free_root;

	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
	amdgpu_bo_unreserve(vm->root.bo);

	return 0;

error_free_root:
	amdgpu_bo_unref(&vm->root.bo->shadow);
	amdgpu_bo_unref(&vm->root.bo);
	vm->root.bo = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}

/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @level: PD/PT starting level to free
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
	unsigned i;

	if (level->bo) {
		amdgpu_bo_unref(&level->bo->shadow);
		amdgpu_bo_unref(&level->bo);
	}

	if (level->entries)
		for (i = 0; i <= level->last_entry_used; i++)
			amdgpu_vm_free_levels(&level->entries[i]);

	drm_free_large(level->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_levels(&vm->root);
	dma_fence_put(vm->last_dir_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vm_free_reserved_vmid(adev, vm, i);
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vm_reset_id(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
			struct amdgpu_vm_id *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}

int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently we only need to reserve VMIDs from the gfxhub */
		r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
						  AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}