/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);
static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->bo.pin_count > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);
	else
		dma_resv_fini(&bo->base._resv);

	kfree(nvbo);
}

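/*
 * Round the 64-bit value @x up to the next multiple of @y.  do_div() is used
 * so the 64-by-32 division also works on 32-bit architectures, where a plain
 * "/" on a u64 would require libgcc helpers.
 */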
static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

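/*
 * Adjust the requested size and alignment for the buffer: pre-Tesla chips
 * with a tiling mode need the size rounded to the tile-region granularity and
 * a larger base alignment, while newer chips only need rounding to the GPU
 * page size chosen for the buffer.  Everything is finally rounded up to the
 * CPU PAGE_SIZE.
 */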
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

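/*
 * Allocate and initialise the driver-side state for a buffer object: the
 * memory "kind"/compression derived from the tile flags, the tiling mode, and
 * the GPU page size the buffer will be mapped with.  The TTM object itself is
 * not created here; callers follow up with nouveau_bo_init().
 */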
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0))
		return ERR_PTR(-EINVAL);

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}

int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	int ret;

	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}

int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	nvbo->bo.base.size = size;
	dma_resv_init(&nvbo->bo.base._resv);
	drm_vma_node_reset(&nvbo->bo.base.vma_node);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}

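/*
 * Translate a NOUVEAU_GEM_DOMAIN_* mask into the array of TTM placements the
 * buffer may live in, appended in order: VRAM, then GART, then system memory.
 */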
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		pl[*n].mem_type = TTM_PL_VRAM;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[(*n)++].flags = 0;
	}
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u64 vram_size = drm->client.device.info.ram_size;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.base.size < vram_size / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = (vram_size / 2) >> PAGE_SHIFT;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = (vram_size / 2) >> PAGE_SHIFT;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement, domain);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy);

	set_placement_range(nvbo, domain);
}

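/*
 * Pin a buffer into the requested domain.  If the caller asks for contiguous
 * VRAM and the buffer wasn't allocated contiguously, it is first evicted to
 * GART so that re-validation can produce a contiguous allocation.  The
 * driver's vram/gart "available" counters are only adjusted on the first pin.
 */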
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->bo.pin_count) {
		bool error = evict;

		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->resource->mem_type, domain);
			ret = -EBUSY;
		}
		ttm_bo_pin(&nvbo->bo);
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nouveau_bo_placement_set(nvbo, domain, 0);
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;

	ttm_bo_pin(&nvbo->bo);

	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->base.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->base.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ttm_bo_unpin(&nvbo->bo);
	if (!nvbo->bo.pin_count) {
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->base.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->base.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return 0;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

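/*
 * Flush CPU caches for a buffer that is about to be accessed by the GPU.
 * This is only needed for non-coherent mappings; runs of physically
 * contiguous pages are coalesced so each run needs just one
 * dma_sync_single_for_device() call.
 */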
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma || !ttm_dma->dma_address)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
		i += num_pages;
	}
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma || !ttm_dma->dma_address)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}

		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
		i += num_pages;
	}
}

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static int
nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}

static void
nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	if (drm_drv_uses_atomic_modeset(drm->dev))
		mutex_lock(&cli->mutex);
	else
		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

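/*
 * Pick the engine used for buffer moves.  The _methods table lists copy
 * classes newest-first; the first one that can be instantiated on the copy
 * engine (or graphics) channel is used.  If none can be set up, drm->ttm.move
 * stays NULL and nouveau_bo_move() falls back to a CPU memcpy.
 */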
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
				 struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg)
		nvbo->offset = (new_reg->start << PAGE_SHIFT);
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

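/*
 * TTM move callback.  Direct VRAM<->SYSTEM moves are refused with -EMULTIHOP
 * so TTM retries them through a temporary GART placement; everything else is
 * either a no-op move, a hardware copy via drm->ttm.move when a copy engine
 * was initialised, or a ttm_bo_move_memcpy() fallback.
 */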
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg,
		struct ttm_place *hop)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = bo->resource;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (new_reg->mem_type == TTM_PL_TT) {
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
		if (ret)
			return ret;
	}

	nouveau_bo_move_ntfy(bo, new_reg);
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out_ntfy;

	if (nvbo->bo.pin_count)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			goto out_ntfy;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_SYSTEM &&
	    new_reg->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_TT &&
	    new_reg->mem_type == TTM_PL_SYSTEM) {
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if ((old_reg->mem_type == TTM_PL_SYSTEM &&
		     new_reg->mem_type == TTM_PL_VRAM) ||
		    (old_reg->mem_type == TTM_PL_VRAM &&
		     new_reg->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}
		ret = nouveau_bo_move_m2mf(bo, evict, ctx,
					   new_reg);
	} else
		ret = -ENODEV;

	if (ret) {
		/* Fallback to software copy. */
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
	}

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}
out_ntfy:
	if (ret) {
		nouveau_bo_move_ntfy(bo, bo->resource);
	}
	return ret;
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

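/*
 * Fill in reg->bus so the resource can be CPU-mapped: through the AGP
 * aperture, directly for untiled GART memory, or through BAR1 for VRAM and
 * tiled memory (using an nvif mapping handle on NV50+).  If BAR space runs
 * out (-ENOSPC), the least recently used io-reserved buffer is unmapped and
 * the reservation is retried.
 */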
static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvif_mmu *mmu = &drm->client.mmu;
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
			reg->bus.caching = ttm_write_combined;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
			reg->bus.caching = ttm_uncached;
		else
			reg->bus.caching = ttm_write_combined;

		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
			goto retry;
		}

	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

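/*
 * Called on a CPU page fault, before the fault is satisfied, to make sure the
 * buffer is somewhere the CPU can actually reach: tiled buffers sitting in
 * system memory are pushed into GART, and on pre-Tesla GPUs buffers beyond
 * the CPU-mappable part of BAR1 are constrained to the mappable range and
 * revalidated.
 */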
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->resource->mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->resource->mem_type != TTM_PL_SYSTEM)
			return 0;

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->resource->start + bo->resource->num_pages < mappable)
			return 0;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}

		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = 0;
			nvbo->busy_placements[i].lpfn = mappable;
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

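/*
 * Allocate backing pages for a ttm_tt.  Imported dma-buf ("slave") objects
 * already carry their pages in an sg_table, so only the DMA address array is
 * filled in; everything else is handed to the shared TTM page pool.
 */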
static int
nouveau_ttm_tt_populate(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
					       ttm->num_pages);
		return 0;
	}

	drm = nouveau_bdev(bdev);

	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
			  struct ttm_tt *ttm)
{
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (slave)
		return;

	nouveau_ttm_tt_unbind(bdev, ttm);

	drm = nouveau_bdev(bdev);

	return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}

static void
nouveau_ttm_tt_destroy(struct ttm_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}

static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	nouveau_bo_move_ntfy(bo, NULL);
}

struct ttm_device_funcs nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.delete_mem_notify = nouveau_bo_delete_mem_notify,
	.move = nouveau_bo_move,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};