drm/nouveau/mmu: Add correct turing page kinds
Turing introduced a new simplified page kind scheme, reducing the number of
possible page kinds from 256 to 16. It is also the first NVIDIA GPU in which
the highest possible page kind value is not reserved as an "invalid" page
kind.

To address this, the invalid page kind is made an explicit property of the
MMU HAL, and a new table of page kinds is added to the tu102 MMU HAL.

One hardware change not addressed here is that 0x00 is technically no longer
a supported page kind, and pitch surfaces are instead intended to share the
block-linear generic page kind 0x06. However, because that will be a rather
invasive change to nouveau and 0x00 still works fine in practice on Turing
hardware, addressing this new behavior is deferred.

Signed-off-by: James Jones <jajones@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 176ada03e3
parent 72ecb0a6ce
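The pattern throughout the diff below is consistent: each per-chipset kind() HAL now reports both its kind translation table and the value that marks an entry invalid, and callers compare table entries against that reported value instead of a hard-coded 0xff (or 0x7f on nv50). A minimal standalone sketch of that lookup follows; it is illustrative only, uses the tu102 table values shown below, and the example_* names are hypothetical rather than part of nouveau.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for a per-chipset kind() callback: it returns the
 * kind translation table, its length, and the marker value that flags an
 * entry as invalid (0xff on gf100/gm200, 0x7f on nv50, 0x07 on tu102).
 * Table values taken from the tu102 hunk below. */
static const uint8_t *
example_tu102_kind(int *count, uint8_t *invalid)
{
        static const uint8_t kind[16] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
        };
        *count = (int)(sizeof(kind) / sizeof(kind[0]));
        *invalid = 0x07;
        return kind;
}

/* Validation in the style of the vmm_valid() checks after this change:
 * reject a kind that is out of range or that the table maps to the
 * invalid marker, instead of comparing against a hard-coded constant. */
static bool
example_kind_valid(uint8_t kind)
{
        int count;
        uint8_t invalid;
        const uint8_t *table = example_tu102_kind(&count, &invalid);

        return kind < count && table[kind] != invalid;
}

With the tu102 values, example_kind_valid(0x06) returns true while example_kind_valid(0x07) returns false, since kind 0x07 maps to the invalid marker itself.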
@@ -35,7 +35,7 @@ struct nvif_mmu_type_v0 {
 
 struct nvif_mmu_kind_v0 {
         __u8  version;
-        __u8  pad01[1];
+        __u8  kind_inv;
         __u16 count;
         __u8  data[];
 };
@@ -7,6 +7,7 @@ struct nvif_mmu {
         u8  dmabits;
         u8  heap_nr;
         u8  type_nr;
+        u8  kind_inv;
         u16 kind_nr;
         s32 mem;
 
@@ -36,9 +37,8 @@ void nvif_mmu_fini(struct nvif_mmu *);
 static inline bool
 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
 {
-        const u8 invalid = mmu->kind_nr - 1;
         if (kind) {
-                if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+                if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)
                         return false;
         }
         return true;
@@ -121,6 +121,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
                                        kind, argc);
                 if (ret == 0)
                         memcpy(mmu->kind, kind->data, kind->count);
+                mmu->kind_inv = kind->kind_inv;
                 kfree(kind);
         }
 
@@ -30,7 +30,7 @@
  * The value 0xff represents an invalid storage type.
  */
 const u8 *
-gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
 {
         static const u8
         kind[256] = {
@@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
         };
 
         *count = ARRAY_SIZE(kind);
+        *invalid = 0xff;
         return kind;
 }
 
@@ -27,7 +27,7 @@
 #include <nvif/class.h>
 
 const u8 *
-gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
 {
         static const u8
         kind[256] = {
@@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
                 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
         };
         *count = ARRAY_SIZE(kind);
+        *invalid = 0xff;
         return kind;
 }
@@ -27,7 +27,7 @@
 #include <nvif/class.h>
 
 const u8 *
-nv50_mmu_kind(struct nvkm_mmu *base, int *count)
+nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid)
 {
         /* 0x01: no bank swizzle
          * 0x02: bank swizzled
@@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count)
                 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
         };
         *count = ARRAY_SIZE(kind);
+        *invalid = 0x7f;
         return kind;
 }
@@ -35,17 +35,17 @@ struct nvkm_mmu_func {
                 u32 pd_offset;
         } vmm;
 
-        const u8 *(*kind)(struct nvkm_mmu *, int *count);
+        const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
         bool kind_sys;
 };
 
 extern const struct nvkm_mmu_func nv04_mmu;
 
-const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
 
-const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
 
-const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
 
 struct nvkm_mmu_pt {
         union {
@@ -1,5 +1,6 @@
 /*
  * Copyright 2018 Red Hat Inc.
+ * Copyright 2019 NVIDIA Corporation.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,13 +27,26 @@
 
 #include <nvif/class.h>
 
+const u8 *
+tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+        static const u8
+        kind[16] = {
+                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */
+                0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
+        };
+        *count = ARRAY_SIZE(kind);
+        *invalid = 0x07;
+        return kind;
+}
+
 static const struct nvkm_mmu_func
 tu102_mmu = {
         .dma_bits = 47,
         .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
         .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
         .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
-        .kind = gm200_mmu_kind,
+        .kind = tu102_mmu_kind,
         .kind_sys = true,
 };
 
@@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
         } *args = argv;
         const u8 *kind = NULL;
         int ret = -ENOSYS, count = 0;
+        u8 kind_inv = 0;
 
         if (mmu->func->kind)
-                kind = mmu->func->kind(mmu, &count);
+                kind = mmu->func->kind(mmu, &count, &kind_inv);
 
         if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                 if (argc != args->v0.count * sizeof(*args->v0.data))
                         return -EINVAL;
                 if (args->v0.count > count)
                         return -EINVAL;
+                args->v0.kind_inv = kind_inv;
                 memcpy(args->v0.data, kind, args->v0.count);
         } else
                 return ret;
@@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
         struct nvkm_mmu *mmu = device->mmu;
         struct nvkm_ummu *ummu;
         int ret = -ENOSYS, kinds = 0;
+        u8 unused = 0;
 
         if (mmu->func->kind)
-                mmu->func->kind(mmu, &kinds);
+                mmu->func->kind(mmu, &kinds, &unused);
 
         if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                 args->v0.dmabits = mmu->dma_bits;
@@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
         } *args = argv;
         struct nvkm_device *device = vmm->mmu->subdev.device;
         struct nvkm_memory *memory = map->memory;
-        u8  kind, priv, ro, vol;
+        u8  kind, kind_inv, priv, ro, vol;
         int kindn, aper, ret = -ENOSYS;
         const u8 *kindm;
 
@@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
         if (WARN_ON(aper < 0))
                 return aper;
 
-        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
-        if (kind >= kindn || kindm[kind] == 0xff) {
+        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+        if (kind >= kindn || kindm[kind] == kind_inv) {
                 VMM_DEBUG(vmm, "kind %02x", kind);
                 return -EINVAL;
         }
@@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
         } *args = argv;
         struct nvkm_device *device = vmm->mmu->subdev.device;
         struct nvkm_memory *memory = map->memory;
-        u8  kind, priv, ro, vol;
+        u8  kind, kind_inv, priv, ro, vol;
         int kindn, aper, ret = -ENOSYS;
         const u8 *kindm;
 
@@ -347,8 +347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
         if (WARN_ON(aper < 0))
                 return aper;
 
-        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
-        if (kind >= kindn || kindm[kind] == 0xff) {
+        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+        if (kind >= kindn || kindm[kind] == kind_inv) {
                 VMM_DEBUG(vmm, "kind %02x", kind);
                 return -EINVAL;
         }
@@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
         struct nvkm_device *device = vmm->mmu->subdev.device;
         struct nvkm_ram *ram = device->fb->ram;
         struct nvkm_memory *memory = map->memory;
-        u8  aper, kind, comp, priv, ro;
+        u8  aper, kind, kind_inv, comp, priv, ro;
         int kindn, ret = -ENOSYS;
         const u8 *kindm;
 
@@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                 return -EINVAL;
         }
 
-        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
-        if (kind >= kindn || kindm[kind] == 0x7f) {
+        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+        if (kind >= kindn || kindm[kind] == kind_inv) {
                 VMM_DEBUG(vmm, "kind %02x", kind);
                 return -EINVAL;
         }