drm/nouveau/mmu/gp100-: add privileged methods for fault replay/cancel

Host methods exist to do at least some of what we need, but we are not
currently pushing replay/cancels through a channel like UVM does, as it's
not clear whether that's necessary in our case (UVM also updates PTEs
with the GPU).

UVM also pushes a software method for fault cancels on Pascal, seemingly
because the host methods alone aren't sufficient there.  If/when we want
to push the replay/cancel on the GPU, we can re-purpose the cancellation
code here to implement that swmthd.

Keep it simple for now, until we figure out exactly what we need here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 71871aa6df (parent a5ff307fe1)
Author: Ben Skeggs
Date:   2018-07-09 16:07:40 +10:00

8 changed files with 101 additions and 0 deletions


@@ -17,6 +17,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_PFNMAP 0x05
#define NVIF_VMM_V0_PFNCLR 0x06
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 {
__u8 version;
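
The new NVIF_VMM_V0_MTHD() macro reserves the upper half of the VMM method
space for chipset-specific methods; everything from 0x80 upward is forwarded
to the backend by nvkm_uvmm_mthd() (see the hunk below).  A minimal sketch of
what the macro implies, with the static assertions added purely for
illustration:

/* Illustration only, not part of the commit: NVIF_VMM_V0_MTHD()
 * partitions the method space -- generic VMM methods stay below 0x80,
 * 0x80..0xff are routed to the chipset backend.
 */
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)

_Static_assert(NVIF_VMM_V0_MTHD(0x00) == 0x80, "first backend-specific method");
_Static_assert(NVIF_VMM_V0_MTHD(0x7f) == 0xff, "last backend-specific method");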


@@ -18,4 +18,19 @@ struct gp100_vmm_map_v0 {
	__u8  priv;
	__u8  kind;
};

#define GP100_VMM_VN_FAULT_REPLAY NVIF_VMM_V0_MTHD(0x00)
#define GP100_VMM_VN_FAULT_CANCEL NVIF_VMM_V0_MTHD(0x01)

struct gp100_vmm_fault_replay_vn {
};

struct gp100_vmm_fault_cancel_v0 {
	__u8  version;
	__u8  hub;
	__u8  gpc;
	__u8  client;
	__u8  pad04[4];
	__u64 inst;
};
#endif
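
For context, the sketch below shows how a privileged client could issue the
two new methods through the regular nvif object interface.  It is not part of
this commit, the helper names are made up, and it assumes an
already-initialised struct nvif_vmm.

/* Hypothetical usage sketch (not in this commit).  Both calls land in
 * gp100_vmm_mthd() via the 0x80..0xff forwarding case added to
 * nvkm_uvmm_mthd() below; non-privileged clients get -EINVAL.
 */
#include <nvif/object.h>
#include <nvif/vmm.h>
#include <nvif/ifc00d.h>

static int
example_fault_replay(struct nvif_vmm *vmm)
{
	struct gp100_vmm_fault_replay_vn args = {};

	return nvif_mthd(&vmm->object, GP100_VMM_VN_FAULT_REPLAY,
			 &args, sizeof(args));
}

static int
example_fault_cancel(struct nvif_vmm *vmm, u8 hub, u8 gpc, u8 client,
		     u64 inst)
{
	/* inst is the raw MaxwellFaultBufferA instance pointer; the
	 * kernel side converts it to NV_GR_FECS_CURRENT_CTX format.
	 */
	struct gp100_vmm_fault_cancel_v0 args = {
		.hub = hub, .gpc = gpc, .client = client, .inst = inst,
	};

	return nvif_mthd(&vmm->object, GP100_VMM_VN_FAULT_CANCEL,
			 &args, sizeof(args));
}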


@@ -341,6 +341,13 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
	case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
	case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
	case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
	case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
		if (uvmm->vmm->func->mthd) {
			return uvmm->vmm->func->mthd(uvmm->vmm,
						     uvmm->object.client,
						     mthd, argv, argc);
		}
		break;
	default:
		break;
	}


@@ -145,6 +145,9 @@ struct nvkm_vmm_func {
		     struct nvkm_vmm_map *);
	void (*flush)(struct nvkm_vmm *, int depth);
	int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
		    u32 mthd, void *argv, u32 argc);
	void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);
	u64 page_block;
@@ -220,6 +223,7 @@ int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);


@@ -21,8 +21,11 @@
*/
#include "vmm.h"
#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <engine/gr.h>
#include <nvif/ifc00d.h>
#include <nvif/unpack.h>
@@ -384,6 +387,73 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
	return 0;
}

static int
gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	union {
		struct gp100_vmm_fault_cancel_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	u32 inst, aper;

	if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
		return ret;

	/* Translate MaxwellFaultBufferA instance pointer to the same
	 * format as the NV_GR_FECS_CURRENT_CTX register.
	 */
	aper = (args->v0.inst >> 8) & 3;
	args->v0.inst >>= 12;
	args->v0.inst |= aper << 28;
	args->v0.inst |= 0x80000000;

	if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
		if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) {
			gf100_vmm_invalidate(vmm, 0x0000001b
					     /* CANCEL_TARGETED. */ |
					     (args->v0.hub << 20) |
					     (args->v0.gpc << 15) |
					     (args->v0.client << 9));
		}
		WARN_ON(nvkm_gr_ctxsw_resume(device));
	}

	return 0;
}

static int
gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	union {
		struct gp100_vmm_fault_replay_vn vn;
	} *args = argv;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
	}

	return ret;
}

int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
	       struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
	if (client->super) {
		switch (mthd) {
		case GP100_VMM_VN_FAULT_REPLAY:
			return gp100_vmm_fault_replay(vmm, argv, argc);
		case GP100_VMM_VN_FAULT_CANCEL:
			return gp100_vmm_fault_cancel(vmm, argv, argc);
		default:
			break;
		}
	}
	return -EINVAL;
}
void
gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
@@ -417,6 +487,7 @@ gp100_vmm = {
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
.mthd = gp100_vmm_mthd,
.invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
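
The aperture/instance shuffle in gp100_vmm_fault_cancel() above is easier to
follow with a concrete number.  The standalone sketch below (plain userspace
C, input value invented purely for illustration) walks the same arithmetic:
the aperture is read from bits 9:8 of the fault-buffer instance pointer, the
page-aligned address from bits 63:12, and both are repacked as valid bit 31,
aperture bits 29:28 and address in the low bits, i.e. the
NV_GR_FECS_CURRENT_CTX layout referenced in the comment.

/* Worked example of the instance-pointer translation performed by
 * gp100_vmm_fault_cancel(); the raw value is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t inst = 0x12345100ULL;		/* illustrative raw fault-buffer value */
	uint32_t aper = (inst >> 8) & 3;	/* aperture field, here 1 */

	inst >>= 12;				/* address >> 12     -> 0x00012345 */
	inst |= (uint64_t)aper << 28;		/* aperture to 29:28 -> 0x10012345 */
	inst |= 0x80000000;			/* set the valid bit -> 0x90012345 */

	printf("FECS_CURRENT_CTX-format instance: 0x%08llx\n",
	       (unsigned long long)inst);
	return 0;
}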


@@ -28,6 +28,7 @@ gp10b_vmm = {
.aper = gk20a_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
.mthd = gp100_vmm_mthd,
.invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },


@@ -66,6 +66,7 @@ gv100_vmm = {
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
.mthd = gp100_vmm_mthd,
.invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },


@@ -56,6 +56,7 @@ tu102_vmm = {
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = tu102_vmm_flush,
.mthd = gp100_vmm_mthd,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },