drm/nouveau/mmu/gp100-: support vmms with gcc/tex replayable faults enabled
Some GPU units are capable of supporting "replayable" page faults, where the execution unit will wait for SW to fix up the GPU page tables rather than triggering a channel-fatal fault.

This feature isn't useful (it's harmful, even) unless something like HMM is being used to manage the events appearing in the replayable fault buffer, so it's disabled by default. This commit allows a client to request it be enabled.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 71871aa6df
commit ab2ee9ffa3
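For context, the new fault_replay flag is meant to be set by an in-kernel client at VMM creation time. A minimal sketch of such a call through the nvif layer follows; the nvif_vmm_init() helper and the cli/addr/size names are taken from the surrounding nouveau code and are illustrative assumptions, not part of this commit:

	/* Sketch: create a VMM with replayable TEX/GCC faults enabled
	 * by passing the new versioned gp100_vmm_v0 argument struct. */
	struct gp100_vmm_v0 args = {
		.version = 0,
		.fault_replay = 1,
	};
	struct nvif_vmm vmm = {};
	int ret;

	ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass,
			    true /* managed */, addr, size,
			    &args, sizeof(args), &vmm);
	if (ret)
		return ret;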
@@ -6,6 +6,12 @@ struct gp100_vmm_vn {
 	/* nvif_vmm_vX ... */
 };
 
+struct gp100_vmm_v0 {
+	/* nvif_vmm_vX ... */
+	__u8  version;
+	__u8  fault_replay;
+};
+
 struct gp100_vmm_map_vn {
 	/* nvif_vmm_map_vX ... */
 };
@@ -45,6 +45,8 @@ struct nvkm_vmm {
 
 	dma_addr_t null;
 	void *nullp;
+
+	bool replay;
 };
 
 int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
@@ -31,7 +31,7 @@ gp100_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
-	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
 	.kind = gm200_mmu_kind,
 	.kind_sys = true,
 };
@@ -31,7 +31,7 @@ gp10b_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
-	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
 	.kind = gm200_mmu_kind,
 	.kind_sys = true,
 };
@@ -31,7 +31,7 @@ gv100_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
-	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
 	.kind = gm200_mmu_kind,
 	.kind_sys = true,
 };
@@ -220,6 +220,9 @@ int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
 int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
 int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 
+int gp100_vmm_new_(const struct nvkm_vmm_func *,
+		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
+		   struct lock_class_key *, const char *, struct nvkm_vmm **);
 int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 void gp100_vmm_flush(struct nvkm_vmm *, int);
@@ -476,7 +476,11 @@ gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
 int
 gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
-	const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
+	u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
+	if (vmm->replay) {
+		base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
+		base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
+	}
 	return gf100_vmm_join_(vmm, inst, base);
 }
 
@@ -500,11 +504,40 @@ gp100_vmm = {
 	}
 };
 
+int
+gp100_vmm_new_(const struct nvkm_vmm_func *func,
+	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	       void *argv, u32 argc, struct lock_class_key *key,
+	       const char *name, struct nvkm_vmm **pvmm)
+{
+	union {
+		struct gp100_vmm_vn vn;
+		struct gp100_vmm_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	bool replay;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		replay = args->v0.fault_replay != 0;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		replay = false;
+	} else
+		return ret;
+
+	ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
+	if (ret)
+		return ret;
+
+	(*pvmm)->replay = replay;
+	return 0;
+}
+
 int
 gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
 	      void *argv, u32 argc, struct lock_class_key *key,
 	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&gp100_vmm, mmu, 0, managed, addr, size,
+	return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
 			     argv, argc, key, name, pvmm);
 }
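A note on the gp100_vmm_new_() constructor above: the nvif_unpack()/nvif_unvers() cascade keeps the argument handling backwards compatible. A client passing the new versioned gp100_vmm_v0 struct can opt in to replayable faults, while a client passing the empty gp100_vmm_vn layout falls through to nvif_unvers() and gets replay = false, preserving the old behaviour. Roughly, the two accepted argument buffers look like this (a sketch, not code from this commit):

	/* New-style args: opt in to replayable faults. */
	struct gp100_vmm_v0 v0 = { .version = 0, .fault_replay = 1 };

	/* Old-style args: empty struct, replay stays disabled. */
	struct gp100_vmm_vn vn = {};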
@@ -46,6 +46,6 @@ gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
 	      void *argv, u32 argc, struct lock_class_key *key,
 	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&gp10b_vmm, mmu, 0, managed, addr, size,
+	return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
 			     argv, argc, key, name, pvmm);
 }
@@ -84,6 +84,6 @@ gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
 	      void *argv, u32 argc, struct lock_class_key *key,
 	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&gv100_vmm, mmu, 0, managed, addr, size,
+	return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
 			     argv, argc, key, name, pvmm);
 }
@@ -73,6 +73,6 @@ tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
 	      void *argv, u32 argc, struct lock_class_key *key,
 	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&tu102_vmm, mmu, 0, managed, addr, size,
+	return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
 			     argv, argc, key, name, pvmm);
 }