drm/nouveau/fifo: remove rd32/wr32 accessors from channels

No need for these; we always map USERD to the client.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit 097d56cdcd (parent 66ff4e4ed4)
Author:    Ben Skeggs, 2022-06-01 20:46:04 +10:00
Committer: Dave Airlie

3 files changed, 3 insertions(+), 37 deletions(-)
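
For context (not part of the commit): the hunk in nouveau_channel_init() below now fails channel init when the USERD map cannot be established, because with the rd32/wr32 hooks gone there is no lazy ioremap() fallback left on the channel object. Below is a minimal sketch of the resulting client-side access pattern, assuming the nvif accessors (nvif_object_map(), nvif_object_unmap(), nvif_rd32(), nvif_wr32()) as found elsewhere in the nouveau tree; the function name and offsets are made up for illustration.

/*
 * Illustrative sketch only, not taken from this commit: once USERD is
 * mapped into the client, reads and writes go through that mapping
 * directly rather than through object-level rd32/wr32 hooks.
 */
#include <nvif/object.h>

static int
example_userd_touch(struct nvif_object *user)
{
	int ret;

	/* Map USERD into the client, as nouveau_channel_init() now requires. */
	ret = nvif_object_map(user, NULL, 0);
	if (ret)
		return ret;

	/* Accesses hit the mapping directly. Offsets are hypothetical. */
	nvif_wr32(user, 0x8c, 0x00000001);	/* hypothetical doorbell-style write */
	(void)nvif_rd32(user, 0x88);		/* hypothetical read-back */

	nvif_object_unmap(user);
	return 0;
}

The point is simply that every access goes through the client's own mapping, which is why the object-level accessors removed below have no remaining users.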

@@ -26,7 +26,6 @@ struct nvkm_fifo_chan {
 	struct nvkm_gpuobj *inst;
 	struct nvkm_gpuobj *push;
 	struct nvkm_vmm *vmm;
-	void __iomem *user;
 	u64 addr;
 	u32 size;

@@ -385,7 +385,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	struct nv_dma_v0 args = {};
 	int ret, i;
 
-	nvif_object_map(&chan->user, NULL, 0);
+	ret = nvif_object_map(&chan->user, NULL, 0);
+	if (ret)
+		return ret;
 
 	if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
 	    chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {

@@ -271,36 +271,6 @@ nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
 	return 0;
 }
 
-static int
-nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
-	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (!chan->user)
-			return -ENOMEM;
-	}
-	if (unlikely(addr + 4 > chan->size))
-		return -EINVAL;
-	*data = ioread32_native(chan->user + addr);
-	return 0;
-}
-
-static int
-nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (!chan->user)
-			return -ENOMEM;
-	}
-	if (unlikely(addr + 4 > chan->size))
-		return -EINVAL;
-	iowrite32_native(data, chan->user + addr);
-	return 0;
-}
-
 static int
 nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
 {
@@ -332,9 +302,6 @@ nvkm_fifo_chan_dtor(struct nvkm_object *object)
 	}
 	spin_unlock_irqrestore(&fifo->lock, flags);
 
-	if (chan->user)
-		iounmap(chan->user);
-
 	if (chan->vmm) {
 		nvkm_vmm_part(chan->vmm, chan->inst->memory);
 		nvkm_vmm_unref(&chan->vmm);
@@ -352,8 +319,6 @@ nvkm_fifo_chan_func = {
 	.fini = nvkm_fifo_chan_fini,
 	.ntfy = nvkm_fifo_chan_ntfy,
 	.map = nvkm_fifo_chan_map,
-	.rd32 = nvkm_fifo_chan_rd32,
-	.wr32 = nvkm_fifo_chan_wr32,
 	.sclass = nvkm_fifo_chan_child_get,
 };
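
For reference (again, not part of the commit): the two entries dropped from nvkm_fifo_chan_func correspond to object-level read/write hooks whose shapes can be read off the removed implementations above. A sketch of just those two members follows; the containing structure is reduced to an invented excerpt type for illustration.

/*
 * Hook shapes implied by the removed nvkm_fifo_chan_rd32()/_wr32()
 * implementations above; reproduced for reference only. The type name
 * below is made up, and the real containing structure's other members
 * are omitted.
 */
#include <linux/types.h>

struct nvkm_object;

struct example_object_io_hooks {
	int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
	int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
};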