drm/nouveau/imem: switch to device pri macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent fef5cc0f25
commit d5c5bcf693
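Editor's note (a sketch for context, not part of the original commit): the hunks below convert the gk20a, nv04, nv40 and nv50 instmem register accessors from the object-based nv_rd32()/nv_wr32() helpers to the device-level nvkm_rd32()/nvkm_wr32() pri macros. The pattern is the same everywhere; REG below is a hypothetical register offset used purely for illustration:

	/* before: the helpers accepted any nvkm_object-derived pointer */
	u32 v = nv_rd32(imem, REG);

	/* after: the nvkm_device is looked up once and passed explicitly */
	struct nvkm_device *device = imem->base.subdev.device;
	u32 v = nvkm_rd32(device, REG);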
@@ -107,6 +107,7 @@ gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
 {
 	struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
 	struct gk20a_instobj *node = (void *)object;
+	struct nvkm_device *device = imem->base.subdev.device;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
@@ -114,10 +115,10 @@ gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
 
 	spin_lock_irqsave(&imem->lock, flags);
 	if (unlikely(imem->addr != base)) {
-		nv_wr32(imem, 0x001700, base >> 16);
+		nvkm_wr32(device, 0x001700, base >> 16);
 		imem->addr = base;
 	}
-	data = nv_rd32(imem, 0x700000 + addr);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	spin_unlock_irqrestore(&imem->lock, flags);
 	return data;
 }
@@ -127,16 +128,17 @@ gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
 {
 	struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
 	struct gk20a_instobj *node = (void *)object;
+	struct nvkm_device *device = imem->base.subdev.device;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
 
 	spin_lock_irqsave(&imem->lock, flags);
 	if (unlikely(imem->addr != base)) {
-		nv_wr32(imem, 0x001700, base >> 16);
+		nvkm_wr32(device, 0x001700, base >> 16);
 		imem->addr = base;
 	}
-	nv_wr32(imem, 0x700000 + addr, data);
+	nvkm_wr32(device, 0x700000 + addr, data);
 	spin_unlock_irqrestore(&imem->lock, flags);
 }
 
@@ -105,13 +105,15 @@ nv04_instobj_oclass = {
 static u32
 nv04_instmem_rd32(struct nvkm_object *object, u64 addr)
 {
-	return nv_rd32(object, 0x700000 + addr);
+	struct nvkm_instmem *imem = (void *)object;
+	return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
 }
 
 static void
 nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
 {
-	return nv_wr32(object, 0x700000 + addr, data);
+	struct nvkm_instmem *imem = (void *)object;
+	nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
 }
 
 void
@@ -75,7 +75,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	 * to fit graphics contexts for every channel, the magics come
 	 * from engine/gr/nv40.c
 	 */
-	vs = hweight8((nv_rd32(imem, 0x001540) & 0x0000ff00) >> 8);
+	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
 	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
 	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
 	else if (nv44_gr_class(imem))     imem->base.reserved = 0x4980 * vs;
@@ -45,6 +45,7 @@ nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
 {
 	struct nv50_instmem *imem = (void *)nvkm_instmem(object);
 	struct nv50_instobj *node = (void *)object;
+	struct nvkm_device *device = imem->base.subdev.device;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
@@ -52,10 +53,10 @@ nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
 
 	spin_lock_irqsave(&imem->lock, flags);
 	if (unlikely(imem->addr != base)) {
-		nv_wr32(imem, 0x001700, base >> 16);
+		nvkm_wr32(device, 0x001700, base >> 16);
 		imem->addr = base;
 	}
-	data = nv_rd32(imem, 0x700000 + addr);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	spin_unlock_irqrestore(&imem->lock, flags);
 	return data;
 }
@@ -65,16 +66,17 @@ nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
 {
 	struct nv50_instmem *imem = (void *)nvkm_instmem(object);
 	struct nv50_instobj *node = (void *)object;
+	struct nvkm_device *device = imem->base.subdev.device;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
 
 	spin_lock_irqsave(&imem->lock, flags);
 	if (unlikely(imem->addr != base)) {
-		nv_wr32(imem, 0x001700, base >> 16);
+		nvkm_wr32(device, 0x001700, base >> 16);
 		imem->addr = base;
 	}
-	nv_wr32(imem, 0x700000 + addr, data);
+	nvkm_wr32(device, 0x700000 + addr, data);
 	spin_unlock_irqrestore(&imem->lock, flags);
 }
 
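Editor's note (a hedged sketch, not part of the commit): in the gk20a and nv50 paths above, instance memory is reached through a movable 1 MiB window at 0x700000; register 0x001700 selects which 1 MiB-aligned base the window maps, and imem->lock serialises the remap against the access. Reassembled from the hunks above, a post-patch read accessor looks roughly like this (the function name is illustrative, and the u32 data declaration is assumed from context elided by the diff):

	static u32
	instobj_rd32_sketch(struct nvkm_object *object, u64 offset)
	{
		struct nv50_instmem *imem = (void *)nvkm_instmem(object);
		struct nv50_instobj *node = (void *)object;
		struct nvkm_device *device = imem->base.subdev.device;
		unsigned long flags;
		u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
		u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
		u32 data;

		spin_lock_irqsave(&imem->lock, flags);
		if (unlikely(imem->addr != base)) {
			/* retarget the 0x700000 window at the new base */
			nvkm_wr32(device, 0x001700, base >> 16);
			imem->addr = base;
		}
		data = nvkm_rd32(device, 0x700000 + addr);
		spin_unlock_irqrestore(&imem->lock, flags);
		return data;
	}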