drm/nouveau: remove nouveau_gpuobj_ref completely, replace with sanity

Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent de3a6c0a3b
commit a8eaebc6c5
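The change converts every caller from the old nouveau_gpuobj_ref_add()/nouveau_gpuobj_ref_del() pair to a single refcounting helper plus an explicit RAMHT API. A minimal sketch of the calling convention the diff below moves to, using only the helpers declared in the nouveau_drv.h and nouveau_ramht.h hunks (the function name example_publish and the size/flags values are illustrative, not taken from the commit):

/* Sketch only: nouveau_gpuobj_ref(obj, &ptr) takes a reference and stores
 * it in ptr; nouveau_gpuobj_ref(NULL, &ptr) drops the reference held there. */
static int example_publish(struct nouveau_channel *chan, u32 handle)
{
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	/* freshly allocated objects start with a single reference */
	ret = nouveau_gpuobj_new(chan->dev, chan, 0x1000, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &obj);
	if (ret)
		return ret;

	/* the hash table takes its own reference on the object... */
	ret = nouveau_ramht_insert(chan, handle, obj);
	/* ...so the caller drops its local reference unconditionally */
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

This is the pattern repeated throughout the hunks below: take a reference wherever a pointer is stored, and drop it with a NULL ref where the old code called nouveau_gpuobj_ref_del() or nouveau_gpuobj_del().
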
@@ -70,14 +70,8 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
}

ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
if (ret) {
NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
if (pushbuf != dev_priv->gart_info.sg_ctxdma)
nouveau_gpuobj_del(dev, &pushbuf);
return ret;
}

nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
nouveau_gpuobj_ref(NULL, &pushbuf);
return 0;
}

@@ -308,7 +302,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

/* Release the channel's resources */
nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_unpin(chan->pushbuf_bo);

@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

void
nouveau_dma_pre_init(struct nouveau_channel *chan)
@@ -58,26 +59,27 @@ nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *m2mf = NULL;
struct nouveau_gpuobj *nvsw = NULL;
struct nouveau_gpuobj *obj = NULL;
int ret, i;

/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
0x0039 : 0x5039, &m2mf);
0x0039 : 0x5039, &obj);
if (ret)
return ret;

ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
ret = nouveau_ramht_insert(chan, NvM2MF, obj);
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;

/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
if (ret)
return ret;

ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
ret = nouveau_ramht_insert(chan, NvSw, obj);
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;

@@ -133,7 +133,6 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_DISPLAY 2
#define NVOBJ_ENGINE_INT 0xdeadbeef

#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_FAKE (1 << 3)
@@ -141,7 +140,6 @@ struct nouveau_gpuobj {
struct drm_device *dev;
struct list_head list;

struct nouveau_channel *im_channel;
struct drm_mm_node *im_pramin;
struct nouveau_bo *im_backing;
uint32_t im_backing_start;
@@ -162,16 +160,6 @@ struct nouveau_gpuobj {
void *priv;
};

struct nouveau_gpuobj_ref {
struct list_head list;

struct nouveau_gpuobj *gpuobj;
uint32_t instance;

struct nouveau_channel *channel;
int handle;
};

struct nouveau_channel {
struct drm_device *dev;
int id;
@@ -197,33 +185,32 @@ struct nouveau_channel {
} fence;

/* DMA push buffer */
struct nouveau_gpuobj_ref *pushbuf;
struct nouveau_bo *pushbuf_bo;
uint32_t pushbuf_base;
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
uint32_t pushbuf_base;

/* Notifier memory */
struct nouveau_bo *notifier_bo;
struct drm_mm notifier_heap;

/* PFIFO context */
struct nouveau_gpuobj_ref *ramfc;
struct nouveau_gpuobj_ref *cache;
struct nouveau_gpuobj *ramfc;
struct nouveau_gpuobj *cache;

/* PGRAPH context */
/* XXX may be merge 2 pointers as private data ??? */
struct nouveau_gpuobj_ref *ramin_grctx;
struct nouveau_gpuobj *ramin_grctx;
void *pgraph_ctx;

/* NV50 VM */
struct nouveau_gpuobj *vm_pd;
struct nouveau_gpuobj_ref *vm_gart_pt;
struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
struct nouveau_gpuobj *vm_pd;
struct nouveau_gpuobj *vm_gart_pt;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];

/* Objects */
struct nouveau_gpuobj_ref *ramin; /* Private instmem */
struct drm_mm ramin_heap; /* Private PRAMIN heap */
struct nouveau_gpuobj_ref *ramht; /* Hash table */
struct list_head ramht_refs; /* Objects referenced by RAMHT */
struct nouveau_gpuobj *ramin; /* Private instmem */
struct drm_mm ramin_heap; /* Private PRAMIN heap */
struct nouveau_ramht *ramht; /* Hash table */

/* GPU object info for stuff used in-kernel (mm_enabled) */
uint32_t m2mf_ntfy;
@@ -301,7 +288,7 @@ struct nouveau_fb_engine {
struct nouveau_fifo_engine {
int channels;

struct nouveau_gpuobj_ref *playlist[2];
struct nouveau_gpuobj *playlist[2];
int cur_playlist;

int (*init)(struct drm_device *);
@@ -339,7 +326,7 @@ struct nouveau_pgraph_engine {
int grctx_size;

/* NV2x/NV3x context table (0x400780) */
struct nouveau_gpuobj_ref *ctx_table;
struct nouveau_gpuobj *ctx_table;

int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -555,7 +542,7 @@ struct drm_nouveau_private {
spinlock_t context_switch_lock;

/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_gpuobj *ramht;
struct nouveau_ramht *ramht;
uint32_t ramin_rsvd_vram;
uint32_t ramht_offset;
uint32_t ramht_size;
@@ -764,24 +751,12 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
uint32_t size, int align, uint32_t flags,
struct nouveau_gpuobj **);
extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
uint32_t handle, struct nouveau_gpuobj *,
struct nouveau_gpuobj_ref **);
extern int nouveau_gpuobj_ref_del(struct drm_device *,
struct nouveau_gpuobj_ref **);
extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
struct nouveau_gpuobj_ref **ref_ret);
extern int nouveau_gpuobj_new_ref(struct drm_device *,
struct nouveau_channel *alloc_chan,
struct nouveau_channel *ref_chan,
uint32_t handle, uint32_t size, int align,
uint32_t flags, struct nouveau_gpuobj_ref **);
extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
struct nouveau_gpuobj **);
extern int nouveau_gpuobj_new_fake(struct drm_device *,
uint32_t p_offset, uint32_t b_offset,
uint32_t size, uint32_t flags,
struct nouveau_gpuobj **,
struct nouveau_gpuobj_ref**);
struct nouveau_gpuobj **);
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);

@@ -35,6 +35,7 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include <linux/ratelimit.h>

/* needed for hotplug irq */
@@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
const int mthd = addr & 0x1ffc;

if (mthd == 0x0000) {
struct nouveau_gpuobj_ref *ref = NULL;
struct nouveau_gpuobj *gpuobj;

if (nouveau_gpuobj_ref_find(chan, data, &ref))
gpuobj = nouveau_ramht_find(chan, data);
if (!gpuobj)
return false;

if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
if (gpuobj->engine != NVOBJ_ENGINE_SW)
return false;

chan->sw_subchannel[subc] = ref->gpuobj->class;
chan->sw_subchannel[subc] = gpuobj->class;
nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
return true;
@@ -357,7 +359,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
if (!chan || !chan->ramin_grctx)
continue;

if (inst == chan->ramin_grctx->instance)
if (inst == chan->ramin_grctx->pinst)
break;
}
} else {
@@ -369,7 +371,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
if (!chan || !chan->ramin)
continue;

if (inst == chan->ramin->instance)
if (inst == chan->ramin->vinst)
break;
}
}
@@ -625,7 +627,7 @@ nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
if (!chan || !chan->ramin)
continue;

if (trap[1] == chan->ramin->instance >> 12)
if (trap[1] == chan->ramin->vinst >> 12)
break;
}
NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",

@@ -28,6 +28,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"

int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
@@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
nobj->dtor = nouveau_notifier_gpuobj_dtor;
nobj->priv = mem;

ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
ret = nouveau_ramht_insert(chan, handle, nobj);
nouveau_gpuobj_ref(NULL, &nobj);
if (ret) {
nouveau_gpuobj_del(dev, &nobj);
drm_mm_put_block(mem);
NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
return ret;
}

@@ -90,7 +90,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
gpuobj->dev = dev;
gpuobj->flags = flags;
gpuobj->im_channel = chan;
gpuobj->refcount = 1;

list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

@@ -108,7 +108,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,

ret = engine->instmem.populate(dev, gpuobj, &size);
if (ret) {
nouveau_gpuobj_del(dev, &gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
}
@@ -119,14 +119,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);

if (!gpuobj->im_pramin) {
nouveau_gpuobj_del(dev, &gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}

if (!chan) {
ret = engine->instmem.bind(dev, gpuobj);
if (ret) {
nouveau_gpuobj_del(dev, &gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
}
@@ -134,13 +134,13 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
/* calculate the various different addresses for the object */
if (chan) {
gpuobj->pinst = gpuobj->im_pramin->start +
chan->ramin->gpuobj->im_pramin->start;
chan->ramin->im_pramin->start;
if (dev_priv->card_type < NV_50) {
gpuobj->cinst = gpuobj->pinst;
} else {
gpuobj->cinst = gpuobj->im_pramin->start;
gpuobj->vinst = gpuobj->im_pramin->start +
chan->ramin->gpuobj->im_backing_start;
chan->ramin->im_backing_start;
}
} else {
gpuobj->pinst = gpuobj->im_pramin->start;
@@ -156,6 +156,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
engine->instmem.flush(dev);
}

*gpuobj_ret = gpuobj;
return 0;
}

@@ -176,20 +177,23 @@ int
nouveau_gpuobj_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramht = NULL;
int ret;

NV_DEBUG(dev, "\n");

if (dev_priv->card_type < NV_50) {
ret = nouveau_gpuobj_new_fake(dev,
dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
&dev_priv->ramht, NULL);
if (ret)
return ret;
}
if (dev_priv->card_type >= NV_50)
return 0;

return 0;
ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0,
dev_priv->ramht_size,
NVOBJ_FLAG_ZERO_ALLOC, &ramht);
if (ret)
return ret;

ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
return ret;
}

void
@@ -199,7 +203,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev)

NV_DEBUG(dev, "\n");

nouveau_gpuobj_del(dev, &dev_priv->ramht);
nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
}

void
@@ -216,29 +220,21 @@ nouveau_gpuobj_late_takedown(struct drm_device *dev)

NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
gpuobj, gpuobj->refcount);
gpuobj->refcount = 0;
nouveau_gpuobj_del(dev, &gpuobj);

gpuobj->refcount = 1;
nouveau_gpuobj_ref(NULL, &gpuobj);
}
}

int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
static int
nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj)
{
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct nouveau_gpuobj *gpuobj;
int i;

NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

if (!dev_priv || !pgpuobj || !(*pgpuobj))
return -EINVAL;
gpuobj = *pgpuobj;

if (gpuobj->refcount != 0) {
NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
return -EINVAL;
}
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
for (i = 0; i < gpuobj->im_pramin->size; i += 4)
@@ -261,181 +257,26 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)

list_del(&gpuobj->list);

*pgpuobj = NULL;
kfree(gpuobj);
return 0;
}

static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
struct nouveau_channel *chan,
struct nouveau_gpuobj *gpuobj, uint32_t *inst)
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *cpramin;
if (ref)
ref->refcount++;

/* <NV50 use PRAMIN address everywhere */
if (dev_priv->card_type < NV_50) {
*inst = gpuobj->im_pramin->start;
if (gpuobj->im_channel) {
cpramin = gpuobj->im_channel->ramin->gpuobj;
*inst += cpramin->im_pramin->start;
}
return 0;
}
if (*ptr && --(*ptr)->refcount == 0)
nouveau_gpuobj_del(*ptr);

/* NV50 channel-local instance */
if (chan) {
*inst = gpuobj->im_pramin->start;
return 0;
}

/* NV50 global (VRAM) instance */
if (!gpuobj->im_channel) {
/* ...from global heap */
if (!gpuobj->im_backing) {
NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
return -EINVAL;
}
*inst = gpuobj->im_backing_start;
return 0;
} else {
/* ...from local heap */
cpramin = gpuobj->im_channel->ramin->gpuobj;
*inst = cpramin->im_backing_start + gpuobj->im_pramin->start;
return 0;
}

return -EINVAL;
}

int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
uint32_t handle, struct nouveau_gpuobj *gpuobj,
struct nouveau_gpuobj_ref **ref_ret)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj_ref *ref;
uint32_t instance;
int ret;

NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
chan ? chan->id : -1, handle, gpuobj);

if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
return -EINVAL;

if (!chan && !ref_ret)
return -EINVAL;

if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
/* sw object */
instance = 0x40;
} else {
ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
if (ret)
return ret;
}

ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
return -ENOMEM;
INIT_LIST_HEAD(&ref->list);
ref->gpuobj = gpuobj;
ref->channel = chan;
ref->instance = instance;

if (!ref_ret) {
ref->handle = handle;

ret = nouveau_ramht_insert(dev, ref);
if (ret) {
kfree(ref);
return ret;
}
} else {
ref->handle = ~0;
*ref_ret = ref;
}

ref->gpuobj->refcount++;
return 0;
}

int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
struct nouveau_gpuobj_ref *ref;

NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);

if (!dev || !pref || *pref == NULL)
return -EINVAL;
ref = *pref;

if (ref->handle != ~0)
nouveau_ramht_remove(dev, ref);

if (ref->gpuobj) {
ref->gpuobj->refcount--;

if (ref->gpuobj->refcount == 0) {
if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
nouveau_gpuobj_del(dev, &ref->gpuobj);
}
}

*pref = NULL;
kfree(ref);
return 0;
}

int
nouveau_gpuobj_new_ref(struct drm_device *dev,
struct nouveau_channel *oc, struct nouveau_channel *rc,
uint32_t handle, uint32_t size, int align,
uint32_t flags, struct nouveau_gpuobj_ref **ref)
{
struct nouveau_gpuobj *gpuobj = NULL;
int ret;

ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
if (ret)
return ret;

ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
if (ret) {
nouveau_gpuobj_del(dev, &gpuobj);
return ret;
}

return 0;
}

int
nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
struct nouveau_gpuobj_ref **ref_ret)
{
struct nouveau_gpuobj_ref *ref;
struct list_head *entry, *tmp;

list_for_each_safe(entry, tmp, &chan->ramht_refs) {
ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

if (ref->handle == handle) {
if (ref_ret)
*ref_ret = ref;
return 0;
}
}

return -EINVAL;
*ptr = ref;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
uint32_t b_offset, uint32_t size,
uint32_t flags, struct nouveau_gpuobj **pgpuobj,
struct nouveau_gpuobj_ref **pref)
uint32_t flags, struct nouveau_gpuobj **pgpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
@@ -450,8 +291,8 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
return -ENOMEM;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
gpuobj->dev = dev;
gpuobj->im_channel = NULL;
gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
gpuobj->refcount = 1;

list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

@@ -459,7 +300,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
GFP_KERNEL);
if (!gpuobj->im_pramin) {
nouveau_gpuobj_del(dev, &gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}
gpuobj->im_pramin->start = p_offset;
@@ -481,14 +322,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
dev_priv->engine.instmem.flush(dev);
}

if (pref) {
i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
if (i) {
nouveau_gpuobj_del(dev, &gpuobj);
return i;
}
}

if (pgpuobj)
*pgpuobj = gpuobj;
return 0;
@@ -628,7 +461,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
*o_ret = 0;
} else
if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
*gpuobj = dev_priv->gart_info.sg_ctxdma;
nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
if (offset & ~0xffffffffULL) {
NV_ERROR(dev, "obj offset exceeds 32-bits\n");
return -EINVAL;
@@ -760,8 +593,11 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
gpuobj->dev = chan->dev;
gpuobj->engine = NVOBJ_ENGINE_SW;
gpuobj->class = class;
gpuobj->refcount = 1;
gpuobj->cinst = 0x40;

list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
*gpuobj_ret = gpuobj;
@@ -773,7 +609,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *pramin = NULL;
uint32_t size;
uint32_t base;
int ret;
@@ -798,18 +633,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
size += 0x1000;
}

ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
&chan->ramin);
ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
return ret;
}
pramin = chan->ramin->gpuobj;

ret = drm_mm_init(&chan->ramin_heap, base, size);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref_del(dev, &chan->ramin);
nouveau_gpuobj_ref(NULL, &chan->ramin);
return ret;
}

@@ -826,8 +659,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i;

INIT_LIST_HEAD(&chan->ramht_refs);

NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

/* Allocate a chunk of memory for per-channel object storage */
@@ -846,10 +677,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
uint32_t vm_offset, pde;

vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
vm_offset += chan->ramin->gpuobj->im_pramin->start;
vm_offset += chan->ramin->im_pramin->start;

ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
0, &chan->vm_pd, NULL);
0, &chan->vm_pd);
if (ret)
return ret;
for (i = 0; i < 0x4000; i += 8) {
@@ -857,25 +688,19 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
}

nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
&chan->vm_gart_pt);
pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
dev_priv->gart_info.sg_ctxdma,
&chan->vm_gart_pt);
if (ret)
return ret;
nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3);
nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
dev_priv->vm_vram_pt[i],
&chan->vm_vram_pt[i]);
if (ret)
return ret;
nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
&chan->vm_vram_pt[i]);

nv_wo32(chan->vm_pd, pde + 0,
chan->vm_vram_pt[i]->instance | 0x61);
chan->vm_vram_pt[i]->vinst | 0x61);
nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
pde += 8;
}
@@ -885,15 +710,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,

/* RAMHT */
if (dev_priv->card_type < NV_50) {
ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
&chan->ramht);
nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
} else {
struct nouveau_gpuobj *ramht = NULL;

ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
NVOBJ_FLAG_ZERO_ALLOC, &ramht);
if (ret)
return ret;
} else {
ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
0x8000, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramht);

ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
}
@@ -910,24 +737,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
}
} else {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->fb_available_size,
NV_DMA_ACCESS_RW,
NV_DMA_TARGET_VIDMEM, &vram);
0, dev_priv->fb_available_size,
NV_DMA_ACCESS_RW,
NV_DMA_TARGET_VIDMEM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
}
}

ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
ret = nouveau_ramht_insert(chan, vram_h, vram);
nouveau_gpuobj_ref(NULL, &vram);
if (ret) {
NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
return ret;
}

/* TT memory ctxdma */
if (dev_priv->card_type >= NV_50) {
tt = vram;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vm_end,
NV_DMA_ACCESS_RW,
NV_DMA_TARGET_AGP, &tt);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
}
} else
if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
@@ -943,9 +778,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
return ret;
}

ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
ret = nouveau_ramht_insert(chan, tt_h, tt);
nouveau_gpuobj_ref(NULL, &tt);
if (ret) {
NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
return ret;
}

@@ -957,33 +793,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
struct list_head *entry, *tmp;
struct nouveau_gpuobj_ref *ref;
int i;

NV_DEBUG(dev, "ch%d\n", chan->id);

if (!chan->ramht_refs.next)
if (!chan->ramht)
return;

list_for_each_safe(entry, tmp, &chan->ramht_refs) {
ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
nouveau_ramht_ref(NULL, &chan->ramht, chan);

nouveau_gpuobj_ref_del(dev, &ref);
}

nouveau_gpuobj_ref_del(dev, &chan->ramht);

nouveau_gpuobj_del(dev, &chan->vm_pd);
nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
if (chan->ramin)
nouveau_gpuobj_ref_del(dev, &chan->ramin);

nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
@@ -1095,25 +921,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
return -EPERM;
}

if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
if (nouveau_ramht_find(chan, init->handle))
return -EEXIST;

if (!grc->software)
ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
else
ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);

if (ret) {
NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
return ret;
}

ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
ret = nouveau_ramht_insert(chan, init->handle, gr);
nouveau_gpuobj_ref(NULL, &gr);
if (ret) {
NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
nouveau_gpuobj_del(dev, &gr);
return ret;
}

@@ -1124,17 +949,16 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gpuobj_free *objfree = data;
struct nouveau_gpuobj_ref *ref;
struct nouveau_gpuobj *gpuobj;
struct nouveau_channel *chan;
int ret;

NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
if (ret)
return ret;
nouveau_gpuobj_ref_del(dev, &ref);
gpuobj = nouveau_ramht_find(chan, objfree->handle);
if (!gpuobj)
return -ENOENT;

nouveau_ramht_remove(chan, objfree->handle);
return 0;
}

@@ -62,48 +62,56 @@ nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
}

int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
struct nouveau_gpuobj *gpuobj)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_channel *chan = ref->channel;
struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
struct nouveau_ramht_entry *entry;
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
uint32_t ctx, co, ho;

if (!ramht) {
NV_ERROR(dev, "No hash table!\n");
return -EINVAL;
}
if (nouveau_ramht_find(chan, handle))
return -EEXIST;

entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->channel = chan;
entry->gpuobj = NULL;
entry->handle = handle;
list_add(&entry->head, &chan->ramht->entries);
nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);

if (dev_priv->card_type < NV_40) {
ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
(chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
(gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
} else
if (dev_priv->card_type < NV_50) {
ctx = (ref->instance >> 4) |
ctx = (gpuobj->cinst >> 4) |
(chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
ctx = (ref->instance << 10) | 2;
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
ctx = (gpuobj->cinst << 10) | 2;
} else {
ctx = (ref->instance >> 4) |
((ref->gpuobj->engine <<
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
}
}

co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle);
do {
if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
NV_DEBUG(dev,
"insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
chan->id, co, ref->handle, ctx);
nv_wo32(ramht, co + 0, ref->handle);
chan->id, co, handle, ctx);
nv_wo32(ramht, co + 0, handle);
nv_wo32(ramht, co + 4, ctx);

list_add_tail(&ref->list, &chan->ramht_refs);
instmem->flush(dev);
return 0;
}
@@ -116,35 +124,40 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
} while (co != ho);

NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
list_del(&entry->head);
kfree(entry);
return -ENOMEM;
}

void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_channel *chan = ref->channel;
struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
uint32_t co, ho;
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
struct nouveau_ramht_entry *entry, *tmp;
u32 co, ho;

if (!ramht) {
NV_ERROR(dev, "No hash table!\n");
return;
list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
if (entry->channel != chan || entry->handle != handle)
continue;

nouveau_gpuobj_ref(NULL, &entry->gpuobj);
list_del(&entry->head);
kfree(entry);
break;
}

co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle);
do {
if (nouveau_ramht_entry_valid(dev, ramht, co) &&
(ref->handle == nv_ro32(ramht, co))) {
(handle == nv_ro32(ramht, co))) {
NV_DEBUG(dev,
"remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
chan->id, co, ref->handle,
nv_ro32(ramht, co + 4));
chan->id, co, handle, nv_ro32(ramht, co + 4));
nv_wo32(ramht, co + 0, 0x00000000);
nv_wo32(ramht, co + 4, 0x00000000);

list_del(&ref->list);
instmem->flush(dev);
return;
}
@@ -153,8 +166,64 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
if (co >= dev_priv->ramht_size)
co = 0;
} while (co != ho);
list_del(&ref->list);

NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
chan->id, ref->handle);
chan->id, handle);
}

struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
{
struct nouveau_ramht_entry *entry;

list_for_each_entry(entry, &chan->ramht->entries, head) {
if (entry->channel == chan && entry->handle == handle)
return entry->gpuobj;
}

return NULL;
}

int
nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
struct nouveau_ramht **pramht)
{
struct nouveau_ramht *ramht;

ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
if (!ramht)
return -ENOMEM;

ramht->dev = dev;
ramht->refcount = 1;
INIT_LIST_HEAD(&ramht->entries);
nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);

*pramht = ramht;
return 0;
}

void
nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
struct nouveau_channel *chan)
{
struct nouveau_ramht_entry *entry, *tmp;
struct nouveau_ramht *ramht;

if (ref)
ref->refcount++;

ramht = *ptr;
if (ramht) {
list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
if (entry->channel == chan)
nouveau_ramht_remove(chan, entry->handle);
}

if (--ramht->refcount == 0) {
nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
kfree(ramht);
}
}
*ptr = ref;
}

@@ -25,7 +25,29 @@
#ifndef __NOUVEAU_RAMHT_H__
#define __NOUVEAU_RAMHT_H__

extern int nouveau_ramht_insert(struct drm_device *, struct nouveau_gpuobj_ref *);
extern void nouveau_ramht_remove(struct drm_device *, struct nouveau_gpuobj_ref *);
struct nouveau_ramht_entry {
struct list_head head;
struct nouveau_channel *channel;
struct nouveau_gpuobj *gpuobj;
u32 handle;
};

struct nouveau_ramht {
struct drm_device *dev;
int refcount;
struct nouveau_gpuobj *gpuobj;
struct list_head entries;
};

extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
struct nouveau_ramht **);
extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
struct nouveau_channel *unref_channel);

extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
struct nouveau_gpuobj *);
extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);

#endif

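As a usage note (not part of the commit itself): the struct nouveau_ramht declared above wraps a backing gpuobj and is shared between channels by reference. A hedged sketch mirroring the calls nouveau_gpuobj_channel_init() and nouveau_gpuobj_channel_takedown() make earlier in this diff; the function names are illustrative:

/* Sketch: create a per-channel RAMHT around a freshly allocated gpuobj. */
static int example_channel_ramht_init(struct drm_device *dev,
				      struct nouveau_channel *chan)
{
	struct nouveau_gpuobj *ramht = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	if (ret)
		return ret;

	/* nouveau_ramht_new() takes its own reference on the backing object */
	ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	return ret;
}

/* Sketch: drop the channel's reference; entries owned by this channel are
 * removed before the table itself is freed on the last reference. */
static void example_channel_ramht_fini(struct nouveau_channel *chan)
{
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
}
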
@@ -234,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev)
}

ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
NVOBJ_FLAG_ALLOW_NO_REFS |
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
@@ -245,7 +244,7 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_page =
alloc_page(GFP_KERNEL|__GFP_DMA32);
if (!dev_priv->gart_info.sg_dummy_page) {
nouveau_gpuobj_del(dev, &gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}

@@ -254,11 +253,17 @@ nouveau_sgdma_init(struct drm_device *dev)
pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
nouveau_gpuobj_del(dev, &gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
return -EFAULT;
}

if (dev_priv->card_type < NV_50) {
/* special case, allocated from global instmem heap so
* cinst is invalid, we use it on all channels though so
* cinst needs to be valid, set it the same as pinst
*/
gpuobj->cinst = gpuobj->pinst;

/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
* on those cards? */
@@ -302,7 +307,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_bus = 0;
}

nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}

int

@@ -35,6 +35,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
#include "nv50_display.h"

static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -437,16 +438,14 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj;
struct nouveau_gpuobj *gpuobj = NULL;
int ret;

ret = nouveau_channel_alloc(dev, &dev_priv->channel,
(struct drm_file *)-2,
NvDmaFB, NvDmaTT);
(struct drm_file *)-2, NvDmaFB, NvDmaTT);
if (ret)
return ret;

gpuobj = NULL;
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vram_size,
NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
@@ -454,26 +453,25 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;

ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
gpuobj, NULL);
ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;

gpuobj = NULL;
ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RW, &gpuobj, NULL);
if (ret)
goto out_err;

ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
gpuobj, NULL);
ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;

return 0;

out_err:
nouveau_gpuobj_del(dev, &gpuobj);
nouveau_channel_free(dev_priv->channel);
dev_priv->channel = NULL;
return ret;

@@ -25,6 +25,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"

void
@@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
if (ret)
return ret;

ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
if (ret)
return ret;

return 0;
ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
return ret;
}

int

@@ -38,10 +38,8 @@
#define NV04_RAMFC_ENGINE 0x14
#define NV04_RAMFC_PULL1_ENGINE 0x18

#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc->gpuobj, \
NV04_RAMFC_##offset, (val))
#define RAMFC_RD(offset) nv_ro32(chan->ramfc->gpuobj, \
NV04_RAMFC_##offset)
#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)

void
nv04_fifo_disable(struct drm_device *dev)
@@ -130,7 +128,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
NV04_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
NULL, &chan->ramfc);
&chan->ramfc);
if (ret)
return ret;

@@ -139,7 +137,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
/* Setup initial state */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -161,7 +159,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

nouveau_gpuobj_ref_del(dev, &chan->ramfc);
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}

static void

@@ -48,7 +48,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)

ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;

@@ -57,7 +57,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
*/
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -80,7 +80,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

nouveau_gpuobj_ref_del(dev, &chan->ramfc);
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}

static void

@@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan)
BUG_ON(1);
}

ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
16, NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramin_grctx);
ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
if (ret)
return ret;

/* Initialise default context values */
ctx_init(dev, chan->ramin_grctx->gpuobj);
ctx_init(dev, chan->ramin_grctx);

/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
nv_wo32(chan->ramin_grctx->gpuobj, idoffs,
nv_wo32(chan->ramin_grctx, idoffs,
(chan->id << 24) | 0x1); /* CTX_USER */

nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4,
chan->ramin_grctx->instance >> 4);
nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
return 0;
}

@@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

if (chan->ramin_grctx)
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);

nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4, 0);
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
}

int
@@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan)

if (!chan->ramin_grctx)
return -EINVAL;
inst = chan->ramin_grctx->instance >> 4;
inst = chan->ramin_grctx->pinst >> 4;

nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -465,7 +461,7 @@ nv20_graph_unload_context(struct drm_device *dev)
chan = pgraph->channel(dev);
if (!chan)
return 0;
inst = chan->ramin_grctx->instance >> 4;
inst = chan->ramin_grctx->pinst >> 4;

nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev)

if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&pgraph->ctx_table);
ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&pgraph->ctx_table);
if (ret)
return ret;
}

nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
pgraph->ctx_table->instance >> 4);
pgraph->ctx_table->pinst >> 4);

nv20_graph_rdi(dev);

@@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table);
nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}

int
@@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev)

if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&pgraph->ctx_table);
ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&pgraph->ctx_table);
if (ret)
return ret;
}

nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
pgraph->ctx_table->instance >> 4);
pgraph->ctx_table->pinst >> 4);

nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -42,7 +42,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)

ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;

@@ -50,7 +50,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)

nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -58,7 +58,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);

/* enable the fifo dma operation */
@@ -77,8 +77,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

if (chan->ramfc)
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}

static void

@@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
struct nouveau_channel *chan = dev_priv->fifos[i];

if (chan && chan->ramin_grctx &&
chan->ramin_grctx->instance == inst)
chan->ramin_grctx->pinst == inst)
return chan;
}

@@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct nouveau_grctx ctx = {};
int ret;

ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
16, NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramin_grctx);
ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
if (ret)
return ret;

/* Initialise default context values */
ctx.dev = chan->dev;
ctx.mode = NOUVEAU_GRCTX_VALS;
ctx.data = chan->ramin_grctx->gpuobj;
ctx.data = chan->ramin_grctx;
nv40_grctx_init(&ctx);

nv_wo32(chan->ramin_grctx->gpuobj, 0,
chan->ramin_grctx->gpuobj->im_pramin->start);
nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->im_pramin->start);
return 0;
}

void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}

static int
@@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan)

if (!chan->ramin_grctx)
return -EINVAL;
inst = chan->ramin_grctx->instance >> 4;
inst = chan->ramin_grctx->pinst >> 4;

ret = nv40_graph_transfer_context(dev, inst, 0);
if (ret)

@@ -30,6 +30,7 @@
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"

static void
@@ -66,12 +67,6 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;

ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
if (ret) {
nouveau_gpuobj_del(dev, &obj);
return ret;
}

nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
nv_wo32(obj, 4, limit);
nv_wo32(obj, 8, offset);
@@ -83,6 +78,12 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
nv_wo32(obj, 20, 0x00020000);
dev_priv->engine.instmem.flush(dev);

ret = nouveau_ramht_insert(evo, name, obj);
nouveau_gpuobj_ref(NULL, &obj);
if (ret) {
return ret;
}

return 0;
}

@@ -90,6 +91,7 @@ static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *chan;
int ret;

@@ -103,10 +105,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
chan->user_get = 4;
chan->user_put = 0;

INIT_LIST_HEAD(&chan->ramht_refs);

ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
nv50_evo_channel_del(pchan);
@@ -120,14 +120,20 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
return ret;
}

ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
0, &chan->ramht);
ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
nv50_evo_channel_del(pchan);
return ret;
}

ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
if (ret) {
nv50_evo_channel_del(pchan);
return ret;
}

if (dev_priv->chipset != 0x50) {
ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
0, 0xffffffff);
@@ -321,7 +327,7 @@ nv50_display_init(struct drm_device *dev)
}
}

nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);

/* initialise fifo */
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),

@@ -1,6 +1,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"

void
@@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;

ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
nouveau_gpuobj_ref(NULL, &eng2d);
if (ret)
return ret;

@ -27,13 +27,14 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"

static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj_ref *cur;
	struct nouveau_gpuobj *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

@ -44,13 +45,13 @@ nv50_fifo_playlist_update(struct drm_device *dev)
	/* We never schedule channel 0 or 127 */
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
			nv_wo32(cur->gpuobj, (nr * 4), i);
			nv_wo32(cur, (nr * 4), i);
			nr++;
		}
	}
	dev_priv->engine.instmem.flush(dev);

	nv_wr32(dev, 0x32f4, cur->instance >> 12);
	nv_wr32(dev, 0x32f4, cur->vinst >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}

@ -65,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel)
	NV_DEBUG(dev, "ch%d\n", channel);

	if (dev_priv->chipset == 0x50)
		inst = chan->ramfc->instance >> 12;
		inst = chan->ramfc->vinst >> 12;
	else
		inst = chan->ramfc->instance >> 8;
		inst = chan->ramfc->vinst >> 8;

	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
		     NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
@ -165,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev)
		goto just_reset;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC,
				     &pfifo->playlist[0]);
	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &pfifo->playlist[0]);
	if (ret) {
		NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC,
				     &pfifo->playlist[1]);
	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &pfifo->playlist[1]);
	if (ret) {
		nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
		nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
		NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
		return ret;
	}

@ -205,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev)
	if (!pfifo->playlist[0])
		return;

	nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
	nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]);
	nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
	nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}

int
@ -228,42 +229,39 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (dev_priv->chipset == 0x50) {
		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst,
					      chan->ramin->gpuobj->vinst, 0x100,
		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
					      chan->ramin->vinst, 0x100,
					      NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
					      NVOBJ_FLAG_ZERO_FREE,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst +
					      0x0400,
					      chan->ramin->gpuobj->vinst +
					      0x0400, 4096, 0, NULL,
					      &chan->cache);
		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
					      chan->ramin->vinst + 0x0400,
					      4096, 0, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
					     NVOBJ_FLAG_ZERO_ALLOC |
					     NVOBJ_FLAG_ZERO_FREE,
					     &chan->ramfc);
		ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
		if (ret)
			return ret;
		ramfc = chan->ramfc->gpuobj;

		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
					     0, &chan->cache);
		ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
					 0, &chan->cache);
		if (ret)
			return ret;
	}
	ramfc = chan->ramfc;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	nv_wo32(ramfc, 0x48, chan->pushbuf->instance >> 4);
	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
	nv_wo32(ramfc, 0x80, (0 << 27) /* 4KiB */ |
			     (4 << 24) /* SEARCH_FULL */ |
			     (chan->ramht->instance >> 4));
			     (chan->ramht->gpuobj->cinst >> 4));
	nv_wo32(ramfc, 0x44, 0x2101ffff);
	nv_wo32(ramfc, 0x60, 0x7fffffff);
	nv_wo32(ramfc, 0x40, 0x00000000);

@ -274,11 +272,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
	nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);

	if (dev_priv->chipset != 0x50) {
		nv_wo32(chan->ramin->gpuobj, 0, chan->id);
		nv_wo32(chan->ramin->gpuobj, 4, chan->ramfc->instance >> 8);
		nv_wo32(chan->ramin, 0, chan->id);
		nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);

		nv_wo32(ramfc, 0x88, chan->cache->instance >> 10);
		nv_wo32(ramfc, 0x98, chan->ramin->instance >> 12);
		nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
		nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
	}

	dev_priv->engine.instmem.flush(dev);
@ -293,12 +291,13 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
	struct nouveau_gpuobj *ramfc = NULL;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* This will ensure the channel is seen as disabled. */
	chan->ramfc = NULL;
	nouveau_gpuobj_ref(chan->ramfc, &ramfc);
	nouveau_gpuobj_ref(NULL, &chan->ramfc);
	nv50_fifo_channel_disable(dev, chan->id);

	/* Dummy channel, also used on ch 127 */

@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
		nv50_fifo_channel_disable(dev, 127);
	nv50_fifo_playlist_update(dev);

	nouveau_gpuobj_ref_del(dev, &ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);
	nouveau_gpuobj_ref(NULL, &ramfc);
	nouveau_gpuobj_ref(NULL, &chan->cache);
}

int
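The destroy path above shows the other half of the new API: instead of stashing a raw pointer and NULLing the channel field, a reference is moved into a local and released once teardown no longer needs the object. A minimal sketch of that move-then-release idiom, mirroring the hunk above:

	struct nouveau_gpuobj *ramfc = NULL;

	nouveau_gpuobj_ref(chan->ramfc, &ramfc);  /* take a local reference */
	nouveau_gpuobj_ref(NULL, &chan->ramfc);   /* channel now reads as disabled */

	/* ... teardown that may still touch the RAMFC ... */

	nouveau_gpuobj_ref(NULL, &ramfc);         /* drop the final reference */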
@ -315,8 +314,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
	struct nouveau_gpuobj *ramfc = chan->ramfc;
	struct nouveau_gpuobj *cache = chan->cache;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);

@ -399,8 +398,8 @@ nv50_fifo_unload_context(struct drm_device *dev)
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc->gpuobj;
	cache = chan->cache->gpuobj;
	ramfc = chan->ramfc;
	cache = chan->cache;

	nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
	nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
@ -27,7 +27,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"

#include "nouveau_ramht.h"
#include "nouveau_grctx.h"

static void

@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev)
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->ramin && chan->ramin->instance == inst)
		if (chan && chan->ramin && chan->ramin->vinst == inst)
			return chan;
	}

@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
	struct nouveau_gpuobj *obj;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
				     0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				     NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
	if (ret)
		return ret;
	obj = chan->ramin_grctx->gpuobj;

	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->instance +
	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
				   pgraph->grctx_size - 1);
	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->instance);
	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = obj;
	ctx.data = chan->ramin_grctx;
	nv50_grctx_init(&ctx);

	nv_wo32(obj, 0x00000, chan->ramin->instance >> 12);
	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);
	return 0;
@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramin || !chan->ramin->gpuobj)
	if (!chan->ramin)
		return;

	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin->gpuobj, i, 0);
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}

static int

@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
int
nv50_graph_load_context(struct nouveau_channel *chan)
{
	uint32_t inst = chan->ramin->instance >> 12;
	uint32_t inst = chan->ramin->vinst >> 12;

	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
	return nv50_graph_do_load_context(chan->dev, inst);
@ -327,15 +325,16 @@ static int
nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
			   int mthd, uint32_t data)
{
	struct nouveau_gpuobj_ref *ref = NULL;
	struct nouveau_gpuobj *gpuobj;

	if (nouveau_gpuobj_ref_find(chan, data, &ref))
	gpuobj = nouveau_ramht_find(chan, data);
	if (!gpuobj)
		return -ENOENT;

	if (nouveau_notifier_offset(ref->gpuobj, NULL))
	if (nouveau_notifier_offset(gpuobj, NULL))
		return -EINVAL;

	chan->nvsw.vblsem = ref->gpuobj;
	chan->nvsw.vblsem = gpuobj;
	chan->nvsw.vblsem_offset = ~0;
	return 0;
}
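Handle lookups change in the same direction: nouveau_ramht_find() hands back the gpuobj itself (or NULL), so callers no longer go through a nouveau_gpuobj_ref wrapper. A minimal sketch, with the handle coming in as a method argument as in the hunk above:

	struct nouveau_gpuobj *gpuobj;

	gpuobj = nouveau_ramht_find(chan, data);
	if (!gpuobj)
		return -ENOENT;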
@ -32,9 +32,9 @@
struct nv50_instmem_priv {
	uint32_t save1700[5]; /* 0x1700->0x1710 */

	struct nouveau_gpuobj_ref *pramin_pt;
	struct nouveau_gpuobj_ref *pramin_bar;
	struct nouveau_gpuobj_ref *fb_bar;
	struct nouveau_gpuobj *pramin_pt;
	struct nouveau_gpuobj *pramin_bar;
	struct nouveau_gpuobj *fb_bar;
};

#define NV50_INSTMEM_PAGE_SHIFT 12

@ -44,15 +44,8 @@ struct nv50_instmem_priv {
/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
 */
#define BAR0_WI32(g, o, v) do { \
	uint32_t offset; \
	if ((g)->im_backing) { \
		offset = (g)->im_backing_start; \
	} else { \
		offset = chan->ramin->gpuobj->im_backing_start; \
		offset += (g)->im_pramin->start; \
	} \
	offset += (o); \
	nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
	u32 offset = (g)->vinst + (o); \
	nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
} while (0)

int
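With each gpuobj now carrying its backing address directly in ->vinst, the BAR0_WI32() macro above loses the im_backing special case and reduces to a single offset computation. The new form expands to roughly:

	nv_wr32(dev, NV_RAMIN + (((g)->vinst + (o)) & 0xfffff), (v));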
@ -142,8 +135,7 @@ nv50_instmem_init(struct drm_device *dev)
	INIT_LIST_HEAD(&chan->ramht_refs);

	/* Channel's PRAMIN object + heap */
	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
				      NULL, &chan->ramin);
	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, &chan->ramin);
	if (ret)
		return ret;

@ -152,16 +144,16 @@ nv50_instmem_init(struct drm_device *dev)

	/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
	ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
				      0x4000, 0, NULL, &chan->ramfc);
				      0x4000, 0, &chan->ramfc);
	if (ret)
		return ret;

	for (i = 0; i < c_vmpd; i += 4)
		BAR0_WI32(chan->ramin->gpuobj, i, 0);
		BAR0_WI32(chan->ramin, i, 0);

	/* VM page directory */
	ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
				      0x4000, 0, &chan->vm_pd, NULL);
				      0x4000, 0, &chan->vm_pd);
	if (ret)
		return ret;
	for (i = 0; i < 0x4000; i += 8) {

@ -172,8 +164,8 @@ nv50_instmem_init(struct drm_device *dev)
	/* PRAMIN page table, cheat and map into VM at 0x0000000000.
	 * We map the entire fake channel into the start of the PRAMIN BAR
	 */
	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
				     0, &priv->pramin_pt);
	ret = nouveau_gpuobj_new(dev, chan, pt_size, 0x1000, 0,
				 &priv->pramin_pt);
	if (ret)
		return ret;

@ -185,76 +177,74 @@ nv50_instmem_init(struct drm_device *dev)

	i = 0;
	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
		BAR0_WI32(priv->pramin_pt, i + 0, lower_32_bits(v));
		BAR0_WI32(priv->pramin_pt, i + 4, upper_32_bits(v));
		v += 0x1000;
		i += 8;
	}

	while (i < pt_size) {
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
		BAR0_WI32(priv->pramin_pt, i + 0, 0x00000000);
		BAR0_WI32(priv->pramin_pt, i + 4, 0x00000000);
		i += 8;
	}

	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->vinst | 0x63);
	BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);

	/* VRAM page table(s), mapped into VM at +1GiB */
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
					     NV50_VM_BLOCK/65536*8, 0, 0,
					     &chan->vm_vram_pt[i]);
		ret = nouveau_gpuobj_new(dev, chan, NV50_VM_BLOCK / 0x10000 * 8,
					 0, 0, &chan->vm_vram_pt[i]);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
				 ret);
			dev_priv->vm_vram_pt_nr = i;
			return ret;
		}
		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
		/*XXX: double-check this is ok */
		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];

		for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
				v += 4)
			BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);

		BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
			  chan->vm_vram_pt[i]->instance | 0x61);
			  chan->vm_vram_pt[i]->vinst | 0x61);
		BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
	}

	/* DMA object for PRAMIN BAR */
	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
				     &priv->pramin_bar);
	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
	if (ret)
		return ret;
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
	BAR0_WI32(priv->pramin_bar, 0x00, 0x7fc00000);
	BAR0_WI32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
	BAR0_WI32(priv->pramin_bar, 0x08, 0x00000000);
	BAR0_WI32(priv->pramin_bar, 0x0c, 0x00000000);
	BAR0_WI32(priv->pramin_bar, 0x10, 0x00000000);
	BAR0_WI32(priv->pramin_bar, 0x14, 0x00000000);

	/* DMA object for FB BAR */
	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
				     &priv->fb_bar);
	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
	if (ret)
		return ret;
	BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
					      pci_resource_len(dev->pdev, 1) - 1);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
	BAR0_WI32(priv->fb_bar, 0x00, 0x7fc00000);
	BAR0_WI32(priv->fb_bar, 0x04, 0x40000000 +
				      pci_resource_len(dev->pdev, 1) - 1);
	BAR0_WI32(priv->fb_bar, 0x08, 0x40000000);
	BAR0_WI32(priv->fb_bar, 0x0c, 0x00000000);
	BAR0_WI32(priv->fb_bar, 0x10, 0x00000000);
	BAR0_WI32(priv->fb_bar, 0x14, 0x00000000);

	/* Poke the relevant regs, and pray it works :) */
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
					     NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
					    NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
					    NV50_PUNK_BAR3_CTXDMA_VALID);

	for (i = 0; i < 8; i++)
@ -301,21 +291,19 @@ nv50_instmem_takedown(struct drm_device *dev)
	for (i = 0x1700; i <= 0x1710; i += 4)
		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

	nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
	nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
	nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
	nouveau_gpuobj_ref(NULL, &priv->fb_bar);
	nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
	nouveau_gpuobj_ref(NULL, &priv->pramin_pt);

	/* Destroy dummy channel */
	if (chan) {
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
			dev_priv->vm_vram_pt[i] = NULL;
		}
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
			nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
		dev_priv->vm_vram_pt_nr = 0;

		nouveau_gpuobj_del(dev, &chan->vm_pd);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		nouveau_gpuobj_ref(NULL, &chan->vm_pd);
		nouveau_gpuobj_ref(NULL, &chan->ramfc);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		drm_mm_takedown(&chan->ramin_heap);

		dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
@ -331,7 +319,7 @@ nv50_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[0];
	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
	struct nouveau_gpuobj *ramin = chan->ramin;
	int i;

	ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);

@ -349,7 +337,7 @@ nv50_instmem_resume(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->fifos[0];
	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
	struct nouveau_gpuobj *ramin = chan->ramin;
	int i;

	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));

@ -359,13 +347,13 @@ nv50_instmem_resume(struct drm_device *dev)
	ramin->im_backing_suspend = NULL;

	/* Poke the relevant regs, and pray it works :) */
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
					     NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
					    NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
					    NV50_PUNK_BAR3_CTXDMA_VALID);

	for (i = 0; i < 8; i++)
@ -424,7 +412,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
	uint32_t pte, pte_end;
	uint64_t vram;

@ -477,8 +465,8 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;

	while (pte < pte_end) {
		nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 0, 0x00000000);
		nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 4, 0x00000000);
		nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
		nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
		pte += 2;
	}
	dev_priv->engine.instmem.flush(dev);