Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Most of the GPU drivers people were at XDC last week, so I didn't get
  much to send, so I let it rollover until this week. Also Alex was away
  for 3 weeks so amdgpu/radeon got a bit more stuff than usual in one go.

  I've been trying to figure out some 4.2 issues with i915 still (that
  are fixed in 4.3, but bisecting ends up in a merge commit). Hopefully
  next week I or i915 people can work that out"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (46 commits)
  drm: Allow also control clients to check the drm version
  drm/vmwgfx: Fix uninitialized return in vmw_kms_helper_dirty()
  drm/vmwgfx: Fix uninitialized return in vmw_cotable_unbind()
  drm/layerscape: fix handling fsl_dcu_drm_plane_index result
  drm/mgag200: Fix driver_load error handling
  drm/mgag200: Fix error handling paths in fbdev driver
  drm/qxl: only report first monitor as connected if we have no state
  drm/radeon: add quirk for MSI R7 370
  drm/amdgpu: Sprinkle drm_modeset_lock_all to appease locking checks
  drm/radeon: Sprinkle drm_modeset_lock_all to appease locking checks
  drm/amdgpu: sync ce and me with SWITCH_BUFFER(2)
  drm/amdgpu: integer overflow in amdgpu_mode_dumb_create()
  drm/amdgpu: info leak in amdgpu_gem_metadata_ioctl()
  drm/amdgpu: integer overflow in amdgpu_info_ioctl()
  drm/amdgpu: unwind properly in amdgpu_cs_parser_init()
  drm/amdgpu: Fix max_vblank_count value for current display engines
  drm/amdgpu: use kmemdup rather than duplicating its implementation
  drm/amdgpu: fix UVD suspend and resume for VI APU
  drm/amdgpu: fix the UVD suspend sequence order
  drm/amdgpu: make UVD handle checking more strict
  ...
commit cc8b8faea4
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_enable_semaphores;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler *scheduler;
struct amd_gpu_scheduler sched;

spinlock_t fence_lock;
struct mutex *ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_inst_irq;
/* gfx status */
uint32_t gfx_current_status;
/* sync signal for const engine */
unsigned ce_sync_offs;
/* ce ram size*/
unsigned ce_ram_size;
};
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
uint32_t num_ibs;
struct mutex job_lock;
struct amdgpu_user_fence uf;
int (*free_job)(struct amdgpu_job *sched_job);
int (*free_job)(struct amdgpu_job *job);
};
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{

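The header hunk above replaces amdgpu_ring's scheduler pointer with an embedded struct amd_gpu_scheduler sched; later hunks then use container_of() to get from a scheduler pointer back to its enclosing ring. A minimal standalone sketch of that idiom, using stand-in type names rather than the driver's real structures:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sched { int ring_id; };          /* stand-in for amd_gpu_scheduler */

    struct ring {                           /* stand-in for amdgpu_ring */
        int idx;
        struct sched sched;                 /* embedded, not a pointer */
    };

    int main(void)
    {
        struct ring r = { .idx = 3 };
        struct sched *s = &r.sched;
        /* recover the enclosing ring from a pointer to the embedded member */
        struct ring *back = container_of(s, struct ring, sched);
        printf("ring idx = %d\n", back->idx);   /* prints 3 */
        return 0;
    }
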
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
return -ENOMEM;

r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
if (r) {
dev_err(rdev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);

@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
int time;

n = AMDGPU_BENCHMARK_ITERATIONS;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) {
goto out_cleanup;
}
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
NULL, &dobj);
if (r) {
goto out_cleanup;
}

@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,

struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
if (ret)
return ret;
ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,

ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
NULL, &placement, &obj);
NULL, &placement, NULL,
&obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;

@@ -154,42 +154,41 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array = NULL;
uint64_t *chunk_array;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned size, i;
int r = 0;
int ret;

if (!cs->in.num_chunks)
goto out;
if (cs->in.num_chunks == 0)
return 0;

chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;

p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
if (!p->ctx) {
r = -EINVAL;
goto out;
ret = -EINVAL;
goto free_chunk;
}

p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);

/* get chunks */
INIT_LIST_HEAD(&p->validated);
chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (chunk_array == NULL) {
r = -ENOMEM;
goto out;
}

chunk_array_user = (uint64_t __user *)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
r = -EFAULT;
goto out;
ret = -EFAULT;
goto put_bo_list;
}

p->nchunks = cs->in.num_chunks;
p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
if (p->chunks == NULL) {
r = -ENOMEM;
goto out;
if (!p->chunks) {
ret = -ENOMEM;
goto put_bo_list;
}

for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +199,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
chunk_ptr = (void __user *)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
r = -EFAULT;
goto out;
ret = -EFAULT;
i--;
goto free_partial_kdata;
}
p->chunks[i].chunk_id = user_chunk.chunk_id;
p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)

p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
r = -ENOMEM;
goto out;
ret = -ENOMEM;
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
r = -EFAULT;
goto out;
ret = -EFAULT;
goto free_partial_kdata;
}

switch (p->chunks[i].chunk_id) {
@@ -238,15 +239,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
gobj = drm_gem_object_lookup(p->adev->ddev,
p->filp, handle);
if (gobj == NULL) {
r = -EINVAL;
goto out;
ret = -EINVAL;
goto free_partial_kdata;
}

p->uf.bo = gem_to_amdgpu_bo(gobj);
p->uf.offset = fence_data->offset;
} else {
r = -EINVAL;
goto out;
ret = -EINVAL;
goto free_partial_kdata;
}
break;

@@ -254,19 +255,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;

default:
r = -EINVAL;
goto out;
ret = -EINVAL;
goto free_partial_kdata;
}
}

p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!p->ibs)
r = -ENOMEM;
if (!p->ibs) {
ret = -ENOMEM;
goto free_all_kdata;
}

out:
kfree(chunk_array);
return r;
return 0;

free_all_kdata:
i = p->nchunks - 1;
free_partial_kdata:
for (; i >= 0; i--)
drm_free_large(p->chunks[i].kdata);
kfree(p->chunks);
put_bo_list:
if (p->bo_list)
amdgpu_bo_list_put(p->bo_list);
amdgpu_ctx_put(p->ctx);
free_chunk:
kfree(chunk_array);

return ret;
}

/* Returns how many bytes TTM can move per IB.
@ -321,25 +338,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
|
|||
return max(bytes_moved_threshold, 1024*1024ull);
|
||||
}
|
||||
|
||||
int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
|
||||
int amdgpu_cs_list_validate(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct list_head *validated)
|
||||
{
|
||||
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
|
||||
struct amdgpu_vm *vm = &fpriv->vm;
|
||||
struct amdgpu_device *adev = p->adev;
|
||||
struct amdgpu_bo_list_entry *lobj;
|
||||
struct list_head duplicates;
|
||||
struct amdgpu_bo *bo;
|
||||
u64 bytes_moved = 0, initial_bytes_moved;
|
||||
u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
|
||||
int r;
|
||||
|
||||
INIT_LIST_HEAD(&duplicates);
|
||||
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
|
||||
if (unlikely(r != 0)) {
|
||||
return r;
|
||||
}
|
||||
|
||||
list_for_each_entry(lobj, &p->validated, tv.head) {
|
||||
list_for_each_entry(lobj, validated, tv.head) {
|
||||
bo = lobj->robj;
|
||||
if (!bo->pin_count) {
|
||||
u32 domain = lobj->prefered_domains;
|
||||
|
@ -373,7 +382,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
|
|||
domain = lobj->allowed_domains;
|
||||
goto retry;
|
||||
}
|
||||
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
@ -386,6 +394,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
|
|||
{
|
||||
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
|
||||
struct amdgpu_cs_buckets buckets;
|
||||
struct list_head duplicates;
|
||||
bool need_mmap_lock = false;
|
||||
int i, r;
|
||||
|
||||
|
@ -405,8 +414,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
|
|||
if (need_mmap_lock)
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
|
||||
r = amdgpu_cs_list_validate(p);
|
||||
INIT_LIST_HEAD(&duplicates);
|
||||
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
|
||||
if (unlikely(r != 0))
|
||||
goto error_reserve;
|
||||
|
||||
r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
|
||||
if (r)
|
||||
goto error_validate;
|
||||
|
||||
r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
|
||||
|
||||
error_validate:
|
||||
if (r)
|
||||
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
|
||||
|
||||
error_reserve:
|
||||
if (need_mmap_lock)
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
|
||||
|
@ -772,15 +795,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
|
||||
static int amdgpu_cs_free_job(struct amdgpu_job *job)
|
||||
{
|
||||
int i;
|
||||
if (sched_job->ibs)
|
||||
for (i = 0; i < sched_job->num_ibs; i++)
|
||||
amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
|
||||
kfree(sched_job->ibs);
|
||||
if (sched_job->uf.bo)
|
||||
drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
|
||||
if (job->ibs)
|
||||
for (i = 0; i < job->num_ibs; i++)
|
||||
amdgpu_ib_free(job->adev, &job->ibs[i]);
|
||||
kfree(job->ibs);
|
||||
if (job->uf.bo)
|
||||
drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -804,7 +827,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
r = amdgpu_cs_parser_init(parser, data);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to initialize parser !\n");
|
||||
amdgpu_cs_parser_fini(parser, r, false);
|
||||
kfree(parser);
|
||||
up_read(&adev->exclusive_lock);
|
||||
r = amdgpu_cs_handle_lockup(adev, r);
|
||||
return r;
|
||||
|
@ -842,7 +865,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
|
||||
if (!job)
|
||||
return -ENOMEM;
|
||||
job->base.sched = ring->scheduler;
|
||||
job->base.sched = &ring->sched;
|
||||
job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
|
||||
job->adev = parser->adev;
|
||||
job->ibs = parser->ibs;
|
||||
|
@ -857,7 +880,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
|
||||
job->free_job = amdgpu_cs_free_job;
|
||||
mutex_lock(&job->job_lock);
|
||||
r = amd_sched_entity_push_job((struct amd_sched_job *)job);
|
||||
r = amd_sched_entity_push_job(&job->base);
|
||||
if (r) {
|
||||
mutex_unlock(&job->job_lock);
|
||||
amdgpu_cs_free_job(job);
|
||||
|
|
|
@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
|
|||
for (i = 0; i < adev->num_rings; i++) {
|
||||
struct amd_sched_rq *rq;
|
||||
if (kernel)
|
||||
rq = &adev->rings[i]->scheduler->kernel_rq;
|
||||
rq = &adev->rings[i]->sched.kernel_rq;
|
||||
else
|
||||
rq = &adev->rings[i]->scheduler->sched_rq;
|
||||
r = amd_sched_entity_init(adev->rings[i]->scheduler,
|
||||
rq = &adev->rings[i]->sched.sched_rq;
|
||||
r = amd_sched_entity_init(&adev->rings[i]->sched,
|
||||
&ctx->rings[i].entity,
|
||||
rq, amdgpu_sched_jobs);
|
||||
if (r)
|
||||
|
@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
|
|||
|
||||
if (i < adev->num_rings) {
|
||||
for (j = 0; j < i; j++)
|
||||
amd_sched_entity_fini(adev->rings[j]->scheduler,
|
||||
amd_sched_entity_fini(&adev->rings[j]->sched,
|
||||
&ctx->rings[j].entity);
|
||||
kfree(ctx);
|
||||
return r;
|
||||
|
@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
|
|||
|
||||
if (amdgpu_enable_scheduler) {
|
||||
for (i = 0; i < adev->num_rings; i++)
|
||||
amd_sched_entity_fini(adev->rings[i]->scheduler,
|
||||
amd_sched_entity_fini(&adev->rings[i]->sched,
|
||||
&ctx->rings[i].entity);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
|
||||
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->vram_scratch.robj);
|
||||
NULL, NULL, &adev->vram_scratch.robj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
|
|||
|
||||
if (adev->wb.wb_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj);
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&adev->wb.wb_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
|
||||
return r;
|
||||
|
@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
|
|||
drm_kms_helper_poll_disable(dev);
|
||||
|
||||
/* turn off display hw */
|
||||
drm_modeset_lock_all(dev);
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
|
||||
}
|
||||
drm_modeset_unlock_all(dev);
|
||||
|
||||
/* unpin the front buffers */
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
|
@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
|
|||
if (fbcon) {
|
||||
drm_helper_resume_force_mode(dev);
|
||||
/* turn on display hw */
|
||||
drm_modeset_lock_all(dev);
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
|
||||
}
|
||||
drm_modeset_unlock_all(dev);
|
||||
}
|
||||
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
|
|
|
@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
|
|||
int amdgpu_enable_scheduler = 0;
|
||||
int amdgpu_sched_jobs = 16;
|
||||
int amdgpu_sched_hw_submission = 2;
|
||||
int amdgpu_enable_semaphores = 1;
|
||||
|
||||
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
|
||||
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
|
||||
|
@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
|
|||
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
|
||||
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
|
||||
|
||||
MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
|
||||
module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
/* Kaveri */
|
||||
|
|
|
@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
|
|||
* Init the fence driver for the requested ring (all asics).
|
||||
* Helper function for amdgpu_fence_driver_init().
|
||||
*/
|
||||
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
|
||||
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
|
||||
{
|
||||
int i;
|
||||
int i, r;
|
||||
|
||||
ring->fence_drv.cpu_addr = NULL;
|
||||
ring->fence_drv.gpu_addr = 0;
|
||||
|
@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
|
|||
amdgpu_fence_check_lockup);
|
||||
ring->fence_drv.ring = ring;
|
||||
|
||||
init_waitqueue_head(&ring->fence_drv.fence_queue);
|
||||
|
||||
if (amdgpu_enable_scheduler) {
|
||||
ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
|
||||
ring->idx,
|
||||
amdgpu_sched_hw_submission,
|
||||
(void *)ring->adev);
|
||||
if (!ring->scheduler)
|
||||
DRM_ERROR("Failed to create scheduler on ring %d.\n",
|
||||
ring->idx);
|
||||
r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
|
||||
amdgpu_sched_hw_submission, ring->name);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to create scheduler on ring %s.\n",
|
||||
ring->name);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
|
|||
wake_up_all(&ring->fence_drv.fence_queue);
|
||||
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
|
||||
ring->fence_drv.irq_type);
|
||||
if (ring->scheduler)
|
||||
amd_sched_destroy(ring->scheduler);
|
||||
amd_sched_fini(&ring->sched);
|
||||
ring->fence_drv.initialized = false;
|
||||
}
|
||||
mutex_unlock(&adev->ring_lock);
|
||||
|
|
|
@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, adev->gart.table_size,
|
||||
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->gart.robj);
|
||||
NULL, NULL, &adev->gart.robj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
|
|||
}
|
||||
}
|
||||
retry:
|
||||
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
|
||||
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
|
||||
flags, NULL, NULL, &robj);
|
||||
if (r) {
|
||||
if (r != -ERESTARTSYS) {
|
||||
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
|
||||
|
@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
|
|||
&args->data.data_size_bytes,
|
||||
&args->data.flags);
|
||||
} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
|
||||
if (args->data.data_size_bytes > sizeof(args->data.data)) {
|
||||
r = -EINVAL;
|
||||
goto unreserve;
|
||||
}
|
||||
r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
|
||||
if (!r)
|
||||
r = amdgpu_bo_set_metadata(robj, args->data.data,
|
||||
|
@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
|
|||
args->data.flags);
|
||||
}
|
||||
|
||||
unreserve:
|
||||
amdgpu_bo_unreserve(robj);
|
||||
out:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
|
@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
|
|||
struct ttm_validate_buffer tv, *entry;
|
||||
struct amdgpu_bo_list_entry *vm_bos;
|
||||
struct ww_acquire_ctx ticket;
|
||||
struct list_head list;
|
||||
struct list_head list, duplicates;
|
||||
unsigned domain;
|
||||
int r;
|
||||
|
||||
INIT_LIST_HEAD(&list);
|
||||
INIT_LIST_HEAD(&duplicates);
|
||||
|
||||
tv.bo = &bo_va->bo->tbo;
|
||||
tv.shared = true;
|
||||
|
@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
|
|||
if (!vm_bos)
|
||||
return;
|
||||
|
||||
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
|
||||
/* Provide duplicates to avoid -EALREADY */
|
||||
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
|
@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
|
|||
int r;
|
||||
|
||||
args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
|
||||
args->size = args->pitch * args->height;
|
||||
args->size = (u64)args->pitch * args->height;
|
||||
args->size = ALIGN(args->size, PAGE_SIZE);
|
||||
|
||||
r = amdgpu_gem_object_create(adev, args->size, 0,
|
||||
|
|
|
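The amdgpu_mode_dumb_create() hunk above casts args->pitch to u64 before multiplying by args->height, so the buffer size is computed in 64-bit arithmetic instead of wrapping in 32 bits. A small standalone illustration with made-up values (hypothetical, not taken from the driver):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pitch = 32768, height = 262144;     /* hypothetical large mode */

        uint64_t wrapped = pitch * height;           /* 32-bit multiply wraps to 0 */
        uint64_t correct = (uint64_t)pitch * height; /* 64-bit multiply: 8589934592 */

        printf("wrapped=%llu correct=%llu\n",
               (unsigned long long)wrapped, (unsigned long long)correct);
        return 0;
    }
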
@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0,
|
||||
NULL, &adev->irq.ih.ring_obj);
|
||||
NULL, NULL, &adev->irq.ih.ring_obj);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
|
||||
return r;
|
||||
|
|
|
@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
|
|||
*/
|
||||
int amdgpu_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
dev->max_vblank_count = 0x001fffff;
|
||||
dev->max_vblank_count = 0x00ffffff;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
|||
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
|
||||
}
|
||||
case AMDGPU_INFO_READ_MMR_REG: {
|
||||
unsigned n, alloc_size = info->read_mmr_reg.count * 4;
|
||||
unsigned n, alloc_size;
|
||||
uint32_t *regs;
|
||||
unsigned se_num = (info->read_mmr_reg.instance >>
|
||||
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
|
||||
|
@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
|||
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
|
||||
sh_num = 0xffffffff;
|
||||
|
||||
regs = kmalloc(alloc_size, GFP_KERNEL);
|
||||
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
|
||||
if (!regs)
|
||||
return -ENOMEM;
|
||||
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
|
||||
|
||||
for (i = 0; i < info->read_mmr_reg.count; i++)
|
||||
if (amdgpu_asic_read_register(adev, se_num, sh_num,
|
||||
|
|
|
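The AMDGPU_INFO_READ_MMR_REG hunk above switches from kmalloc() with an open-coded count * 4 to kmalloc_array(), which fails instead of silently allocating a short buffer when the multiplication would overflow. A simplified userspace sketch of that kind of overflow check (an illustrative analogue, not the kernel's implementation):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* allocate n elements of elem_size bytes, failing on size_t overflow */
    void *alloc_array_checked(size_t n, size_t elem_size)
    {
        if (elem_size != 0 && n > SIZE_MAX / elem_size)
            return NULL;                 /* n * elem_size would overflow */
        return malloc(n * elem_size);
    }

    int main(void)
    {
        /* an absurdly large count must fail cleanly instead of wrapping */
        printf("%p\n", alloc_array_checked(SIZE_MAX / 2, 4));
        return 0;
    }
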
@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
|||
bool kernel, u32 domain, u64 flags,
|
||||
struct sg_table *sg,
|
||||
struct ttm_placement *placement,
|
||||
struct reservation_object *resv,
|
||||
struct amdgpu_bo **bo_ptr)
|
||||
{
|
||||
struct amdgpu_bo *bo;
|
||||
|
@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
|||
/* Kernel allocation are uninterruptible */
|
||||
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
|
||||
&bo->placement, page_align, !kernel, NULL,
|
||||
acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
|
||||
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
|
||||
if (unlikely(r != 0)) {
|
||||
return r;
|
||||
}
|
||||
|
@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
|||
int amdgpu_bo_create(struct amdgpu_device *adev,
|
||||
unsigned long size, int byte_align,
|
||||
bool kernel, u32 domain, u64 flags,
|
||||
struct sg_table *sg, struct amdgpu_bo **bo_ptr)
|
||||
struct sg_table *sg,
|
||||
struct reservation_object *resv,
|
||||
struct amdgpu_bo **bo_ptr)
|
||||
{
|
||||
struct ttm_placement placement = {0};
|
||||
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
|
||||
|
@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
|
|||
amdgpu_ttm_placement_init(adev, &placement,
|
||||
placements, domain, flags);
|
||||
|
||||
return amdgpu_bo_create_restricted(adev, size, byte_align,
|
||||
kernel, domain, flags,
|
||||
sg,
|
||||
&placement,
|
||||
bo_ptr);
|
||||
return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
|
||||
domain, flags, sg, &placement,
|
||||
resv, bo_ptr);
|
||||
}
|
||||
|
||||
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
|
||||
|
@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
|
|||
if (metadata == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
buffer = kzalloc(metadata_size, GFP_KERNEL);
|
||||
buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
|
||||
if (buffer == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(buffer, metadata, metadata_size);
|
||||
|
||||
kfree(bo->metadata);
|
||||
bo->metadata_flags = flags;
|
||||
bo->metadata = buffer;
|
||||
|
|
|
@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
|
|||
unsigned long size, int byte_align,
|
||||
bool kernel, u32 domain, u64 flags,
|
||||
struct sg_table *sg,
|
||||
struct reservation_object *resv,
|
||||
struct amdgpu_bo **bo_ptr);
|
||||
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
||||
unsigned long size, int byte_align,
|
||||
bool kernel, u32 domain, u64 flags,
|
||||
struct sg_table *sg,
|
||||
struct ttm_placement *placement,
|
||||
struct reservation_object *resv,
|
||||
struct amdgpu_bo **bo_ptr);
|
||||
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
|
||||
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
|
||||
|
|
|
@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
|
|||
struct dma_buf_attachment *attach,
|
||||
struct sg_table *sg)
|
||||
{
|
||||
struct reservation_object *resv = attach->dmabuf->resv;
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct amdgpu_bo *bo;
|
||||
int ret;
|
||||
|
||||
ww_mutex_lock(&resv->lock, NULL);
|
||||
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
|
||||
ww_mutex_unlock(&resv->lock);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
|
|
|
@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
|||
ring->adev = adev;
|
||||
ring->idx = adev->num_rings++;
|
||||
adev->rings[ring->idx] = ring;
|
||||
amdgpu_fence_driver_init_ring(ring);
|
||||
r = amdgpu_fence_driver_init_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
init_waitqueue_head(&ring->fence_drv.fence_queue);
|
||||
|
||||
r = amdgpu_wb_get(adev, &ring->rptr_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
|
||||
|
@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
|||
if (ring->ring_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0,
|
||||
NULL, &ring->ring_obj);
|
||||
NULL, NULL, &ring->ring_obj);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring create failed\n", r);
|
||||
return r;
|
||||
|
|
|
@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
|
|||
INIT_LIST_HEAD(&sa_manager->flist[i]);
|
||||
}
|
||||
|
||||
r = amdgpu_bo_create(adev, size, align, true,
|
||||
domain, 0, NULL, &sa_manager->bo);
|
||||
r = amdgpu_bo_create(adev, size, align, true, domain,
|
||||
0, NULL, NULL, &sa_manager->bo);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
|
||||
return r;
|
||||
|
@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
|
|||
struct amd_sched_fence *s_fence;
|
||||
|
||||
s_fence = to_amd_sched_fence(f);
|
||||
if (s_fence)
|
||||
return s_fence->scheduler->ring_id;
|
||||
if (s_fence) {
|
||||
struct amdgpu_ring *ring;
|
||||
|
||||
ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
|
||||
return ring->idx;
|
||||
}
|
||||
|
||||
a_fence = to_amdgpu_fence(f);
|
||||
if (a_fence)
|
||||
return a_fence->ring->idx;
|
||||
|
@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
|
|||
}
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
|
||||
static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
|
||||
{
|
||||
struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
|
||||
|
||||
if (a_fence)
|
||||
seq_printf(m, " protected by 0x%016llx on ring %d",
|
||||
a_fence->seq, a_fence->ring->idx);
|
||||
|
||||
if (s_fence) {
|
||||
struct amdgpu_ring *ring;
|
||||
|
||||
|
||||
ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
|
||||
seq_printf(m, " protected by 0x%016x on ring %d",
|
||||
s_fence->base.seqno, ring->idx);
|
||||
}
|
||||
}
|
||||
|
||||
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
|
||||
struct seq_file *m)
|
||||
{
|
||||
|
@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
|
|||
}
|
||||
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
|
||||
soffset, eoffset, eoffset - soffset);
|
||||
if (i->fence) {
|
||||
struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
|
||||
if (a_fence)
|
||||
seq_printf(m, " protected by 0x%016llx on ring %d",
|
||||
a_fence->seq, a_fence->ring->idx);
|
||||
if (s_fence)
|
||||
seq_printf(m, " protected by 0x%016x on ring %d",
|
||||
s_fence->base.seqno,
|
||||
s_fence->scheduler->ring_id);
|
||||
|
||||
}
|
||||
if (i->fence)
|
||||
amdgpu_sa_bo_dump_fence(i->fence, m);
|
||||
seq_printf(m, "\n");
|
||||
}
|
||||
spin_unlock(&sa_manager->wq.lock);
|
||||
|
|
|
@ -27,63 +27,48 @@
|
|||
#include <drm/drmP.h>
|
||||
#include "amdgpu.h"
|
||||
|
||||
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
|
||||
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
|
||||
{
|
||||
struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
|
||||
return amdgpu_sync_get_fence(&sched_job->ibs->sync);
|
||||
struct amdgpu_job *job = to_amdgpu_job(sched_job);
|
||||
return amdgpu_sync_get_fence(&job->ibs->sync);
|
||||
}
|
||||
|
||||
static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
|
||||
static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
|
||||
{
|
||||
struct amdgpu_job *sched_job;
|
||||
struct amdgpu_fence *fence;
|
||||
struct amdgpu_fence *fence = NULL;
|
||||
struct amdgpu_job *job;
|
||||
int r;
|
||||
|
||||
if (!job) {
|
||||
if (!sched_job) {
|
||||
DRM_ERROR("job is null\n");
|
||||
return NULL;
|
||||
}
|
||||
sched_job = (struct amdgpu_job *)job;
|
||||
mutex_lock(&sched_job->job_lock);
|
||||
r = amdgpu_ib_schedule(sched_job->adev,
|
||||
sched_job->num_ibs,
|
||||
sched_job->ibs,
|
||||
sched_job->base.owner);
|
||||
if (r)
|
||||
job = to_amdgpu_job(sched_job);
|
||||
mutex_lock(&job->job_lock);
|
||||
r = amdgpu_ib_schedule(job->adev,
|
||||
job->num_ibs,
|
||||
job->ibs,
|
||||
job->base.owner);
|
||||
if (r) {
|
||||
DRM_ERROR("Error scheduling IBs (%d)\n", r);
|
||||
goto err;
|
||||
fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
|
||||
}
|
||||
|
||||
if (sched_job->free_job)
|
||||
sched_job->free_job(sched_job);
|
||||
|
||||
mutex_unlock(&sched_job->job_lock);
|
||||
return &fence->base;
|
||||
fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
|
||||
|
||||
err:
|
||||
DRM_ERROR("Run job error\n");
|
||||
mutex_unlock(&sched_job->job_lock);
|
||||
job->sched->ops->process_job(job);
|
||||
return NULL;
|
||||
}
|
||||
if (job->free_job)
|
||||
job->free_job(job);
|
||||
|
||||
static void amdgpu_sched_process_job(struct amd_sched_job *job)
|
||||
{
|
||||
struct amdgpu_job *sched_job;
|
||||
|
||||
if (!job) {
|
||||
DRM_ERROR("job is null\n");
|
||||
return;
|
||||
}
|
||||
sched_job = (struct amdgpu_job *)job;
|
||||
/* after processing job, free memory */
|
||||
fence_put(&sched_job->base.s_fence->base);
|
||||
kfree(sched_job);
|
||||
mutex_unlock(&job->job_lock);
|
||||
fence_put(&job->base.s_fence->base);
|
||||
kfree(job);
|
||||
return fence ? &fence->base : NULL;
|
||||
}
|
||||
|
||||
struct amd_sched_backend_ops amdgpu_sched_ops = {
|
||||
.dependency = amdgpu_sched_dependency,
|
||||
.run_job = amdgpu_sched_run_job,
|
||||
.process_job = amdgpu_sched_process_job
|
||||
};
|
||||
|
||||
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
|
||||
|
@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
|
|||
kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
|
||||
if (!job)
|
||||
return -ENOMEM;
|
||||
job->base.sched = ring->scheduler;
|
||||
job->base.sched = &ring->sched;
|
||||
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
|
||||
job->adev = adev;
|
||||
job->ibs = ibs;
|
||||
|
@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
|
|||
mutex_init(&job->job_lock);
|
||||
job->free_job = free_job;
|
||||
mutex_lock(&job->job_lock);
|
||||
r = amd_sched_entity_push_job((struct amd_sched_job *)job);
|
||||
r = amd_sched_entity_push_job(&job->base);
|
||||
if (r) {
|
||||
mutex_unlock(&job->job_lock);
|
||||
kfree(job);
|
||||
|
|
|
@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
|
|||
|
||||
if (a_fence)
|
||||
return a_fence->ring->adev == adev;
|
||||
if (s_fence)
|
||||
return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
|
||||
|
||||
if (s_fence) {
|
||||
struct amdgpu_ring *ring;
|
||||
|
||||
ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
|
||||
return ring->adev == adev;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
|
|||
fence_put(e->fence);
|
||||
kfree(e);
|
||||
}
|
||||
|
||||
if (amdgpu_enable_semaphores)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
||||
struct amdgpu_fence *fence = sync->sync_to[i];
|
||||
if (!fence)
|
||||
continue;
|
||||
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
|
||||
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
|
||||
(count >= AMDGPU_NUM_SYNCS)) {
|
||||
/* not enough room, wait manually */
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r)
|
||||
|
|
|
@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
goto out_cleanup;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
|
||||
NULL, &vram_obj);
|
||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM, 0,
|
||||
NULL, NULL, &vram_obj);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to create VRAM object\n");
|
||||
goto out_cleanup;
|
||||
|
@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
struct fence *fence = NULL;
|
||||
|
||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
|
||||
NULL, gtt_obj + i);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to create GTT object %d\n", i);
|
||||
goto out_lclean;
|
||||
|
|
|
@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->stollen_vga_memory);
|
||||
NULL, NULL, &adev->stollen_vga_memory);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
|
|||
const struct common_firmware_header *header = NULL;
|
||||
|
||||
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
|
||||
if (err) {
|
||||
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
|
||||
err = -ENOMEM;
|
||||
|
|
|
@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->uvd.vcpu_bo);
|
||||
NULL, NULL, &adev->uvd.vcpu_bo);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
|
||||
return r;
|
||||
|
@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (msg_type == 1) {
|
||||
switch (msg_type) {
|
||||
case 0:
|
||||
/* it's a create msg, calc image size (width * height) */
|
||||
amdgpu_bo_kunmap(bo);
|
||||
|
||||
/* try to alloc a new handle */
|
||||
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
|
||||
if (atomic_read(&adev->uvd.handles[i]) == handle) {
|
||||
DRM_ERROR("Handle 0x%x already in use!\n", handle);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
|
||||
adev->uvd.filp[i] = ctx->parser->filp;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
DRM_ERROR("No more free UVD handles!\n");
|
||||
return -EINVAL;
|
||||
|
||||
case 1:
|
||||
/* it's a decode msg, calc buffer sizes */
|
||||
r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
|
||||
amdgpu_bo_kunmap(bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
} else if (msg_type == 2) {
|
||||
/* validate the handle */
|
||||
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
|
||||
if (atomic_read(&adev->uvd.handles[i]) == handle) {
|
||||
if (adev->uvd.filp[i] != ctx->parser->filp) {
|
||||
DRM_ERROR("UVD handle collision detected!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
|
||||
return -ENOENT;
|
||||
|
||||
case 2:
|
||||
/* it's a destroy msg, free the handle */
|
||||
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
|
||||
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
|
||||
amdgpu_bo_kunmap(bo);
|
||||
return 0;
|
||||
} else {
|
||||
/* it's a create msg */
|
||||
amdgpu_bo_kunmap(bo);
|
||||
|
||||
if (msg_type != 0) {
|
||||
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* it's a create msg, no special handling needed */
|
||||
default:
|
||||
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* create or decode, validate the handle */
|
||||
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
|
||||
if (atomic_read(&adev->uvd.handles[i]) == handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* handle not found try to alloc a new one */
|
||||
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
|
||||
if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
|
||||
adev->uvd.filp[i] = ctx->parser->filp;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
DRM_ERROR("No more free UVD handles!\n");
|
||||
BUG();
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
|
|||
}
|
||||
|
||||
static int amdgpu_uvd_free_job(
|
||||
struct amdgpu_job *sched_job)
|
||||
struct amdgpu_job *job)
|
||||
{
|
||||
amdgpu_ib_free(sched_job->adev, sched_job->ibs);
|
||||
kfree(sched_job->ibs);
|
||||
amdgpu_ib_free(job->adev, job->ibs);
|
||||
kfree(job->ibs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &bo);
|
||||
NULL, NULL, &bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &bo);
|
||||
NULL, NULL, &bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
|
|||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->vce.vcpu_bo);
|
||||
NULL, NULL, &adev->vce.vcpu_bo);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
|
||||
return r;
|
||||
|
@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
|
|||
}
|
||||
|
||||
static int amdgpu_vce_free_job(
|
||||
struct amdgpu_job *sched_job)
|
||||
struct amdgpu_job *job)
|
||||
{
|
||||
amdgpu_ib_free(sched_job->adev, sched_job->ibs);
|
||||
kfree(sched_job->ibs);
|
||||
amdgpu_ib_free(job->adev, job->ibs);
|
||||
kfree(job->ibs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
|
|||
}
|
||||
}
|
||||
|
||||
int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
|
||||
int amdgpu_vm_free_job(struct amdgpu_job *job)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < sched_job->num_ibs; i++)
|
||||
amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
|
||||
kfree(sched_job->ibs);
|
||||
for (i = 0; i < job->num_ibs; i++)
|
||||
amdgpu_ib_free(job->adev, &job->ibs[i]);
|
||||
kfree(job->ibs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -685,31 +685,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_fence_pts - fence page tables after an update
|
||||
*
|
||||
* @vm: requested vm
|
||||
* @start: start of GPU address range
|
||||
* @end: end of GPU address range
|
||||
* @fence: fence to use
|
||||
*
|
||||
* Fence the page tables in the range @start - @end (cayman+).
|
||||
*
|
||||
* Global and local mutex must be locked!
|
||||
*/
|
||||
static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
|
||||
uint64_t start, uint64_t end,
|
||||
struct fence *fence)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
start >>= amdgpu_vm_block_size;
|
||||
end >>= amdgpu_vm_block_size;
|
||||
|
||||
for (i = start; i <= end; ++i)
|
||||
amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
|
||||
*
|
||||
|
@ -813,8 +788,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
|
|||
if (r)
|
||||
goto error_free;
|
||||
|
||||
amdgpu_vm_fence_pts(vm, mapping->it.start,
|
||||
mapping->it.last + 1, f);
|
||||
amdgpu_bo_fence(vm->page_directory, f, true);
|
||||
if (fence) {
|
||||
fence_put(*fence);
|
||||
*fence = fence_get(f);
|
||||
|
@ -855,7 +829,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
|||
int r;
|
||||
|
||||
if (mem) {
|
||||
addr = mem->start << PAGE_SHIFT;
|
||||
addr = (u64)mem->start << PAGE_SHIFT;
|
||||
if (mem->mem_type != TTM_PL_TT)
|
||||
addr += adev->vm_manager.vram_base_offset;
|
||||
} else {
|
||||
|
@ -1089,6 +1063,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
|||
|
||||
/* walk over the address space and allocate the page tables */
|
||||
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
|
||||
struct reservation_object *resv = vm->page_directory->tbo.resv;
|
||||
struct amdgpu_bo *pt;
|
||||
|
||||
if (vm->page_tables[pt_idx].bo)
|
||||
|
@ -1097,11 +1072,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
|||
/* drop mutex to allocate and clear page table */
|
||||
mutex_unlock(&vm->mutex);
|
||||
|
||||
ww_mutex_lock(&resv->lock, NULL);
|
||||
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
|
||||
AMDGPU_GPU_PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
|
||||
NULL, &pt);
|
||||
NULL, resv, &pt);
|
||||
ww_mutex_unlock(&resv->lock);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
|
@ -1303,7 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|||
r = amdgpu_bo_create(adev, pd_size, align, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
|
||||
NULL, &vm->page_directory);
|
||||
NULL, NULL, &vm->page_directory);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev)
|
|||
* 3. map kernel virtual address
|
||||
*/
|
||||
ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
|
||||
true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
|
||||
true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
toc_buf);
|
||||
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
|
||||
|
@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev)
|
|||
}
|
||||
|
||||
ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
|
||||
true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
|
||||
true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
smu_buf);
|
||||
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
|
||||
|
|
|
@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
|
|||
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
|
||||
true, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, toc_buf);
|
||||
NULL, NULL, toc_buf);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
|
||||
return -ENOMEM;
|
||||
|
@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
|
|||
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
|
||||
true, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, smu_buf);
|
||||
NULL, NULL, smu_buf);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev,
|
||||
adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&adev->gfx.mec.hpd_eop_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
|
||||
|
@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev,
|
||||
sizeof(struct bonaire_mqd),
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&ring->mqd_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
|
||||
|
@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
|
||||
|
||||
/* instruct DE to set a magic number */
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
|
||||
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
|
||||
WRITE_DATA_DST_SEL(5)));
|
||||
amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
|
||||
amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
|
||||
amdgpu_ring_write(ring, 1);
|
||||
|
||||
/* let CE wait till condition satisfied */
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
|
||||
amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
|
||||
WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
|
||||
WAIT_REG_MEM_FUNCTION(3) | /* == */
|
||||
WAIT_REG_MEM_ENGINE(2))); /* ce */
|
||||
amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
|
||||
amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
|
||||
amdgpu_ring_write(ring, 1);
|
||||
amdgpu_ring_write(ring, 0xffffffff);
|
||||
amdgpu_ring_write(ring, 4); /* poll interval */
|
||||
|
||||
/* instruct CE to reset wb of ce_sync to zero */
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
|
||||
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
|
||||
WRITE_DATA_DST_SEL(5) |
|
||||
WR_CONFIRM));
|
||||
amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
|
||||
amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
|
||||
amdgpu_ring_write(ring, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* vm
|
||||
* VMID 0 is the physical GPU addresses as used by the kernel.
|
||||
|
@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
|||
unsigned vm_id, uint64_t pd_addr)
|
||||
{
|
||||
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
|
||||
if (usepfp) {
|
||||
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
}
|
||||
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
|
||||
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
|
||||
|
@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
|||
amdgpu_ring_write(ring, 0x0);
|
||||
|
||||
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
|
||||
gfx_v7_0_ce_sync_me(ring);
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->gfx.rlc.save_restore_obj);
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.save_restore_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
|
||||
return r;
|
||||
|
@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->gfx.rlc.clear_state_obj);
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.clear_state_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
|
@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
|
|||
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, &adev->gfx.rlc.cp_table_obj);
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.cp_table_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
|
@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle)
|
|||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
|
||||
if (r) {
|
||||
DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
|
||||
ring = &adev->gfx.gfx_ring[i];
|
||||
ring->ring_obj = NULL;
|
||||
|
@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle)
|
|||
r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GDS, 0,
|
||||
NULL, &adev->gds.gds_gfx_bo);
|
||||
NULL, NULL, &adev->gds.gds_gfx_bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GWS, 0,
|
||||
NULL, &adev->gds.gws_gfx_bo);
|
||||
NULL, NULL, &adev->gds.gws_gfx_bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_OA, 0,
|
||||
NULL, &adev->gds.oa_gfx_bo);
|
||||
NULL, NULL, &adev->gds.oa_gfx_bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle)
|
|||
for (i = 0; i < adev->gfx.num_compute_rings; i++)
|
||||
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
|
||||
|
||||
amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
|
||||
|
||||
gfx_v7_0_cp_compute_fini(adev);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
gfx_v7_0_mec_fini(adev);
|
||||
|
|
|
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev,
adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->gfx.mec.hpd_eop_obj);
if (r) {
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);

@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}

r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
if (r) {
DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
return r;
}

/* set up the gfx ring */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];

@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle)
/* reserve GDS, GWS and OA resource for gfx */
r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GDS, 0,
AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
NULL, &adev->gds.gds_gfx_bo);
if (r)
return r;

r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GWS, 0,
AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
NULL, &adev->gds.gws_gfx_bo);
if (r)
return r;

r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_OA, 0,
AMDGPU_GEM_DOMAIN_OA, 0, NULL,
NULL, &adev->gds.oa_gfx_bo);
if (r)
return r;

@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);

gfx_v8_0_mec_fini(adev);

return 0;

@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
sizeof(struct vi_mqd),
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
&ring->mqd_obj);
NULL, &ring->mqd_obj);
if (r) {
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
return r;

@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));

}

/**

@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
return true;
}

static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;

/* instruct DE to set a magic number */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(5)));
amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, 1);

/* let CE wait till condition satisfied */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* == */
WAIT_REG_MEM_ENGINE(2))); /* ce */
amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, 1);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */

/* instruct CE to reset wb of ce_sync to zero */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
WRITE_DATA_DST_SEL(5) |
WR_CONFIRM));
amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, 0);
}

static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
uint64_t addr = ring->fence_drv.gpu_addr;

amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3))); /* equal */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */

if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}

amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
WRITE_DATA_DST_SEL(0)) |
WR_CONFIRM);
if (vm_id < 8) {
amdgpu_ring_write(ring,
(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));

@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);

/* synce CE with ME to prevent CE fetch CEIB before context switch done */
gfx_v8_0_ce_sync_me(ring);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
}

@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, toc_buf);
NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;

@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, toc_buf);
NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;

@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, smu_buf);
NULL, NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;

@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

r = uvd_v4_2_hw_fini(adev);
r = amdgpu_uvd_suspend(adev);
if (r)
return r;

r = amdgpu_uvd_suspend(adev);
r = uvd_v4_2_hw_fini(adev);
if (r)
return r;

@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

r = uvd_v5_0_hw_fini(adev);
r = amdgpu_uvd_suspend(adev);
if (r)
return r;

r = amdgpu_uvd_suspend(adev);
r = uvd_v5_0_hw_fini(adev);
if (r)
return r;

@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
}
r = uvd_v6_0_hw_fini(adev);
if (r)
return r;

r = amdgpu_uvd_suspend(adev);
if (r)
return r;

return r;
}

@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

r = amdgpu_uvd_resume(adev);
if (r)
return r;

/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_uvd_resume(adev);
if (r)
return r;
}
r = uvd_v6_0_hw_init(adev);
if (r)
return r;

@@ -1400,7 +1400,8 @@ static int vi_common_early_init(void *handle)
case CHIP_CARRIZO:
adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
/* Disable UVD pg */
adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x1;
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;

@@ -0,0 +1,41 @@
#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpu_sched
#define TRACE_INCLUDE_FILE gpu_sched_trace

TRACE_EVENT(amd_sched_job,
TP_PROTO(struct amd_sched_job *sched_job),
TP_ARGS(sched_job),
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
),

TP_fast_assign(
__entry->entity = sched_job->s_entity;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
__entry->entity, __entry->name, __entry->job_count,
__entry->hw_job_count)
);
#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>

@@ -27,6 +27,9 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

@@ -65,29 +68,29 @@ static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
struct amd_sched_entity *entity;
struct amd_sched_job *job;
struct amd_sched_job *sched_job;

spin_lock(&rq->lock);

entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
job = amd_sched_entity_pop_job(entity);
if (job) {
sched_job = amd_sched_entity_pop_job(entity);
if (sched_job) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return job;
return sched_job;
}
}
}

list_for_each_entry(entity, &rq->entities, list) {

job = amd_sched_entity_pop_job(entity);
if (job) {
sched_job = amd_sched_entity_pop_job(entity);
if (sched_job) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return job;
return sched_job;
}

if (entity == rq->current_entity)

@@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
struct amd_sched_rq *rq,
uint32_t jobs)
{
int r;

if (!(sched && entity && rq))
return -EINVAL;

memset(entity, 0, sizeof(struct amd_sched_entity));
entity->belongto_rq = rq;
entity->scheduler = sched;
entity->fence_context = fence_context_alloc(1);
if(kfifo_alloc(&entity->job_queue,
jobs * sizeof(void *),
GFP_KERNEL))
return -EINVAL;
INIT_LIST_HEAD(&entity->list);
entity->rq = rq;
entity->sched = sched;

spin_lock_init(&entity->queue_lock);
r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
if (r)
return r;

atomic_set(&entity->fence_seq, 0);
entity->fence_context = fence_context_alloc(1);

/* Add the entity to the run queue */
amd_sched_rq_add_entity(rq, entity);

return 0;
}

@@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
return entity->scheduler == sched &&
entity->belongto_rq != NULL;
return entity->sched == sched &&
entity->rq != NULL;
}

/**

@@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
struct amd_sched_rq *rq = entity->belongto_rq;
struct amd_sched_rq *rq = entity->rq;

if (!amd_sched_entity_is_initialized(sched, entity))
return;

@@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
fence_put(f);
amd_sched_wakeup(entity->scheduler);
amd_sched_wakeup(entity->sched);
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->scheduler;
struct amd_sched_job *job;
struct amd_gpu_scheduler *sched = entity->sched;
struct amd_sched_job *sched_job;

if (ACCESS_ONCE(entity->dependency))
return NULL;

if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
return NULL;

while ((entity->dependency = sched->ops->dependency(job))) {
while ((entity->dependency = sched->ops->dependency(sched_job))) {

if (fence_add_callback(entity->dependency, &entity->cb,
amd_sched_entity_wakeup))

@@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
return NULL;
}

return job;
return sched_job;
}

/**
* Helper to submit a job to the job queue
*
* @job The pointer to job required to submit
* @sched_job The pointer to job required to submit
*
* Returns true if we could submit the job.
*/
static bool amd_sched_entity_in(struct amd_sched_job *job)
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = job->s_entity;
struct amd_sched_entity *entity = sched_job->s_entity;
bool added, first = false;

spin_lock(&entity->queue_lock);
added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
added = kfifo_in(&entity->job_queue, &sched_job,
sizeof(sched_job)) == sizeof(sched_job);

if (added && kfifo_len(&entity->job_queue) == sizeof(job))
if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
first = true;

spin_unlock(&entity->queue_lock);

/* first job wakes up scheduler */
if (first)
amd_sched_wakeup(job->sched);
amd_sched_wakeup(sched_job->sched);

return added;
}

@@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
/**
* Submit a job to the job queue
*
* @job The pointer to job required to submit
* @sched_job The pointer to job required to submit
*
* Returns 0 for success, negative error code otherwise.
*/

@@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
fence_get(&fence->base);
sched_job->s_fence = fence;

wait_event(entity->scheduler->job_scheduled,
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));

trace_amd_sched_job(sched_job);
return 0;
}

@@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
struct amd_sched_job *job;
struct amd_sched_job *sched_job;

if (!amd_sched_ready(sched))
return NULL;

/* Kernel run queue has higher priority than normal run queue*/
job = amd_sched_rq_select_job(&sched->kernel_rq);
if (job == NULL)
job = amd_sched_rq_select_job(&sched->sched_rq);
sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
if (sched_job == NULL)
sched_job = amd_sched_rq_select_job(&sched->sched_rq);

return job;
return sched_job;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
struct amd_sched_job *sched_job =
container_of(cb, struct amd_sched_job, cb);
struct amd_gpu_scheduler *sched;
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;

sched = sched_job->sched;
amd_sched_fence_signal(sched_job->s_fence);
atomic_dec(&sched->hw_rq_count);
fence_put(&sched_job->s_fence->base);
sched->ops->process_job(sched_job);
amd_sched_fence_signal(s_fence);
fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}

@@ -338,87 +344,82 @@ static int amd_sched_main(void *param)

while (!kthread_should_stop()) {
struct amd_sched_entity *entity;
struct amd_sched_job *job;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;

wait_event_interruptible(sched->wake_up_worker,
kthread_should_stop() ||
(job = amd_sched_select_job(sched)));
(sched_job = amd_sched_select_job(sched)));

if (!job)
if (!sched_job)
continue;

entity = job->s_entity;
entity = sched_job->s_entity;
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
fence = sched->ops->run_job(job);
fence = sched->ops->run_job(sched_job);
if (fence) {
r = fence_add_callback(fence, &job->cb,
r = fence_add_callback(fence, &s_fence->cb,
amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &job->cb);
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n", r);
fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
}

count = kfifo_out(&entity->job_queue, &job, sizeof(job));
WARN_ON(count != sizeof(job));
count = kfifo_out(&entity->job_queue, &sched_job,
sizeof(sched_job));
WARN_ON(count != sizeof(sched_job));
wake_up(&sched->job_scheduled);
}
return 0;
}

/**
* Create a gpu scheduler
* Init a gpu scheduler instance
*
* @sched The pointer to the scheduler
* @ops The backend operations for this scheduler.
* @ring The the ring id for the scheduler.
* @hw_submissions Number of hw submissions to do.
* @name Name used for debugging
*
* Return the pointer to scheduler for success, otherwise return NULL
* Return 0 on success, otherwise error code.
*/
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
unsigned ring, unsigned hw_submission,
void *priv)
int amd_sched_init(struct amd_gpu_scheduler *sched,
struct amd_sched_backend_ops *ops,
unsigned hw_submission, const char *name)
{
struct amd_gpu_scheduler *sched;

sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
if (!sched)
return NULL;

sched->ops = ops;
sched->ring_id = ring;
sched->hw_submission_limit = hw_submission;
sched->priv = priv;
snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
sched->name = name;
amd_sched_rq_init(&sched->sched_rq);
amd_sched_rq_init(&sched->kernel_rq);

init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
atomic_set(&sched->hw_rq_count, 0);

/* Each scheduler will run on a seperate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
kfree(sched);
return NULL;
DRM_ERROR("Failed to create scheduler for %s.\n", name);
return PTR_ERR(sched->thread);
}

return sched;
return 0;
}

/**
* Destroy a gpu scheduler
*
* @sched The pointer to the scheduler
*
* return 0 if succeed. -1 if failed.
*/
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
kthread_stop(sched->thread);
kfree(sched);
return 0;
}

@@ -38,13 +38,15 @@ struct amd_sched_rq;
*/
struct amd_sched_entity {
struct list_head list;
struct amd_sched_rq *belongto_rq;
atomic_t fence_seq;
/* the job_queue maintains the jobs submitted by clients */
struct kfifo job_queue;
struct amd_sched_rq *rq;
struct amd_gpu_scheduler *sched;

spinlock_t queue_lock;
struct amd_gpu_scheduler *scheduler;
struct kfifo job_queue;

atomic_t fence_seq;
uint64_t fence_context;

struct fence *dependency;
struct fence_cb cb;
};

@@ -62,13 +64,13 @@ struct amd_sched_rq {

struct amd_sched_fence {
struct fence base;
struct amd_gpu_scheduler *scheduler;
struct fence_cb cb;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
};

struct amd_sched_job {
struct fence_cb cb;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;

@@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
struct fence *(*dependency)(struct amd_sched_job *job);
struct fence *(*run_job)(struct amd_sched_job *job);
void (*process_job)(struct amd_sched_job *job);
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
};

/**
* One scheduler is implemented for each hardware ring
*/
struct amd_gpu_scheduler {
struct task_struct *thread;
struct amd_sched_backend_ops *ops;
uint32_t hw_submission_limit;
const char *name;
struct amd_sched_rq sched_rq;
struct amd_sched_rq kernel_rq;
atomic_t hw_rq_count;
struct amd_sched_backend_ops *ops;
uint32_t ring_id;
wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
uint32_t hw_submission_limit;
char name[20];
void *priv;
atomic_t hw_rq_count;
struct task_struct *thread;
};

struct amd_gpu_scheduler *
amd_sched_create(struct amd_sched_backend_ops *ops,
uint32_t ring, uint32_t hw_submission, void *priv);
int amd_sched_destroy(struct amd_gpu_scheduler *sched);
int amd_sched_init(struct amd_gpu_scheduler *sched,
struct amd_sched_backend_ops *ops,
uint32_t hw_submission, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,

@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
if (fence == NULL)
return NULL;
fence->owner = owner;
fence->scheduler = s_entity->scheduler;
fence->sched = s_entity->sched;
spin_lock_init(&fence->lock);

seq = atomic_inc_return(&s_entity->fence_seq);

@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
return (const char *)fence->scheduler->name;
return (const char *)fence->sched->name;
}

static bool amd_sched_fence_enable_signaling(struct fence *f)

@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);

/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),

@@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
unsigned int index, value, ret;
unsigned int value;
int index, ret;

index = fsl_dcu_drm_plane_index(plane);
if (index < 0)

@@ -639,6 +639,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
* read it just before the start of vblank. So try it again
* so we don't accidentally end up spanning a vblank frame
* increment, causing the pipe_update_end() code to squak at us.
*
* The nature of this problem means we can't simply check the ISR
* bit and return the vblank start value; nor can we use the scanline
* debug register in the transcoder as it appears to have the same
* problem. We may need to extend this to include other platforms,
* but so far testing only shows the problem on HSW.
*/
if (IS_HASWELL(dev) && !position) {
int i, temp;

for (i = 0; i < 100; i++) {
udelay(1);
temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
DSL_LINEMASK_GEN3;
if (temp != position) {
position = temp;
break;
}
}
}

/*
* See update_scanline_offset() for the details on the
* scanline_offset adjustment.

@@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)

/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
* @encoder: encoder on which to disable audio
* @intel_encoder: encoder on which to disable audio
*
* The disable sequences must be performed before disabling the transcoder or
* port.

@@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id)
const struct bdb_header *bdb = _bdb;
const u8 *base = _bdb;
int index = 0;
u16 total, current_size;
u32 total, current_size;
u8 current_id;

/* skip to first section */

@@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id)
current_size = *((const u16 *)(base + index));
index += 2;

/* The MIPI Sequence Block v3+ has a separate size field. */
if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
current_size = *((const u32 *)(base + index + 1));

if (index + current_size > total)
return NULL;

@@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
return;
}

/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 3) {
DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
return;
}

DRM_DEBUG_DRIVER("Found MIPI sequence block\n");

block_size = get_blocksize(sequence);

@@ -15087,9 +15087,12 @@ static void readout_plane_state(struct intel_crtc *crtc,

plane_state = to_intel_plane_state(p->base.state);

if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
plane_state->visible = primary_get_hw_state(crtc);
else {
if (plane_state->visible)
crtc->base.state->plane_mask |=
1 << drm_plane_index(&p->base);
} else {
if (active)
p->disable_plane(&p->base, &crtc->base);

@@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper,

sysram = vmalloc(size);
if (!sysram)
return -ENOMEM;
goto err_sysram;

info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info))
return PTR_ERR(info);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_alloc_fbi;
}

info->par = mfbdev;

ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret)
return ret;
goto err_framebuffer_init;

mfbdev->sysram = sysram;
mfbdev->size = size;

@@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper,

DRM_DEBUG_KMS("allocated %dx%d\n",
fb->width, fb->height);

return 0;

err_framebuffer_init:
drm_fb_helper_release_fbi(helper);
err_alloc_fbi:
vfree(sysram);
err_sysram:
drm_gem_object_unreference_unlocked(gobj);

return ret;
}

static int mga_fbdev_destroy(struct drm_device *dev,

@@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev)
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
if (ret)
return ret;
goto err_fb_helper;

ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
if (ret)
goto fini;
goto err_fb_setup;

/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);

ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
if (ret)
goto fini;
goto err_fb_setup;

return 0;

fini:
err_fb_setup:
drm_fb_helper_fini(&mfbdev->helper);
err_fb_helper:
mdev->mfbdev = NULL;

return ret;
}

@@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
}
r = mgag200_mm_init(mdev);
if (r)
goto out;
goto err_mm;

drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;

@@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
r = mgag200_modeset_init(mdev);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
goto out;
goto err_modeset;
}

/* Make small buffers to store a hardware cursor (double buffered icon updates) */

@@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
&mdev->cursor.pixels_1);
mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
&mdev->cursor.pixels_2);
if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
goto cursor_nospace;
mdev->cursor.pixels_current = mdev->cursor.pixels_1;
mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
goto cursor_done;
cursor_nospace:
mdev->cursor.pixels_1 = NULL;
mdev->cursor.pixels_2 = NULL;
dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
cursor_done:
if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
mdev->cursor.pixels_1 = NULL;
mdev->cursor.pixels_2 = NULL;
dev_warn(&dev->pdev->dev,
"Could not allocate space for cursors. Not doing hardware cursors.\n");
} else {
mdev->cursor.pixels_current = mdev->cursor.pixels_1;
mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
}

return 0;

err_modeset:
drm_mode_config_cleanup(dev);
mgag200_mm_fini(mdev);
err_mm:
dev->dev_private = NULL;

out:
if (r)
mgag200_driver_unload(dev);
return r;
}

@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = ddev->dev_private;
int connected;
bool connected = false;

/* The first monitor is always connected */
connected = (output->index == 0) ||
(qdev->client_monitors_config &&
qdev->client_monitors_config->count > output->index &&
qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
if (!qdev->client_monitors_config) {
if (output->index == 0)
connected = true;
} else
connected = qdev->client_monitors_config->count > output->index &&
qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);

DRM_DEBUG("#%d connected: %d\n", output->index, connected);
if (!connected)

@@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)

drm_kms_helper_poll_disable(dev);

drm_modeset_lock_all(dev);
/* turn off display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);

/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {

@@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (fbcon) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
drm_modeset_unlock_all(dev);
}

drm_kms_helper_poll_enable(dev);

@@ -2927,6 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};

@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type || !man->use_type)
continue;

type_ok = ttm_bo_mt_compatible(man, mem_type, place,
&cur_flags);

@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!type_ok)
continue;

type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*

@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (mem_type == TTM_PL_SYSTEM)
break;

if (man->has_type && man->use_type) {
type_found = true;
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;
}
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;

if (mem->mm_node)
break;
}

@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}

if (!type_found)
return -EINVAL;

for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];

@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type)
if (!man->has_type || !man->use_type)
continue;
if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
continue;

type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*

@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret == -ERESTARTSYS)
has_erestartsys = true;
}
ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
return ret;

if (!type_found) {
printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
return -EINVAL;
}

return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

@@ -1,6 +1,6 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI
depends on DRM && PCI && X86
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA

@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
int ret;

if (list_empty(&res->mob_head))
return 0;

@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);

return ret;
return 0;
}

/**

@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;

dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);

dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
dev_priv->mmio_size);
dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
dev_priv->mmio_size);

if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;

@@ -913,7 +909,6 @@ out_no_device:
out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)

@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)

ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);

@@ -376,7 +376,6 @@ struct vmw_private {
uint32_t initial_width;
uint32_t initial_height;
u32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;

@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf);
struct vmw_dma_buffer **p_dma_buf,
struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf,
uint32_t *handle);

@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
uint32_t id, struct vmw_dma_buffer **out,
struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,

@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;

ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL;

@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;

ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL;

@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct drm_crtc *crtc;
u32 num_units = 0;
u32 i, k;
int ret;

dirty->dev_priv = dev_priv;

@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
if (!dirty->cmd) {
DRM_ERROR("Couldn't reserve fifo space "
"for dirty blits.\n");
return ret;
return -ENOMEM;
}
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}

@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}

ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;

@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}

*out_surf = NULL;
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
return ret;
}

@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf)
struct vmw_dma_buffer **p_dma_buf,
struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp;

@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}

*p_dma_buf = &user_bo->dma;
if (p_base) {
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
*handle = user_bo->prime.base.hash.key;

out_no_base_object:

@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
struct vmw_dma_buffer *dma_buf;
struct vmw_user_dma_buffer *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_base_object *buffer_base;
int ret;

if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0

@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,

switch (arg->op) {
case drm_vmw_synccpu_grab:
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
&buffer_base);
if (unlikely(ret != 0))
return ret;

@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
dma);
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
vmw_dmabuf_unreference(&dma_buf);
ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",

@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
return ret;

ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &dma_buf);
req->size, false, &handle, &dma_buf,
NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;

@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
uint32_t handle, struct vmw_dma_buffer **out,
struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base;

@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
if (p_base)
*p_base = base;
else
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;

return 0;

@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,

ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
&dma_buf);
&dma_buf, NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;

@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
struct vmw_dma_buffer *out_buf;
int ret;

ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
if (ret != 0)
return -EINVAL;

@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,

if (buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
&buffer);
&buffer, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
"creation.\n");

@@ -46,6 +46,7 @@ struct vmw_user_surface {
struct vmw_surface srf;
uint32_t size;
struct drm_master *master;
struct ttm_base_object *backup_base;
};

/**

@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;

*p_base = NULL;
ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res);
}

@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
res->backup_size,
true,
&backup_handle,
&res->backup);
&res->backup,
&user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;

@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,

if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup);
&res->backup,
&user_srf->backup_base);
if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
DRM_ERROR("Surface backup buffer is too small.\n");

@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
&backup_handle,
&res->backup);
&res->backup,
&user_srf->backup_base);

if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);