drm/msm: change gem->vmap() to get/put
Before we can add vmap shrinking, we need to know which vmap'ings are currently in use. So switch to a get/put interface. The put functions are stubs for now.

Signed-off-by: Rob Clark <robdclark@gmail.com>
commit 18f23049f6
parent 68209390f1
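For context, the caller-side pattern this change introduces looks roughly like the sketch below. The sketch is not part of the commit; example_use_bo and bo are hypothetical names, and only the msm_gem_get_vaddr()/msm_gem_put_vaddr() calls come from the patch itself:

/* Hypothetical caller showing the new get/put pairing.
 * msm_gem_get_vaddr() returns the kernel mapping (or an ERR_PTR)
 * and marks it as in use; msm_gem_put_vaddr() drops that use
 * (still a no-op stub in this commit).
 */
static int example_use_bo(struct drm_gem_object *bo)
{
	void *vaddr = msm_gem_get_vaddr(bo);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... access the buffer contents through vaddr ... */

	msm_gem_put_vaddr(bo);
	return 0;
}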
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -407,7 +407,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
+	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
 	if (IS_ERR(adreno_gpu->memptrs)) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
@@ -426,8 +426,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
 	if (gpu->memptrs_bo) {
+		if (gpu->memptrs)
+			msm_gem_put_vaddr(gpu->memptrs_bo);
+
 		if (gpu->memptrs_iova)
 			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+
 		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
 	}
 	release_firmware(gpu->pm4);
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
 	}
 
 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-		data = msm_gem_vaddr(msm_host->tx_gem_obj);
+		data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
 		if (IS_ERR(data)) {
 			ret = PTR_ERR(data);
 			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
 	if (packet.size < len)
 		memset(data + packet.size, 0xff, len - packet.size);
 
+	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+		msm_gem_put_vaddr(msm_host->tx_gem_obj);
+
 	return len;
 }
 
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -200,8 +200,10 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
-void *msm_gem_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
+void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
 void msm_gem_purge(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 	dev->mode_config.fb_base = paddr;
 
-	fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+	fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
 	if (IS_ERR(fbi->screen_base)) {
 		ret = PTR_ERR(fbi->screen_base);
 		goto fail_unlock;
@@ -251,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev)
 
 	/* this will free the backing object */
 	if (fbdev->fb) {
+		msm_gem_put_vaddr(fbdev->bo);
 		drm_framebuffer_unregister_private(fbdev->fb);
 		drm_framebuffer_remove(fbdev->fb);
 	}
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -408,7 +408,7 @@ fail:
 	return ret;
 }
 
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -424,15 +424,26 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
 	return msm_obj->vaddr;
 }
 
-void *msm_gem_vaddr(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
 	void *ret;
 	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_vaddr_locked(obj);
+	ret = msm_gem_get_vaddr_locked(obj);
 	mutex_unlock(&obj->dev->struct_mutex);
 	return ret;
 }
 
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+{
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	/* no-op for now */
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+	/* no-op for now */
+}
+
 /* Update madvise status, returns true if not purged, else
  * false or -errno.
  */
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
 {
-	return msm_gem_vaddr(obj);
+	return msm_gem_get_vaddr(obj);
 }
 
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
-	/* TODO msm_gem_vunmap() */
+	msm_gem_put_vaddr(obj);
 }
 
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -279,7 +279,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
 	/* For now, just map the entire thing.  Eventually we probably
 	 * to do it page-by-page, w/ kmap() if not vmap()d..
 	 */
-	ptr = msm_gem_vaddr_locked(&obj->base);
+	ptr = msm_gem_get_vaddr_locked(&obj->base);
 
 	if (IS_ERR(ptr)) {
 		ret = PTR_ERR(ptr);
@@ -332,6 +332,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
 		last_offset = off;
 	}
 
+	msm_gem_put_vaddr_locked(&obj->base);
+
 	return 0;
 }
 
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -310,7 +310,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 		uint32_t iova = submit->cmd[i].iova;
 		uint32_t szd  = submit->cmd[i].size; /* in dwords */
 		struct msm_gem_object *obj = submit->bos[idx].obj;
-		const char *buf = msm_gem_vaddr_locked(&obj->base);
+		const char *buf = msm_gem_get_vaddr_locked(&obj->base);
 
 		if (IS_ERR(buf))
 			continue;
@@ -335,6 +335,8 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 					(uint32_t[2]){ iova, szd }, 8);
 			break;
 		}
+
+		msm_gem_put_vaddr_locked(&obj->base);
 	}
 }
 #endif
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 		goto fail;
 	}
 
-	ring->start = msm_gem_vaddr_locked(ring->bo);
+	ring->start = msm_gem_get_vaddr_locked(ring->bo);
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
 		goto fail;
@@ -59,7 +59,9 @@ fail:
 
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 {
-	if (ring->bo)
+	if (ring->bo) {
+		msm_gem_put_vaddr(ring->bo);
 		drm_gem_object_unreference_unlocked(ring->bo);
+	}
 	kfree(ring);
 }