drm/vmwgfx: Fix up user_dmabuf refcounting
If user space calls unreference on a user_dmabuf, it will typically kill the
struct ttm_base_object member, which is responsible for the user-space
visibility. However, the dmabuf part may still be alive and refcounted. In
some situations, like for shared guest-backed surface referencing/opening,
the driver may try to reference the struct ttm_base_object member again,
causing an immediate kernel warning and a later kernel NULL pointer
dereference.

Fix this by always maintaining a reference on the struct ttm_base_object
member in situations where it might subsequently be referenced.

Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
commit 54c12bc374
parent 9fbcc7c007
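The hazard and the fix both reduce to a plain refcounting pattern: a lookup
that hands out the embedded buffer must also pin the base object whenever the
caller may touch it again after user space has dropped its handle. Below is a
minimal, self-contained userspace sketch of that pattern, not the driver's
actual code: the types (base_object, dma_buffer) and helpers (base_get,
base_put, lookup_old, lookup_new) are hypothetical stand-ins for the
ttm/vmwgfx structures.

/* Sketch of the refcounting hazard this patch fixes.
 * Build: cc -std=c11 refcount_sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

struct base_object {		/* stand-in for struct ttm_base_object */
	atomic_int refcount;
	int alive;		/* 0 once the last reference is dropped */
};

struct dma_buffer {		/* stand-in for struct vmw_dma_buffer */
	atomic_int refcount;
	struct base_object base;	/* embedded, as in vmw_user_dma_buffer */
};

static void base_get(struct base_object *b)
{
	/* Referencing a dead base is the bug the commit message describes. */
	if (!b->alive)
		fprintf(stderr, "WARN: referencing a destroyed base object\n");
	atomic_fetch_add(&b->refcount, 1);
}

static void base_put(struct base_object *b)
{
	if (atomic_fetch_sub(&b->refcount, 1) == 1)
		b->alive = 0;	/* user-space visibility torn down */
}

/* Old behaviour: the lookup is net-zero on the base object. */
static struct dma_buffer *lookup_old(struct dma_buffer *bo)
{
	base_get(&bo->base);
	atomic_fetch_add(&bo->refcount, 1);	/* pin only the buffer */
	base_put(&bo->base);
	return bo;
}

/* Fixed behaviour: optionally transfer the base reference to the caller. */
static struct dma_buffer *lookup_new(struct dma_buffer *bo,
				     struct base_object **p_base)
{
	base_get(&bo->base);
	atomic_fetch_add(&bo->refcount, 1);
	if (p_base)
		*p_base = &bo->base;	/* caller now owns this reference */
	else
		base_put(&bo->base);
	return bo;
}

int main(void)
{
	struct dma_buffer bo = {
		.refcount = 1,
		.base = { .refcount = 1, .alive = 1 },	/* ref held by the handle */
	};

	/* Old path: nothing keeps the base alive across the unreference. */
	struct dma_buffer *b = lookup_old(&bo);
	base_put(&bo.base);	/* user space unreferences: base dies */
	base_get(&b->base);	/* later driver reference: WARN fires */

	/* New path: the transferred reference keeps the base pinned. */
	bo.base = (struct base_object){ .refcount = 1, .alive = 1 };
	struct base_object *base;
	b = lookup_new(&bo, &base);
	base_put(&bo.base);	/* user space unreferences: base survives */
	base_get(&b->base);	/* safe: ref transferred by lookup_new() */
	base_put(&b->base);
	base_put(base);		/* caller drops its reference when done */
	return 0;
}

The p_base out-parameter added by the patch plays exactly the role of
lookup_new()'s second argument here: callers that pass NULL keep the old
net-zero behaviour, while callers that need the base object later receive a
reference they must drop with ttm_base_object_unref().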
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -631,7 +631,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				 uint32_t size,
 				 bool shareable,
 				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf);
+				 struct vmw_dma_buffer **p_dma_buf,
+				 struct ttm_base_object **p_base);
 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 				     struct vmw_dma_buffer *dma_buf,
 				     uint32_t *handle);
@@ -645,7 +646,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out);
+				  uint32_t id, struct vmw_dma_buffer **out,
+				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
 	if (ret)
 		goto out_unlock;
 
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	}
 
 	*out_surf = NULL;
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 	return ret;
 }
 
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 			  uint32_t size,
 			  bool shareable,
 			  uint32_t *handle,
-			  struct vmw_dma_buffer **p_dma_buf)
+			  struct vmw_dma_buffer **p_dma_buf,
+			  struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	}
 
 	*p_dma_buf = &user_bo->dma;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
 	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 	struct vmw_dma_buffer *dma_buf;
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_base_object *buffer_base;
 	int ret;
 
 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 
 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+					     &buffer_base);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 				       dma);
 		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 		vmw_dmabuf_unreference(&dma_buf);
+		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    req->size, false, &handle, &dma_buf);
+				    req->size, false, &handle, &dma_buf,
+				    NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 }
 
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-			   uint32_t handle, struct vmw_dma_buffer **out)
+			   uint32_t handle, struct vmw_dma_buffer **out,
+			   struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
 	struct ttm_base_object *base;
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
-	ttm_base_object_unref(&base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
 
 	return 0;
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 				    args->size, false, &args->handle,
-				    &dma_buf);
+				    &dma_buf, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	struct vmw_dma_buffer *out_buf;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
 	if (ret != 0)
 		return -EINVAL;
 
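Taken together, the vmwgfx_resource.c changes define the new calling
convention: pass NULL to keep the old net-zero behaviour, or pass a pointer
to receive a base-object reference that must later be dropped. Condensed from
the synccpu grab path above (error handling and surrounding context elided),
the caller-side pattern is:

	struct vmw_dma_buffer *dma_buf;
	struct ttm_base_object *buffer_base;

	/* A non-NULL last argument keeps the base object pinned for us. */
	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
				     &buffer_base);
	if (unlikely(ret != 0))
		return ret;

	/* ... operate on the buffer ... */

	vmw_dmabuf_unreference(&dma_buf);
	ttm_base_object_unref(&buffer_base);	/* drop the lookup's base ref */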
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 
 	if (buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
-					     &buffer);
+					     &buffer, NULL);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
 				  "creation.\n");
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -46,6 +46,7 @@ struct vmw_user_surface {
 	struct vmw_surface srf;
 	uint32_t size;
 	struct drm_master *master;
+	struct ttm_base_object *backup_base;
 };
 
 /**
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	struct vmw_resource *res = &user_srf->srf.res;
 
 	*p_base = NULL;
+	ttm_base_object_unref(&user_srf->backup_base);
 	vmw_resource_unreference(&res);
 }
 
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 					    res->backup_size,
 					    true,
 					    &backup_handle,
-					    &res->backup);
+					    &res->backup,
+					    &user_srf->backup_base);
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
 			goto out_unlock;
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-					     &res->backup);
+					     &res->backup,
+					     &user_srf->backup_base);
 		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
 		    res->backup_size) {
 			DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 					      req->drm_surface_flags &
 					      drm_vmw_surface_flag_shareable,
 					      &backup_handle,
-					      &res->backup);
+					      &res->backup,
+					      &user_srf->backup_base);
 
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);