drm/gem: completely close gem_open vs. gem_close races
The gem flink name holds a reference onto the object itself, and this self-reference would prevent an flink'ed object from ever being freed. To break that loop we remove the flink name when the last userspace handle disappears, i.e. when obj->handle_count reaches 0.

Now in gem_open we drop the dev->object_name_lock between the flink name lookup and actually adding the handle. This means a concurrent gem_close of the last handle could result in the flink name getting reaped right in between, i.e.

	Thread 1		Thread 2

	gem_open		gem_close
	flink -> obj lookup
				handle_count drops to 0
				remove flink name
	create_handle
	handle_count++

If someone now flinks this object again, we'll get a new flink name.

We can close this race by removing the lock dropping and making the entire lookup+handle_create sequence atomic. Unfortunately, to still be able to share the handle_create logic this requires a handle_create_tail function which drops the lock - we can't hold the object_name_lock while calling into a driver's ->gem_open callback.

Note that for flink fixing this race isn't really important, since racing gem_open against gem_close is clearly a userspace bug, and no matter how the race ends, we won't leak any references. But with dma-buf, where the userspace dma-buf fd itself is refcounted, this is a valid sequence and hence we should fix it. Therefore this patch here is just a warm-up exercise (and for consistency between flink buffer sharing and dma-buf buffer sharing with self-imports).

Also note that this extension of the critical section in gem_open protected by dev->object_name_lock only works because it's now a mutex: a spinlock would conflict with the potential memory allocation in idr_preload().

This is exercised by igt/gem_flink_race/flink_name.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit 20228c4478
parent cd4f013f3a
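Condensed from the drm_gem_open_ioctl hunk below (error handling trimmed, not a literal copy of the patch), the race-free sequence this commit establishes looks roughly like this:

	/* Lookup and handle creation now happen under one critical section,
	 * so a concurrent gem_close can no longer reap the flink name in
	 * between the two steps.
	 */
	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);	/* still under object_name_lock */
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail() expects object_name_lock to be held
	 * and drops it before calling into the driver's ->gem_open callback.
	 */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);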
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -308,23 +308,26 @@ int drm_gem_dumb_destroy(struct drm_file *file,
 EXPORT_SYMBOL(drm_gem_dumb_destroy);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_handle_create_tail - internal functions to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either an flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
-		       struct drm_gem_object *obj,
-		       u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+			   struct drm_gem_object *obj,
+			   u32 *handlep)
 {
 	struct drm_device *dev = obj->dev;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
 	/*
 	 * Get the user-visible handle using idr.  Preload and perform
 	 * allocation under our spinlock.
 	 */
-	mutex_lock(&dev->object_name_lock);
 	idr_preload(GFP_KERNEL);
 	spin_lock(&file_priv->table_lock);
 
@@ -351,6 +354,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
 	return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		      struct drm_gem_object *obj,
+		      u32 *handlep)
+{
+	mutex_lock(&obj->dev->object_name_lock);
+
+	return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
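For drivers that just want a handle for a freshly created object, nothing changes: drm_gem_handle_create() keeps its signature and now simply takes dev->object_name_lock itself before delegating to the tail function. A minimal, hypothetical driver-side sketch (my_driver_gem_create and my_driver_object_alloc are illustrative names, not from this patch):

	/* Hypothetical driver path, for illustration only. */
	static int my_driver_gem_create(struct drm_device *dev,
					struct drm_file *file_priv,
					size_t size, u32 *handle)
	{
		struct drm_gem_object *obj;
		int ret;

		obj = my_driver_object_alloc(dev, size);	/* driver-specific allocation */
		if (!obj)
			return -ENOMEM;

		/* Takes and (via the tail) drops dev->object_name_lock internally. */
		ret = drm_gem_handle_create(file_priv, obj, handle);

		/* Drop the allocation reference; on success the handle holds one. */
		drm_gem_object_unreference_unlocked(obj);

		return ret;
	}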
@@ -627,13 +645,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->object_name_lock);
 	obj = idr_find(&dev->object_name_idr, (int) args->name);
-	if (obj)
+	if (obj) {
 		drm_gem_object_reference(obj);
-	mutex_unlock(&dev->object_name_lock);
-	if (!obj)
+	} else {
+		mutex_unlock(&dev->object_name_lock);
 		return -ENOENT;
+	}
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
 	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1575,6 +1575,9 @@ drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 	}
 }
 
+int drm_gem_handle_create_tail(struct drm_file *file_priv,
+			       struct drm_gem_object *obj,
+			       u32 *handlep);
 int drm_gem_handle_create(struct drm_file *file_priv,
 			  struct drm_gem_object *obj,
 			  u32 *handlep);