drm/armada: use a private mutex to protect priv->linear
Reusing the Big DRM Lock just leaks, and the few things left that
dev->struct_mutex protected are very well contained - it's just the
linear drm_mm manager. With this armada is completely struct_mutex
free!

v2: Convert things properly and also take the lock in
armada_gem_free_object, and remove the stale comment (Russell).

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit 0b8ebeacf5
parent 4bd3fd443a
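For orientation, a minimal sketch of the pattern this patch adopts: a driver-private mutex that guards only the driver's drm_mm range allocator, instead of dev->struct_mutex. This is not part of the patch; the my_* names are hypothetical, and the drm_mm/mutex calls are the same ones used in the hunks below.

#include <linux/mutex.h>
#include <drm/drm_mm.h>

/* Hypothetical driver-private state; mirrors struct armada_private below. */
struct my_private {
	struct drm_mm linear;		/* protected by linear_lock */
	struct mutex linear_lock;
};

/* Set up the allocator and its lock once at load time. */
static void my_linear_init(struct my_private *priv,
			   unsigned long start, unsigned long size)
{
	drm_mm_init(&priv->linear, start, size);
	mutex_init(&priv->linear_lock);
}

/* Allocate a range; only the drm_mm operation itself needs the lock. */
static int my_linear_alloc(struct my_private *priv, struct drm_mm_node *node,
			   size_t size, unsigned align)
{
	int ret;

	mutex_lock(&priv->linear_lock);
	ret = drm_mm_insert_node(&priv->linear, node, size, align,
				 DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&priv->linear_lock);
	return ret;
}

/* Release a range under the same private lock. */
static void my_linear_free(struct my_private *priv, struct drm_mm_node *node)
{
	mutex_lock(&priv->linear_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&priv->linear_lock);
}

Keeping only the drm_mm manipulation inside the critical section keeps lock hold times short and removes the driver's last dependency on the Big DRM Lock.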
@@ -21,9 +21,9 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
 	struct armada_private *priv = dev->dev_private;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&priv->linear_lock);
 	ret = drm_mm_dump_table(m, &priv->linear);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&priv->linear_lock);
 
 	return ret;
 }
@@ -57,7 +57,8 @@ struct armada_private {
 	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
 	struct drm_fb_helper *fbdev;
 	struct armada_crtc *dcrtc[2];
-	struct drm_mm linear;
+	struct drm_mm linear; /* protected by linear_lock */
+	struct mutex linear_lock;
 	struct drm_property *csc_yuv_prop;
 	struct drm_property *csc_rgb_prop;
 	struct drm_property *colorkey_prop;
@@ -101,6 +101,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
 	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+	mutex_init(&priv->linear_lock);
 
 	ret = component_bind_all(dev->dev, dev);
 	if (ret)
@@ -46,22 +46,26 @@ static size_t roundup_gem_size(size_t size)
 	return roundup(size, PAGE_SIZE);
 }
 
-/* dev->struct_mutex is held here */
 void armada_gem_free_object(struct drm_gem_object *obj)
 {
 	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+	struct armada_private *priv = obj->dev->dev_private;
 
 	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
 
 	drm_gem_free_mmap_offset(&dobj->obj);
 
+	might_lock(&priv->linear_lock);
+
 	if (dobj->page) {
 		/* page backed memory */
 		unsigned int order = get_order(dobj->obj.size);
 		__free_pages(dobj->page, order);
 	} else if (dobj->linear) {
 		/* linear backed memory */
+		mutex_lock(&priv->linear_lock);
 		drm_mm_remove_node(dobj->linear);
+		mutex_unlock(&priv->linear_lock);
 		kfree(dobj->linear);
 		if (dobj->addr)
 			iounmap(dobj->addr);
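Aside, not part of the patch: might_lock() is a lockdep annotation declaring that a code path may take the given lock, so ordering problems against linear_lock are reported even on calls that only hit the page-backed branch and never actually acquire the mutex. A sketch of the idea, reusing the hypothetical my_private/my_linear_free from the block above:

/* might_lock() comes from <linux/lockdep.h> (pulled in via mutex.h). */
static void my_free_object(struct my_private *priv, struct drm_mm_node *node,
			   bool linear_backed)
{
	/* Declare the possible acquisition up front for lockdep. */
	might_lock(&priv->linear_lock);

	if (linear_backed)
		my_linear_free(priv, node);	/* takes linear_lock */
}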
@@ -144,10 +148,10 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 		if (!node)
 			return -ENOSPC;
 
-		mutex_lock(&dev->struct_mutex);
+		mutex_lock(&priv->linear_lock);
 		ret = drm_mm_insert_node(&priv->linear, node, size, align,
 					 DRM_MM_SEARCH_DEFAULT);
-		mutex_unlock(&dev->struct_mutex);
+		mutex_unlock(&priv->linear_lock);
 		if (ret) {
 			kfree(node);
 			return ret;
@@ -158,9 +162,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 		/* Ensure that the memory we're returning is cleared. */
 		ptr = ioremap_wc(obj->linear->start, size);
 		if (!ptr) {
-			mutex_lock(&dev->struct_mutex);
+			mutex_lock(&priv->linear_lock);
 			drm_mm_remove_node(obj->linear);
-			mutex_unlock(&dev->struct_mutex);
+			mutex_unlock(&priv->linear_lock);
 			kfree(obj->linear);
 			obj->linear = NULL;
 			return -ENOMEM;