drm/mm: always protect changes to unused_nodes with the unused_lock spinlock

Modifications of the unused_nodes list must be protected by the
unused_lock spinlock. Here is an example of a usage that, without this
patch, has no such protection:

  Process 1: 1. drm_mm_pre_get()   (modifies the unused_nodes list,
                                    under unused_lock)
             2. spin_lock()        (spinlock protecting the mm struct)
             3. drm_mm_put_block() (might modify the unused_nodes list,
                                    but does not protect the modification
                                    with unused_lock)
             4. spin_unlock()      (spinlock protecting the mm struct)
  Process 2: 1. drm_mm_pre_get()   (modifies the unused_nodes list,
                                    under unused_lock)

At this point Process 1 and Process 2 might both be modifying the
unused_nodes list at the same time: the spinlock taken in step 2 does
not help, because drm_mm_put_block() touches unused_nodes while holding
only that lock, not unused_lock. This patch adds unused_lock protection
to drm_mm_put_block() to avoid this race (see the sketch below).
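
For reference, here is a condensed sketch of the drm_mm_pre_get() path
(simplified for illustration, not the verbatim source): every update it
makes to unused_nodes is serialized by unused_lock, which is exactly why
the unlocked update in drm_mm_put_block() is the dangerous one.

  /* Condensed sketch of drm_mm_pre_get(); simplified, not verbatim. */
  int drm_mm_pre_get(struct drm_mm *mm)
  {
          struct drm_mm_node *node;

          spin_lock(&mm->unused_lock);
          while (mm->num_unused < MM_UNUSED_TARGET) {
                  /* Drop the lock so the allocation may sleep. */
                  spin_unlock(&mm->unused_lock);
                  node = kmalloc(sizeof(*node), GFP_KERNEL);
                  spin_lock(&mm->unused_lock);
                  if (node == NULL) {
                          spin_unlock(&mm->unused_lock);
                          return -ENOMEM;
                  }
                  ++mm->num_unused;
                  list_add_tail(&node->fl_entry, &mm->unused_nodes);
          }
          spin_unlock(&mm->unused_lock);
          return 0;
  }

drm_mm_put_block(), by contrast, pushed freed nodes onto the same list
with no lock held at all, which is what this patch fixes.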

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Jerome Glisse, 2009-11-13 20:56:58 +01:00; committed by Dave Airlie
parent 0beb81ab45
commit a698cf34ea
1 changed file with 9 additions and 0 deletions

@@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 	return child;
 }
 
+/* drm_mm_pre_get() - pre allocate drm_mm_node structure
+ * drm_mm: memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
 int drm_mm_pre_get(struct drm_mm *mm)
 {
 	struct drm_mm_node *node;
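
(For context, the function named in the hunk header above,
drm_mm_kmalloc(), is the consumer of this pre-allocated pool; a
condensed sketch, simplified and not verbatim: when an allocation
fails, it falls back to a node from unused_nodes, again under
unused_lock.)

  /* Condensed sketch of drm_mm_kmalloc(); simplified, not verbatim. */
  static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
  {
          struct drm_mm_node *child;

          child = kmalloc(sizeof(*child), atomic ? GFP_ATOMIC : GFP_KERNEL);
          if (child == NULL) {
                  /* Fall back to the pool filled by drm_mm_pre_get(). */
                  spin_lock(&mm->unused_lock);
                  if (!list_empty(&mm->unused_nodes)) {
                          child = list_entry(mm->unused_nodes.next,
                                             struct drm_mm_node, fl_entry);
                          list_del(&child->fl_entry);
                          --mm->num_unused;
                  }
                  spin_unlock(&mm->unused_lock);
          }
          return child;
  }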
@@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 			prev_node->size += next_node->size;
 			list_del(&next_node->ml_entry);
 			list_del(&next_node->fl_entry);
+			spin_lock(&mm->unused_lock);
 			if (mm->num_unused < MM_UNUSED_TARGET) {
 				list_add(&next_node->fl_entry,
 					 &mm->unused_nodes);
 				++mm->num_unused;
 			} else
 				kfree(next_node);
+			spin_unlock(&mm->unused_lock);
 		} else {
 			next_node->size += cur->size;
 			next_node->start = cur->start;
@@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 		list_add(&cur->fl_entry, &mm->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
+		spin_lock(&mm->unused_lock);
 		if (mm->num_unused < MM_UNUSED_TARGET) {
 			list_add(&cur->fl_entry, &mm->unused_nodes);
 			++mm->num_unused;
 		} else
 			kfree(cur);
+		spin_unlock(&mm->unused_lock);
 	}
 }
 
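
With both free paths in drm_mm_put_block() now taking unused_lock, the
lock nests safely inside whatever driver lock guards the mm struct. A
hypothetical caller pattern, sketched for illustration (the example_dev
struct and its mm_lock are made-up names, not part of the drm_mm API):

  struct example_dev {
          struct drm_mm mm;
          spinlock_t mm_lock;   /* driver lock protecting the mm struct */
  };

  static void example_free_block(struct example_dev *dev,
                                 struct drm_mm_node *node)
  {
          /* Refill the node pool; may sleep, takes unused_lock internally. */
          drm_mm_pre_get(&dev->mm);

          spin_lock(&dev->mm_lock);
          /*
           * After this patch, the unused_nodes update inside
           * drm_mm_put_block() is guarded by mm->unused_lock, so a
           * concurrent drm_mm_pre_get() from another process can no
           * longer corrupt the list.
           */
          drm_mm_put_block(node);
          spin_unlock(&dev->mm_lock);
  }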