drm: kselftest for drm_mm and bottom-up allocation
Check that if we request bottom-up allocation from drm_mm_insert_node() we receive the next available hole from the bottom.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20170202114434.3060-2-chris@chris-wilson.co.uk
parent 4e64e5539d
commit bb18dfcc64
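For context, a minimal sketch (not part of the patch) of how a caller would request a bottom-up allocation. It assumes the drm_mm_insert_node_generic() helper and the DRM_MM_INSERT_LOW mode introduced by the parent drm_mm rework; the wrapper function name is hypothetical:

/*
 * Sketch only, not from this patch: ask drm_mm for the lowest available
 * hole by passing DRM_MM_INSERT_LOW (mode assumed from the parent
 * drm_mm rework; the wrapper name below is hypothetical).
 */
#include <drm/drm_mm.h>

static int example_insert_bottomup(struct drm_mm *mm,
				   struct drm_mm_node *node, u64 size)
{
	/* alignment = 0, color = 0: no placement constraints beyond the mode */
	return drm_mm_insert_node_generic(mm, node, size, 0, 0,
					  DRM_MM_INSERT_LOW);
}

The selftest added below drives the same path through its insert_modes[BOTTOMUP] table entry and the existing expect_insert() helper.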
@@ -17,6 +17,7 @@ selftest(align32, igt_align32)
 selftest(align64, igt_align64)
 selftest(evict, igt_evict)
 selftest(evict_range, igt_evict_range)
+selftest(bottomup, igt_bottomup)
 selftest(topdown, igt_topdown)
 selftest(color, igt_color)
 selftest(color_evict, igt_color_evict)
@@ -1697,6 +1697,106 @@ err:
 	return ret;
 }
 
+static int igt_bottomup(void *ignored)
+{
+	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
+	DRM_RND_STATE(prng, random_seed);
+	const unsigned int count = 8192;
+	unsigned int size;
+	unsigned long *bitmap;
+	struct drm_mm mm;
+	struct drm_mm_node *nodes, *node, *next;
+	unsigned int *order, n, m, o = 0;
+	int ret;
+
+	/* Like igt_topdown, but instead of searching for the last hole,
+	 * we search for the first.
+	 */
+
+	ret = -ENOMEM;
+	nodes = vzalloc(count * sizeof(*nodes));
+	if (!nodes)
+		goto err;
+
+	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+			 GFP_TEMPORARY);
+	if (!bitmap)
+		goto err_nodes;
+
+	order = drm_random_order(count, &prng);
+	if (!order)
+		goto err_bitmap;
+
+	ret = -EINVAL;
+	for (size = 1; size <= 64; size <<= 1) {
+		drm_mm_init(&mm, 0, size*count);
+		for (n = 0; n < count; n++) {
+			if (!expect_insert(&mm, &nodes[n],
+					   size, 0, n,
+					   bottomup)) {
+				pr_err("bottomup insert failed, size %u step %d\n", size, n);
+				goto out;
+			}
+
+			if (!assert_one_hole(&mm, size*(n + 1), size*count))
+				goto out;
+		}
+
+		if (!assert_continuous(&mm, size))
+			goto out;
+
+		drm_random_reorder(order, count, &prng);
+		for_each_prime_number_from(n, 1, min(count, max_prime)) {
+			for (m = 0; m < n; m++) {
+				node = &nodes[order[(o + m) % count]];
+				drm_mm_remove_node(node);
+				__set_bit(node_index(node), bitmap);
+			}
+
+			for (m = 0; m < n; m++) {
+				unsigned int first;
+
+				node = &nodes[order[(o + m) % count]];
+				if (!expect_insert(&mm, node,
+						   size, 0, 0,
+						   bottomup)) {
+					pr_err("insert failed, step %d/%d\n", m, n);
+					goto out;
+				}
+
+				first = find_first_bit(bitmap, count);
+				if (node_index(node) != first) {
+					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
+					       m, n, first, node_index(node));
+					goto out;
+				}
+				__clear_bit(first, bitmap);
+			}
+
+			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
+
+			o += n;
+		}
+
+		drm_mm_for_each_node_safe(node, next, &mm)
+			drm_mm_remove_node(node);
+		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+	}
+
+	ret = 0;
+out:
+	drm_mm_for_each_node_safe(node, next, &mm)
+		drm_mm_remove_node(node);
+	drm_mm_takedown(&mm);
+	kfree(order);
+err_bitmap:
+	kfree(bitmap);
+err_nodes:
+	vfree(nodes);
+err:
+	return ret;
+}
+
 static void separate_adjacent_colors(const struct drm_mm_node *node,
				      unsigned long color,
				      u64 *start,
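A note on the verification logic above: the test removes a random subset of nodes, marks their slots in a bitmap, then re-inserts them one at a time, expecting each bottom-up insertion to land in the lowest free slot (find_first_bit). The node_index() helper already exists in the selftest file and is not shown in this hunk; the sketch below is an assumption about the behaviour the test relies on, given equally sized nodes packed from offset 0:

/*
 * Assumed behaviour of the existing node_index() helper (not shown in
 * this hunk): with equally sized nodes packed from offset 0, the slot
 * index is simply start / size.
 */
#include <linux/math64.h>

static unsigned int node_index(const struct drm_mm_node *node)
{
	return div64_u64(node->start, node->size);
}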