blob: 6d2a5cd211f3cf2a993230d9b625b5369c47d59e [file] [log] [blame]
Chris Wilson50f00332016-12-22 08:36:09 +00001/*
2 * Test cases for the drm_mm range manager
3 */
4
5#define pr_fmt(fmt) "drm_mm: " fmt
6
7#include <linux/module.h>
8#include <linux/prime_numbers.h>
9#include <linux/slab.h>
10#include <linux/random.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_mm.h>
14
15#include "../lib/drm_random.h"
16
17#define TESTS "drm_mm_selftests.h"
18#include "drm_selftest.h"
19
20static unsigned int random_seed;
21static unsigned int max_iterations = 8192;
22static unsigned int max_prime = 128;
23
/* Symbolic indices into insert_modes[] below. */
enum {
	DEFAULT,
	TOPDOWN,
	BEST,
};

/*
 * Table of drm_mm search/create flag combinations exercised by the
 * insertion tests; evict_modes[] is the subset used by the eviction
 * tests. Both tables are terminated by a zeroed sentinel entry
 * (iterated until mode->name == NULL).
 */
static const struct insert_mode {
	const char *name;
	unsigned int search_flags;
	unsigned int create_flags;
} insert_modes[] = {
	[DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
	{}
}, evict_modes[] = {
	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	{}
};
44
/* Trivial self-check that the selftest harness itself runs. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);

	return 0;
}
50
Chris Wilson393b50f2016-12-22 08:36:10 +000051static bool assert_no_holes(const struct drm_mm *mm)
52{
53 struct drm_mm_node *hole;
54 u64 hole_start, hole_end;
55 unsigned long count;
56
57 count = 0;
58 drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
59 count++;
60 if (count) {
61 pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
62 return false;
63 }
64
65 drm_mm_for_each_node(hole, mm) {
Chris Wilson3f85fb32016-12-22 08:36:37 +000066 if (drm_mm_hole_follows(hole)) {
Chris Wilson393b50f2016-12-22 08:36:10 +000067 pr_err("Hole follows node, expected none!\n");
68 return false;
69 }
70 }
71
72 return true;
73}
74
75static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
76{
77 struct drm_mm_node *hole;
78 u64 hole_start, hole_end;
79 unsigned long count;
80 bool ok = true;
81
82 if (end <= start)
83 return true;
84
85 count = 0;
86 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
87 if (start != hole_start || end != hole_end) {
88 if (ok)
89 pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
90 hole_start, hole_end,
91 start, end);
92 ok = false;
93 }
94 count++;
95 }
96 if (count != 1) {
97 pr_err("Expected to find one hole, found %lu instead\n", count);
98 ok = false;
99 }
100
101 return ok;
102}
103
/*
 * Check that @mm is packed solid with equally-sized nodes starting at
 * address 0: no holes, nodes in address order with no gaps, and each
 * node is returned by a range lookup over its own [addr, addr+size) span.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;	/* expected start of the next node */
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range walk over this span must yield only this node. */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
154
Chris Wilson78866922016-12-22 08:36:13 +0000155static u64 misalignment(struct drm_mm_node *node, u64 alignment)
156{
157 u64 rem;
158
159 if (!alignment)
160 return 0;
161
162 div64_u64_rem(node->start, alignment, &rem);
163 return rem;
164}
165
166static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
167 u64 size, u64 alignment, unsigned long color)
168{
169 bool ok = true;
170
171 if (!drm_mm_node_allocated(node) || node->mm != mm) {
172 pr_err("node not allocated\n");
173 ok = false;
174 }
175
176 if (node->size != size) {
177 pr_err("node has wrong size, found %llu, expected %llu\n",
178 node->size, size);
179 ok = false;
180 }
181
182 if (misalignment(node, alignment)) {
183 pr_err("node is misalinged, start %llx rem %llu, expected alignment %llu\n",
184 node->start, misalignment(node, alignment), alignment);
185 ok = false;
186 }
187
188 if (node->color != color) {
189 pr_err("node has wrong color, found %lu, expected %lu\n",
190 node->color, color);
191 ok = false;
192 }
193
194 return ok;
195}
196
/* Dump the full contents of @mm via the debug printer, tagged with the
 * calling function's name. Multi-statement macro, hence do { } while (0).
 */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
200
/*
 * Basic lifecycle test for struct drm_mm: a zeroed struct must not claim
 * to be initialised, drm_mm_init() must produce a clean mm consisting of
 * one hole covering the whole range, filling the range removes the hole,
 * and emptying it restores the single hole. Returns 0 on success or a
 * negative errno; dumps the mm state on failure.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison first so drm_mm_init() must set every field it relies on */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
261
Chris Wilson06df8ac2016-12-22 08:36:11 +0000262static int igt_debug(void *ignored)
263{
264 struct drm_mm mm;
265 struct drm_mm_node nodes[2];
266 int ret;
267
268 /* Create a small drm_mm with a couple of nodes and a few holes, and
269 * check that the debug iterator doesn't explode over a trivial drm_mm.
270 */
271
272 drm_mm_init(&mm, 0, 4096);
273
274 memset(nodes, 0, sizeof(nodes));
275 nodes[0].start = 512;
276 nodes[0].size = 1024;
277 ret = drm_mm_reserve_node(&mm, &nodes[0]);
278 if (ret) {
279 pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
280 nodes[0].start, nodes[0].size);
281 return ret;
282 }
283
284 nodes[1].size = 1024;
285 nodes[1].start = 4096 - 512 - nodes[1].size;
286 ret = drm_mm_reserve_node(&mm, &nodes[1]);
287 if (ret) {
288 pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
289 nodes[1].start, nodes[1].size);
290 return ret;
291 }
292
Daniel Vetterb5c37142016-12-29 12:09:24 +0100293 show_mm(&mm);
Chris Wilson06df8ac2016-12-22 08:36:11 +0000294 return 0;
295}
296
Chris Wilson900537d2016-12-22 08:36:12 +0000297static struct drm_mm_node *set_node(struct drm_mm_node *node,
298 u64 start, u64 size)
299{
300 node->start = start;
301 node->size = size;
302 return node;
303}
304
305static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
306{
307 int err;
308
309 err = drm_mm_reserve_node(mm, node);
310 if (likely(err == -ENOSPC))
311 return true;
312
313 if (!err) {
314 pr_err("impossible reserve succeeded, node %llu + %llu\n",
315 node->start, node->size);
316 drm_mm_remove_node(node);
317 } else {
318 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
319 err, -ENOSPC, node->start, node->size);
320 }
321 return false;
322}
323
/*
 * Drive expect_reserve_fail() with a table of degenerate placements for
 * an mm managing [0, count * size): zero-sized nodes, spans straddling
 * or beyond either end, and wrapping (negative, i.e. huge unsigned)
 * sizes. Every entry must be rejected. The B() macro records each
 * start/size pair along with its source text for diagnostics.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
368
/*
 * Exercise drm_mm_reserve_node() over an mm of @count slots of @size
 * bytes each: reject out-of-range/overlapping requests, fill the mm in
 * random order, remove/reinsert nodes singly and in prime-sized batches,
 * verifying the mm stays fully populated and ordered throughout.
 * Returns 0 on success or a negative errno.
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	/* For exercising drm_mm_reserve_node(), we want to check that
	 * reservations outside of the drm_mm range are rejected, and to
	 * overlapping and otherwise already occupied ranges. Afterwards,
	 * the tree and nodes should be intact.
	 */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(sizeof(*nodes) * count);
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	/* Fill every slot, visiting them in shuffled order */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		/* Reserving the same node twice must fail */
		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			/* NOTE(review): message reports nodes[n].start but the
			 * node operated on is nodes[order[n]] — misleading
			 * diagnostic, though control flow is unaffected.
			 */
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		o += n;	/* rotate the window so batches don't repeat */

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
496
497static int igt_reserve(void *ignored)
498{
499 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
500 int n, ret;
501
502 for_each_prime_number_from(n, 1, 54) {
503 u64 size = BIT_ULL(n);
504
505 ret = __igt_reserve(count, size - 1);
506 if (ret)
507 return ret;
508
509 ret = __igt_reserve(count, size);
510 if (ret)
511 return ret;
512
513 ret = __igt_reserve(count, size + 1);
514 if (ret)
515 return ret;
516 }
517
518 return 0;
519}
520
Chris Wilson78866922016-12-22 08:36:13 +0000521static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
522 u64 size, u64 alignment, unsigned long color,
523 const struct insert_mode *mode)
524{
525 int err;
526
527 err = drm_mm_insert_node_generic(mm, node,
528 size, alignment, color,
529 mode->search_flags,
530 mode->create_flags);
531 if (err) {
532 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
533 size, alignment, color, mode->name, err);
534 return false;
535 }
536
537 if (!assert_node(node, mm, size, alignment, color)) {
538 drm_mm_remove_node(node);
539 return false;
540 }
541
542 return true;
543}
544
545static bool expect_insert_fail(struct drm_mm *mm, u64 size)
546{
547 struct drm_mm_node tmp = {};
548 int err;
549
550 err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
551 if (likely(err == -ENOSPC))
552 return true;
553
554 if (!err) {
555 pr_err("impossible insert succeeded, node %llu + %llu\n",
556 tmp.start, tmp.size);
557 drm_mm_remove_node(&tmp);
558 } else {
559 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
560 err, -ENOSPC, size);
561 }
562 return false;
563}
564
/*
 * Core insertion stress test: for every insert mode, fill an mm of
 * @count * @size bytes with @count nodes, then check reinsertion of
 * single nodes and prime-sized batches keeps the mm fully packed.
 * With @replace set, each insertion goes through a temporary node that
 * is then swapped in via drm_mm_replace_node(), verifying the target
 * inherits the placement. Returns 0 on success or a negative errno.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert via a scratch node first */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;	/* rotate the batch window */

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Drain the mm before trying the next insert mode */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
696
697static int igt_insert(void *ignored)
698{
699 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
700 unsigned int n;
701 int ret;
702
703 for_each_prime_number_from(n, 1, 54) {
704 u64 size = BIT_ULL(n);
705
Chris Wilson2bd966d2016-12-22 08:36:14 +0000706 ret = __igt_insert(count, size - 1, false);
Chris Wilson78866922016-12-22 08:36:13 +0000707 if (ret)
708 return ret;
709
Chris Wilson2bd966d2016-12-22 08:36:14 +0000710 ret = __igt_insert(count, size, false);
Chris Wilson78866922016-12-22 08:36:13 +0000711 if (ret)
712 return ret;
713
Chris Wilson2bd966d2016-12-22 08:36:14 +0000714 ret = __igt_insert(count, size + 1, false);
715 }
716
717 return 0;
718}
719
720static int igt_replace(void *ignored)
721{
722 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
723 unsigned int n;
724 int ret;
725
726 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
727 * then replacing it with the intended node. We want to check that
728 * the tree is intact and all the information we need is carried
729 * across to the target node.
730 */
731
732 for_each_prime_number_from(n, 1, 54) {
733 u64 size = BIT_ULL(n);
734
735 ret = __igt_insert(count, size - 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000736 if (ret)
737 return ret;
Chris Wilson2bd966d2016-12-22 08:36:14 +0000738
739 ret = __igt_insert(count, size, true);
740 if (ret)
741 return ret;
742
743 ret = __igt_insert(count, size + 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000744 }
745
746 return 0;
747}
748
Chris Wilson2fba0de2016-12-22 08:36:15 +0000749static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
750 u64 size, u64 alignment, unsigned long color,
751 u64 range_start, u64 range_end,
752 const struct insert_mode *mode)
753{
754 int err;
755
756 err = drm_mm_insert_node_in_range_generic(mm, node,
757 size, alignment, color,
758 range_start, range_end,
759 mode->search_flags,
760 mode->create_flags);
761 if (err) {
762 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
763 size, alignment, color, mode->name,
764 range_start, range_end, err);
765 return false;
766 }
767
768 if (!assert_node(node, mm, size, alignment, color)) {
769 drm_mm_remove_node(node);
770 return false;
771 }
772
773 return true;
774}
775
776static bool expect_insert_in_range_fail(struct drm_mm *mm,
777 u64 size,
778 u64 range_start,
779 u64 range_end)
780{
781 struct drm_mm_node tmp = {};
782 int err;
783
784 err = drm_mm_insert_node_in_range_generic(mm, &tmp,
785 size, 0, 0,
786 range_start, range_end,
787 DRM_MM_SEARCH_DEFAULT,
788 DRM_MM_CREATE_DEFAULT);
789 if (likely(err == -ENOSPC))
790 return true;
791
792 if (!err) {
793 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
794 tmp.start, tmp.size, range_start, range_end);
795 drm_mm_remove_node(&tmp);
796 } else {
797 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
798 err, -ENOSPC, size, range_start, range_end);
799 }
800
801 return false;
802}
803
/*
 * Check that the window [start, end) is completely packed with
 * size-aligned nodes of @size bytes: another insertion must fail, the
 * nodes must sit at consecutive multiples of @size within the window,
 * only the final node may be followed by a hole (outside the window),
 * and range walks below @start / above @end must yield nothing.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first size-multiple slot at or above @start */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* A trailing hole is only legal if it extends past @end */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	drm_mm_for_each_node_in_range(node, mm, 0, start) {
		if (node) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
		if (node) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
862
/*
 * Range-restricted variant of __igt_insert(): fill only the window
 * [start, end) of an mm spanning [0, count * size) for every insert
 * mode, then check single-node remove/reinsert refills the same slot.
 * Returns 0 on success or a negative errno.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last whole size-multiple slots inside [start, end) */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Drain before the next insert mode */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
948
949static int insert_outside_range(void)
950{
951 struct drm_mm mm;
952 const unsigned int start = 1024;
953 const unsigned int end = 2048;
954 const unsigned int size = end - start;
955
956 drm_mm_init(&mm, start, size);
957
958 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
959 return -EINVAL;
960
961 if (!expect_insert_in_range_fail(&mm, size,
962 start - size/2, start + (size+1)/2))
963 return -EINVAL;
964
965 if (!expect_insert_in_range_fail(&mm, size,
966 end - (size+1)/2, end + size/2))
967 return -EINVAL;
968
969 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
970 return -EINVAL;
971
972 drm_mm_takedown(&mm);
973 return 0;
974}
975
/*
 * Drive __igt_insert_range() over a spread of node sizes and window
 * shapes: full range, offset start, truncated end, each half, and a
 * misaligned interior window.
 */
static int igt_insert_range(void *ignored)
{
	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
	unsigned int n;
	int ret;

	/* Check that requests outside the bounds of drm_mm are rejected. */
	ret = insert_outside_range();
	if (ret)
		return ret;

	for_each_prime_number_from(n, 1, 50) {
		const u64 size = BIT_ULL(n);
		const u64 max = count * size;

		ret = __igt_insert_range(count, size, 0, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 1, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max - 1);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max/2);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/2, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
		if (ret)
			return ret;
	}

	return 0;
}
1018
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */

	nodes = vzalloc(max_count * sizeof(*nodes));
	if (!nodes)
		goto err;

	/* Awkward bounds so nodes are unlikely to fall there naturally */
	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		/* Use prime alignments with coprime (next prime) sizes so
		 * placements never line up by accident.
		 */
		for_each_prime_number_from(prime, 1, max_count) {
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Drain before the next insert mode */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1070
/*
 * Insert nodes with every power-of-two alignment up to 2^(max-1),
 * walking downwards from the largest, to check alignment handling
 * across the full address width. Nodes are heap-allocated since their
 * count depends on @max; all are freed on exit.
 */
static int igt_align_pot(int max)
{
	struct drm_mm mm;
	struct drm_mm_node *node, *next;
	int bit;
	int ret = -EINVAL;

	/* Check that we can align to the full u64 address space */

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (bit = max - 1; bit; bit--) {
		u64 align, size;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		/* size just over half the alignment, so slots can't collide */
		align = BIT_ULL(bit);
		size = BIT_ULL(bit-1) + 1;
		if (!expect_insert(&mm, node,
				   size, align, bit,
				   &insert_modes[0])) {
			pr_err("insert failed with alignment=%llx [%d]",
			       align, bit);
			goto out;
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1111
/* Power-of-two alignments across a 32-bit address width. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1116
/* Power-of-two alignments across the full 64-bit address width. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1121
/* Dump an eviction scan's hit window and search parameters for debugging. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1128
/*
 * Print up to @count holes of @mm, each together with its allocated
 * neighbours (when present) for context.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		/* NOTE(review): node1/node2 may be NULL here (either when a
		 * neighbour is unallocated or kasprintf fails) — relies on
		 * the kernel's %s handling of NULL pointers; confirm.
		 */
		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1160
/* A drm_mm_node plus a list link so eviction candidates can be chained
 * onto a local evict_list during scan tests.
 */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1165
/*
 * Feed nodes into an eviction @scan (in @order if given, else linearly)
 * until it reports a fit, roll back the non-selected ones, then remove
 * the selected set from the mm and collect them on @evict_list. With
 * @use_color, additionally evict the overlapping neighbours reported by
 * drm_mm_scan_color_evict(); without it, any such report is an error.
 * Returns false if no eviction set was found or the color check fails.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	/* Add blocks until the scan finds room for the request */
	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Keep only the blocks the scan actually selected */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Also evict neighbours flagged by the color constraint */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1212
1213static bool evict_nothing(struct drm_mm *mm,
1214 unsigned int total_size,
1215 struct evict_node *nodes)
1216{
Chris Wilson9a71e272016-12-22 08:36:29 +00001217 struct drm_mm_scan scan;
Chris Wilson560b3282016-12-22 08:36:17 +00001218 LIST_HEAD(evict_list);
1219 struct evict_node *e;
1220 struct drm_mm_node *node;
1221 unsigned int n;
1222
Chris Wilson0b04d472016-12-22 08:36:33 +00001223 drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
Chris Wilson560b3282016-12-22 08:36:17 +00001224 for (n = 0; n < total_size; n++) {
1225 e = &nodes[n];
1226 list_add(&e->link, &evict_list);
Chris Wilson9a71e272016-12-22 08:36:29 +00001227 drm_mm_scan_add_block(&scan, &e->node);
Chris Wilson560b3282016-12-22 08:36:17 +00001228 }
1229 list_for_each_entry(e, &evict_list, link)
Chris Wilson9a71e272016-12-22 08:36:29 +00001230 drm_mm_scan_remove_block(&scan, &e->node);
Chris Wilson560b3282016-12-22 08:36:17 +00001231
1232 for (n = 0; n < total_size; n++) {
1233 e = &nodes[n];
1234
1235 if (!drm_mm_node_allocated(&e->node)) {
1236 pr_err("node[%d] no longer allocated!\n", n);
1237 return false;
1238 }
1239
1240 e->link.next = NULL;
1241 }
1242
1243 drm_mm_for_each_node(node, mm) {
1244 e = container_of(node, typeof(*e), node);
1245 e->link.next = &e->link;
1246 }
1247
1248 for (n = 0; n < total_size; n++) {
1249 e = &nodes[n];
1250
1251 if (!e->link.next) {
1252 pr_err("node[%d] no longer connected!\n", n);
1253 return false;
1254 }
1255 }
1256
1257 return assert_continuous(mm, nodes[0].node.size);
1258}
1259
1260static bool evict_everything(struct drm_mm *mm,
1261 unsigned int total_size,
1262 struct evict_node *nodes)
1263{
Chris Wilson9a71e272016-12-22 08:36:29 +00001264 struct drm_mm_scan scan;
Chris Wilson560b3282016-12-22 08:36:17 +00001265 LIST_HEAD(evict_list);
1266 struct evict_node *e;
1267 unsigned int n;
1268 int err;
1269
Chris Wilson0b04d472016-12-22 08:36:33 +00001270 drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
Chris Wilson560b3282016-12-22 08:36:17 +00001271 for (n = 0; n < total_size; n++) {
1272 e = &nodes[n];
1273 list_add(&e->link, &evict_list);
Chris Wilson9a71e272016-12-22 08:36:29 +00001274 if (drm_mm_scan_add_block(&scan, &e->node))
1275 break;
Chris Wilson560b3282016-12-22 08:36:17 +00001276 }
1277 list_for_each_entry(e, &evict_list, link) {
Chris Wilson9a71e272016-12-22 08:36:29 +00001278 if (!drm_mm_scan_remove_block(&scan, &e->node)) {
Chris Wilson560b3282016-12-22 08:36:17 +00001279 pr_err("Node %lld not marked for eviction!\n",
1280 e->node.start);
1281 list_del(&e->link);
1282 }
1283 }
1284
1285 list_for_each_entry(e, &evict_list, link)
1286 drm_mm_remove_node(&e->node);
1287
1288 if (!assert_one_hole(mm, 0, total_size))
1289 return false;
1290
1291 list_for_each_entry(e, &evict_list, link) {
1292 err = drm_mm_reserve_node(mm, &e->node);
1293 if (err) {
1294 pr_err("Failed to reinsert node after eviction: start=%llx\n",
1295 e->node.start);
1296 return false;
1297 }
1298 }
1299
1300 return assert_continuous(mm, nodes[0].node.size);
1301}
1302
/*
 * Use the eviction scanner to open up a hole inside [range_start,
 * range_end) that fits size/alignment, insert a temporary node into it,
 * verify the placement exactly fills the hole within the range, and then
 * restore the original fully-populated layout.  Returns 0 on success or
 * a negative error code.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->create_flags);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	/* The hole we evicted must now accommodate this insertion. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The insertion should exactly fill the hole, leaving no gap after */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put every evicted node back at its original offset. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1374
/*
 * Selftest: fill a drm_mm completely, then repeatedly carve out room for
 * new nodes by evicting existing ones in random order, across all
 * eviction modes, power-of-two sizes, and prime alignments.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	/* Fill the whole mm with unit-sized nodes. */
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed half-size allocations with power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with sizes scaled to keep room to evict. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1483
Chris Wilson0e483252016-12-22 08:36:18 +00001484static int igt_evict_range(void *ignored)
1485{
1486 DRM_RND_STATE(prng, random_seed);
1487 const unsigned int size = 8192;
1488 const unsigned int range_size = size / 2;
1489 const unsigned int range_start = size / 4;
1490 const unsigned int range_end = range_start + range_size;
1491 const struct insert_mode *mode;
1492 struct drm_mm mm;
1493 struct evict_node *nodes;
1494 struct drm_mm_node *node, *next;
1495 unsigned int *order, n;
1496 int ret, err;
1497
1498 /* Like igt_evict() but now we are limiting the search to a
1499 * small portion of the full drm_mm.
1500 */
1501
1502 ret = -ENOMEM;
1503 nodes = vzalloc(size * sizeof(*nodes));
1504 if (!nodes)
1505 goto err;
1506
1507 order = drm_random_order(size, &prng);
1508 if (!order)
1509 goto err_nodes;
1510
1511 ret = -EINVAL;
1512 drm_mm_init(&mm, 0, size);
1513 for (n = 0; n < size; n++) {
1514 err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
1515 DRM_MM_SEARCH_DEFAULT);
1516 if (err) {
1517 pr_err("insert failed, step %d\n", n);
1518 ret = err;
1519 goto out;
1520 }
1521 }
1522
1523 for (mode = evict_modes; mode->name; mode++) {
1524 for (n = 1; n <= range_size; n <<= 1) {
1525 drm_random_reorder(order, size, &prng);
1526 err = evict_something(&mm, range_start, range_end,
1527 nodes, order, size,
1528 n, 1,
1529 mode);
1530 if (err) {
1531 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1532 mode->name, n, range_start, range_end);
1533 goto out;
1534 }
1535 }
1536
1537 for (n = 1; n <= range_size; n <<= 1) {
1538 drm_random_reorder(order, size, &prng);
1539 err = evict_something(&mm, range_start, range_end,
1540 nodes, order, size,
1541 range_size/2, n,
1542 mode);
1543 if (err) {
1544 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1545 mode->name, range_size/2, n, range_start, range_end);
1546 goto out;
1547 }
1548 }
1549
1550 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1551 unsigned int nsize = (range_size - n + 1) / 2;
1552
1553 DRM_MM_BUG_ON(!nsize);
1554
1555 drm_random_reorder(order, size, &prng);
1556 err = evict_something(&mm, range_start, range_end,
1557 nodes, order, size,
1558 nsize, n,
1559 mode);
1560 if (err) {
1561 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1562 mode->name, nsize, n, range_start, range_end);
1563 goto out;
1564 }
1565 }
1566 }
1567
1568 ret = 0;
1569out:
1570 drm_mm_for_each_node_safe(node, next, &mm)
1571 drm_mm_remove_node(node);
1572 drm_mm_takedown(&mm);
1573 kfree(order);
1574err_nodes:
1575 vfree(nodes);
1576err:
1577 return ret;
1578}
1579
Chris Wilson05ab3c22016-12-22 08:36:19 +00001580static unsigned int node_index(const struct drm_mm_node *node)
1581{
1582 return div64_u64(node->start, node->size);
1583}
1584
1585static int igt_topdown(void *ignored)
1586{
1587 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1588 DRM_RND_STATE(prng, random_seed);
1589 const unsigned int count = 8192;
1590 unsigned int size;
1591 unsigned long *bitmap = NULL;
1592 struct drm_mm mm;
1593 struct drm_mm_node *nodes, *node, *next;
1594 unsigned int *order, n, m, o = 0;
1595 int ret;
1596
1597 /* When allocating top-down, we expect to be returned a node
1598 * from a suitable hole at the top of the drm_mm. We check that
1599 * the returned node does match the highest available slot.
1600 */
1601
1602 ret = -ENOMEM;
1603 nodes = vzalloc(count * sizeof(*nodes));
1604 if (!nodes)
1605 goto err;
1606
1607 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1608 GFP_TEMPORARY);
1609 if (!bitmap)
1610 goto err_nodes;
1611
1612 order = drm_random_order(count, &prng);
1613 if (!order)
1614 goto err_bitmap;
1615
1616 ret = -EINVAL;
1617 for (size = 1; size <= 64; size <<= 1) {
1618 drm_mm_init(&mm, 0, size*count);
1619 for (n = 0; n < count; n++) {
1620 if (!expect_insert(&mm, &nodes[n],
1621 size, 0, n,
1622 topdown)) {
1623 pr_err("insert failed, size %u step %d\n", size, n);
1624 goto out;
1625 }
1626
Chris Wilson3f85fb32016-12-22 08:36:37 +00001627 if (drm_mm_hole_follows(&nodes[n])) {
Chris Wilson05ab3c22016-12-22 08:36:19 +00001628 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1629 n, nodes[n].start, size);
1630 goto out;
1631 }
1632
1633 if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1634 goto out;
1635 }
1636
1637 if (!assert_continuous(&mm, size))
1638 goto out;
1639
1640 drm_random_reorder(order, count, &prng);
1641 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1642 for (m = 0; m < n; m++) {
1643 node = &nodes[order[(o + m) % count]];
1644 drm_mm_remove_node(node);
1645 __set_bit(node_index(node), bitmap);
1646 }
1647
1648 for (m = 0; m < n; m++) {
1649 unsigned int last;
1650
1651 node = &nodes[order[(o + m) % count]];
1652 if (!expect_insert(&mm, node,
1653 size, 0, 0,
1654 topdown)) {
1655 pr_err("insert failed, step %d/%d\n", m, n);
1656 goto out;
1657 }
1658
Chris Wilson3f85fb32016-12-22 08:36:37 +00001659 if (drm_mm_hole_follows(node)) {
Chris Wilson05ab3c22016-12-22 08:36:19 +00001660 pr_err("hole after topdown insert %d/%d, start=%llx\n",
1661 m, n, node->start);
1662 goto out;
1663 }
1664
1665 last = find_last_bit(bitmap, count);
1666 if (node_index(node) != last) {
1667 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1668 m, n, size, last, node_index(node));
1669 goto out;
1670 }
1671
1672 __clear_bit(last, bitmap);
1673 }
1674
1675 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1676
1677 o += n;
1678 }
1679
1680 drm_mm_for_each_node_safe(node, next, &mm)
1681 drm_mm_remove_node(node);
1682 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1683 }
1684
1685 ret = 0;
1686out:
1687 drm_mm_for_each_node_safe(node, next, &mm)
1688 drm_mm_remove_node(node);
1689 drm_mm_takedown(&mm);
1690 kfree(order);
1691err_bitmap:
1692 kfree(bitmap);
1693err_nodes:
1694 vfree(nodes);
1695err:
1696 return ret;
1697}
1698
Chris Wilson4c2ba552016-12-22 08:36:20 +00001699static void separate_adjacent_colors(const struct drm_mm_node *node,
1700 unsigned long color,
1701 u64 *start,
1702 u64 *end)
1703{
1704 if (node->allocated && node->color != color)
1705 ++*start;
1706
1707 node = list_next_entry(node, node_list);
1708 if (node->allocated && node->color != color)
1709 --*end;
1710}
1711
1712static bool colors_abutt(const struct drm_mm_node *node)
1713{
Chris Wilson3f85fb32016-12-22 08:36:37 +00001714 if (!drm_mm_hole_follows(node) &&
Chris Wilson4c2ba552016-12-22 08:36:20 +00001715 list_next_entry(node, node_list)->allocated) {
1716 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1717 node->color, node->start, node->size,
1718 list_next_entry(node, node_list)->color,
1719 list_next_entry(node, node_list)->start,
1720 list_next_entry(node, node_list)->size);
1721 return true;
1722 }
1723
1724 return false;
1725}
1726
1727static int igt_color(void *ignored)
1728{
1729 const unsigned int count = min(4096u, max_iterations);
1730 const struct insert_mode *mode;
1731 struct drm_mm mm;
1732 struct drm_mm_node *node, *nn;
1733 unsigned int n;
1734 int ret = -EINVAL, err;
1735
1736 /* Color adjustment complicates everything. First we just check
1737 * that when we insert a node we apply any color_adjustment callback.
1738 * The callback we use should ensure that there is a gap between
1739 * any two nodes, and so after each insertion we check that those
1740 * holes are inserted and that they are preserved.
1741 */
1742
1743 drm_mm_init(&mm, 0, U64_MAX);
1744
1745 for (n = 1; n <= count; n++) {
1746 node = kzalloc(sizeof(*node), GFP_KERNEL);
1747 if (!node) {
1748 ret = -ENOMEM;
1749 goto out;
1750 }
1751
1752 if (!expect_insert(&mm, node,
1753 n, 0, n,
1754 &insert_modes[0])) {
1755 pr_err("insert failed, step %d\n", n);
1756 kfree(node);
1757 goto out;
1758 }
1759 }
1760
1761 drm_mm_for_each_node_safe(node, nn, &mm) {
1762 if (node->color != node->size) {
1763 pr_err("invalid color stored: expected %lld, found %ld\n",
1764 node->size, node->color);
1765
1766 goto out;
1767 }
1768
1769 drm_mm_remove_node(node);
1770 kfree(node);
1771 }
1772
1773 /* Now, let's start experimenting with applying a color callback */
1774 mm.color_adjust = separate_adjacent_colors;
1775 for (mode = insert_modes; mode->name; mode++) {
1776 u64 last;
1777
1778 node = kzalloc(sizeof(*node), GFP_KERNEL);
1779 if (!node) {
1780 ret = -ENOMEM;
1781 goto out;
1782 }
1783
1784 node->size = 1 + 2*count;
1785 node->color = node->size;
1786
1787 err = drm_mm_reserve_node(&mm, node);
1788 if (err) {
1789 pr_err("initial reserve failed!\n");
1790 ret = err;
1791 goto out;
1792 }
1793
1794 last = node->start + node->size;
1795
1796 for (n = 1; n <= count; n++) {
1797 int rem;
1798
1799 node = kzalloc(sizeof(*node), GFP_KERNEL);
1800 if (!node) {
1801 ret = -ENOMEM;
1802 goto out;
1803 }
1804
1805 node->start = last;
1806 node->size = n + count;
1807 node->color = node->size;
1808
1809 err = drm_mm_reserve_node(&mm, node);
1810 if (err != -ENOSPC) {
1811 pr_err("reserve %d did not report color overlap! err=%d\n",
1812 n, err);
1813 goto out;
1814 }
1815
1816 node->start += n + 1;
1817 rem = misalignment(node, n + count);
1818 node->start += n + count - rem;
1819
1820 err = drm_mm_reserve_node(&mm, node);
1821 if (err) {
1822 pr_err("reserve %d failed, err=%d\n", n, err);
1823 ret = err;
1824 goto out;
1825 }
1826
1827 last = node->start + node->size;
1828 }
1829
1830 for (n = 1; n <= count; n++) {
1831 node = kzalloc(sizeof(*node), GFP_KERNEL);
1832 if (!node) {
1833 ret = -ENOMEM;
1834 goto out;
1835 }
1836
1837 if (!expect_insert(&mm, node,
1838 n, n, n,
1839 mode)) {
1840 pr_err("%s insert failed, step %d\n",
1841 mode->name, n);
1842 kfree(node);
1843 goto out;
1844 }
1845 }
1846
1847 drm_mm_for_each_node_safe(node, nn, &mm) {
1848 u64 rem;
1849
1850 if (node->color != node->size) {
1851 pr_err("%s invalid color stored: expected %lld, found %ld\n",
1852 mode->name, node->size, node->color);
1853
1854 goto out;
1855 }
1856
1857 if (colors_abutt(node))
1858 goto out;
1859
1860 div64_u64_rem(node->start, node->size, &rem);
1861 if (rem) {
1862 pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
1863 mode->name, node->start, node->size, rem);
1864 goto out;
1865 }
1866
1867 drm_mm_remove_node(node);
1868 kfree(node);
1869 }
1870 }
1871
1872 ret = 0;
1873out:
1874 drm_mm_for_each_node_safe(node, nn, &mm) {
1875 drm_mm_remove_node(node);
1876 kfree(node);
1877 }
1878 drm_mm_takedown(&mm);
1879 return ret;
1880}
1881
/*
 * Colored counterpart of evict_something(): scan out a hole inside
 * [range_start, range_end) that fits size/alignment/color, insert into
 * it, verify the placement honours the color gaps, and then restore the
 * original layout.  Returns 0 on success or a negative error code.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->create_flags);
	/* use_color=true: collect neighbours evicted for color gaps too */
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The new node must keep the mandated gap from its neighbours. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put every evicted node back at its original offset. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	return 0;
}
1950
/*
 * Selftest: exercise the eviction scanner with a color_adjust callback,
 * across all eviction modes, power-of-two sizes and prime alignments.
 */
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(total_size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* 2*total_size - 1: room for total_size nodes plus color gaps. */
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		/* Fixed half-size allocations, power-of-two alignments. */
		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		/* Prime alignments with sizes scaled to keep room to evict. */
		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}
	}

	ret = 0;
out:
	if (ret)
		show_mm(&mm);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2048
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002049static int igt_color_evict_range(void *ignored)
2050{
2051 DRM_RND_STATE(prng, random_seed);
2052 const unsigned int total_size = 8192;
2053 const unsigned int range_size = total_size / 2;
2054 const unsigned int range_start = total_size / 4;
2055 const unsigned int range_end = range_start + range_size;
2056 const struct insert_mode *mode;
2057 unsigned long color = 0;
2058 struct drm_mm mm;
2059 struct evict_node *nodes;
2060 struct drm_mm_node *node, *next;
2061 unsigned int *order, n;
2062 int ret, err;
2063
2064 /* Like igt_color_evict(), but limited to small portion of the full
2065 * drm_mm range.
2066 */
2067
2068 ret = -ENOMEM;
2069 nodes = vzalloc(total_size * sizeof(*nodes));
2070 if (!nodes)
2071 goto err;
2072
2073 order = drm_random_order(total_size, &prng);
2074 if (!order)
2075 goto err_nodes;
2076
2077 ret = -EINVAL;
2078 drm_mm_init(&mm, 0, 2*total_size - 1);
2079 mm.color_adjust = separate_adjacent_colors;
2080 for (n = 0; n < total_size; n++) {
2081 if (!expect_insert(&mm, &nodes[n].node,
2082 1, 0, color++,
2083 &insert_modes[0])) {
2084 pr_err("insert failed, step %d\n", n);
2085 goto out;
2086 }
2087 }
2088
2089 for (mode = evict_modes; mode->name; mode++) {
2090 for (n = 1; n <= range_size; n <<= 1) {
2091 drm_random_reorder(order, range_size, &prng);
2092 err = evict_color(&mm, range_start, range_end,
2093 nodes, order, total_size,
2094 n, 1, color++,
2095 mode);
2096 if (err) {
2097 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2098 mode->name, n, range_start, range_end);
2099 goto out;
2100 }
2101 }
2102
2103 for (n = 1; n < range_size; n <<= 1) {
2104 drm_random_reorder(order, total_size, &prng);
2105 err = evict_color(&mm, range_start, range_end,
2106 nodes, order, total_size,
2107 range_size/2, n, color++,
2108 mode);
2109 if (err) {
2110 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2111 mode->name, total_size/2, n, range_start, range_end);
2112 goto out;
2113 }
2114 }
2115
2116 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2117 unsigned int nsize = (range_size - n + 1) / 2;
2118
2119 DRM_MM_BUG_ON(!nsize);
2120
2121 drm_random_reorder(order, total_size, &prng);
2122 err = evict_color(&mm, range_start, range_end,
2123 nodes, order, total_size,
2124 nsize, n, color++,
2125 mode);
2126 if (err) {
2127 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2128 mode->name, nsize, n, range_start, range_end);
2129 goto out;
2130 }
2131 }
2132 }
2133
2134 ret = 0;
2135out:
2136 if (ret)
Daniel Vetterb5c37142016-12-29 12:09:24 +01002137 show_mm(&mm);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002138 drm_mm_for_each_node_safe(node, next, &mm)
2139 drm_mm_remove_node(node);
2140 drm_mm_takedown(&mm);
2141 kfree(order);
2142err_nodes:
2143 vfree(nodes);
2144err:
2145 return ret;
2146}
2147
Chris Wilson50f00332016-12-22 08:36:09 +00002148#include "drm_selftest.c"
2149
2150static int __init test_drm_mm_init(void)
2151{
2152 int err;
2153
2154 while (!random_seed)
2155 random_seed = get_random_int();
2156
2157 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2158 random_seed, max_iterations, max_prime);
2159 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2160
2161 return err > 0 ? 0 : err;
2162}
2163
static void __exit test_drm_mm_exit(void)
{
	/* No teardown required; each selftest cleans up after itself. */
}
2167
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* Tunables for the selftests; read-only (0400) once the module loads. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");