1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/prime_numbers.h>
7 
8 #include "../i915_selftest.h"
9 #include "i915_random.h"
10 
/*
 * Dump the diagnostic state of one block to the error log. @buddy flags
 * whether this is the buddy of the block under inspection (purely for
 * labelling the output).
 */
static void __igt_dump_block(struct i915_buddy_mm *mm,
			     struct i915_buddy_block *block,
			     bool buddy)
{
	const bool is_root = !block->parent;

	pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
	       block->header, i915_buddy_block_state(block),
	       i915_buddy_block_order(block), i915_buddy_block_offset(block),
	       i915_buddy_block_size(mm, block),
	       yesno(is_root), yesno(buddy));
}
24 
igt_dump_block(struct i915_buddy_mm * mm,struct i915_buddy_block * block)25 static void igt_dump_block(struct i915_buddy_mm *mm,
26 			   struct i915_buddy_block *block)
27 {
28 	struct i915_buddy_block *buddy;
29 
30 	__igt_dump_block(mm, block, false);
31 
32 	buddy = get_buddy(block);
33 	if (buddy)
34 		__igt_dump_block(mm, buddy, true);
35 }
36 
/*
 * Validate the internal invariants of a single block: a recognised state,
 * a power-of-two size of at least chunk_size, size and offset both
 * suitably aligned, and a self-consistent buddy when one exists.
 *
 * All violations are logged; returns 0 on success or -EINVAL if any
 * check failed.
 */
static int igt_check_block(struct i915_buddy_mm *mm,
			   struct i915_buddy_block *block)
{
	struct i915_buddy_block *buddy;
	unsigned int block_state;
	u64 block_size;
	u64 offset;
	int err = 0;

	block_state = i915_buddy_block_state(block);

	/* Only these three states are ever valid for a block */
	if (block_state != I915_BUDDY_ALLOCATED &&
	    block_state != I915_BUDDY_FREE &&
	    block_state != I915_BUDDY_SPLIT) {
		pr_err("block state mismatch\n");
		err = -EINVAL;
	}

	block_size = i915_buddy_block_size(mm, block);
	offset = i915_buddy_block_offset(block);

	if (block_size < mm->chunk_size) {
		pr_err("block size smaller than min size\n");
		err = -EINVAL;
	}

	if (!is_power_of_2(block_size)) {
		pr_err("block size not power of two\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(block_size, mm->chunk_size)) {
		pr_err("block size not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, mm->chunk_size)) {
		pr_err("block offset not aligned to min size\n");
		err = -EINVAL;
	}

	/* A block must start on a multiple of its own (pot) size */
	if (!IS_ALIGNED(offset, block_size)) {
		pr_err("block offset not aligned to block size\n");
		err = -EINVAL;
	}

	buddy = get_buddy(block);

	/* Only a root (parent-less) block may legitimately lack a buddy */
	if (!buddy && block->parent) {
		pr_err("buddy has gone fishing\n");
		err = -EINVAL;
	}

	if (buddy) {
		/* Buddies are the two halves: offsets differ only in the size bit */
		if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
			pr_err("buddy has wrong offset\n");
			err = -EINVAL;
		}

		if (i915_buddy_block_size(mm, buddy) != block_size) {
			pr_err("buddy size mismatch\n");
			err = -EINVAL;
		}

		/* A block and its buddy must never both be free at once */
		if (i915_buddy_block_state(buddy) == block_state &&
		    block_state == I915_BUDDY_FREE) {
			pr_err("block and its buddy are free\n");
			err = -EINVAL;
		}
	}

	return err;
}
110 
/*
 * Validate a list of blocks handed out by the allocator: each block must
 * pass igt_check_block() and be in the ALLOCATED state, and the block
 * sizes must sum to exactly @expected_size. When @is_contiguous is set,
 * every block must also start exactly where its predecessor ended.
 *
 * On the first failure the offending block (and its predecessor, if any)
 * is dumped and -EINVAL is returned; otherwise returns 0.
 */
static int igt_check_blocks(struct i915_buddy_mm *mm,
			    struct list_head *blocks,
			    u64 expected_size,
			    bool is_contiguous)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *prev;
	u64 total;
	int err = 0;

	block = NULL;
	prev = NULL;
	total = 0;

	list_for_each_entry(block, blocks, link) {
		err = igt_check_block(mm, block);

		if (!i915_buddy_block_is_allocated(block)) {
			pr_err("block not allocated\n");
			err = -EINVAL;
		}

		if (is_contiguous && prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = i915_buddy_block_offset(prev);
			prev_block_size = i915_buddy_block_size(mm, prev);
			offset = i915_buddy_block_offset(block);

			/* Contiguous ranges may have no holes or overlaps */
			if (offset != (prev_offset + prev_block_size)) {
				pr_err("block offset mismatch\n");
				err = -EINVAL;
			}
		}

		if (err)
			break;

		total += i915_buddy_block_size(mm, block);
		prev = block;
	}

	if (!err) {
		if (total != expected_size) {
			pr_err("size mismatch, expected=%llx, found=%llx\n",
			       expected_size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		pr_err("prev block, dump:\n");
		igt_dump_block(mm, prev);
	}

	pr_err("bad block, dump:\n");
	igt_dump_block(mm, block);

	return err;
}
174 
/*
 * Validate the top-level state of an idle mm: one root block per set bit
 * of mm->size, the first root carrying mm->max_order, every root free
 * and sitting at the head of its order's free_list, and the roots tiling
 * the address space contiguously so their sizes sum to exactly mm->size.
 *
 * Dumps the offending root (and its predecessor) on failure and returns
 * -EINVAL; otherwise returns 0.
 */
static int igt_check_mm(struct i915_buddy_mm *mm)
{
	struct i915_buddy_block *root;
	struct i915_buddy_block *prev;
	unsigned int i;
	u64 total;
	int err = 0;

	if (!mm->n_roots) {
		pr_err("n_roots is zero\n");
		return -EINVAL;
	}

	/* There is one root for each set bit of the total size */
	if (mm->n_roots != hweight64(mm->size)) {
		pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
		       mm->n_roots, hweight64(mm->size));
		return -EINVAL;
	}

	root = NULL;
	prev = NULL;
	total = 0;

	for (i = 0; i < mm->n_roots; ++i) {
		struct i915_buddy_block *block;
		unsigned int order;

		root = mm->roots[i];
		if (!root) {
			pr_err("root(%u) is NULL\n", i);
			err = -EINVAL;
			break;
		}

		err = igt_check_block(mm, root);

		/* An idle mm must have every root completely free */
		if (!i915_buddy_block_is_free(root)) {
			pr_err("root not free\n");
			err = -EINVAL;
		}

		order = i915_buddy_block_order(root);

		/* The first root is the largest and must be of max_order */
		if (!i) {
			if (order != mm->max_order) {
				pr_err("max order root missing\n");
				err = -EINVAL;
			}
		}

		if (prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = i915_buddy_block_offset(prev);
			prev_block_size = i915_buddy_block_size(mm, prev);
			offset = i915_buddy_block_offset(root);

			/* Roots must tile the space without holes */
			if (offset != (prev_offset + prev_block_size)) {
				pr_err("root offset mismatch\n");
				err = -EINVAL;
			}
		}

		/* A free root should head its order's free_list */
		block = list_first_entry_or_null(&mm->free_list[order],
						 struct i915_buddy_block,
						 link);
		if (block != root) {
			pr_err("root mismatch at order=%u\n", order);
			err = -EINVAL;
		}

		if (err)
			break;

		prev = root;
		total += i915_buddy_block_size(mm, root);
	}

	if (!err) {
		if (total != mm->size) {
			pr_err("expected mm size=%llx, found=%llx\n", mm->size,
			       total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		pr_err("prev root(%u), dump:\n", i - 1);
		igt_dump_block(mm, prev);
	}

	if (root) {
		pr_err("bad root(%u), dump:\n", i);
		igt_dump_block(mm, root);
	}

	return err;
}
276 
/*
 * Pick a randomised (size, chunk_size) pair for i915_buddy_init(),
 * seeded from the selftest random seed so any failure is reproducible.
 */
static void igt_mm_config(u64 *size, u64 *chunk_size)
{
	I915_RND_STATE(prng);
	u32 pages, min_pages;

	/* Nothing fancy, just try to get an interesting bit pattern */

	prandom_seed_state(&prng, i915_selftest.random_seed);

	/* Let size be a random number of pages up to 8 GB (2M pages) */
	pages = 1 + i915_prandom_u32_max_state(BIT(33 - 12) - 1, &prng);

	/* Let the chunk size be a random power of 2 less than size */
	min_pages = BIT(i915_prandom_u32_max_state(ilog2(pages), &prng));

	/* Round size down to the chunk size */
	pages &= -min_pages;

	/* Convert from pages to bytes */
	*chunk_size = (u64)min_pages << 12;
	*size = (u64)pages << 12;
}
297 
/*
 * Smoke test: for every possible starting order (visited in a random
 * permutation), repeatedly allocate blocks — falling back to smaller
 * orders as larger ones run out — until the mm is exhausted or the
 * selftest timeout fires, then verify the allocated blocks and confirm
 * the mm returns to a pristine state once everything is freed.
 */
static int igt_buddy_alloc_smoke(void *arg)
{
	struct i915_buddy_mm mm;
	IGT_TIMEOUT(end_time);
	I915_RND_STATE(prng);
	u64 chunk_size;
	u64 mm_size;
	int *order;	/* random permutation of [0, mm.max_order] */
	int err, i;

	igt_mm_config(&mm_size, &chunk_size);

	pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);

	err = i915_buddy_init(&mm, mm_size, chunk_size);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}

	order = i915_random_order(mm.max_order + 1, &prng);
	if (!order)
		/*
		 * NOTE(review): err is still 0 here, so an allocation
		 * failure inside i915_random_order() silently passes the
		 * subtest — confirm this skip-on-OOM is intentional.
		 */
		goto out_fini;

	for (i = 0; i <= mm.max_order; ++i) {
		struct i915_buddy_block *block;
		int max_order = order[i];
		bool timeout = false;
		LIST_HEAD(blocks);
		int order;	/* NOTE: shadows the outer permutation array */
		u64 total;

		/* The mm must be fully free before we start carving it up */
		err = igt_check_mm(&mm);
		if (err) {
			pr_err("pre-mm check failed, abort\n");
			break;
		}

		pr_info("filling from max_order=%u\n", max_order);

		order = max_order;
		total = 0;

		do {
retry:
			block = i915_buddy_alloc(&mm, order);
			if (IS_ERR(block)) {
				err = PTR_ERR(block);
				if (err == -ENOMEM) {
					pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
						order);
				} else {
					/* Non-OOM failure: retry with ever smaller orders */
					if (order--) {
						err = 0;
						goto retry;
					}

					/*
					 * NOTE(review): order was already
					 * decremented above, so this prints
					 * -1 rather than the failing order.
					 */
					pr_err("buddy_alloc with order=%d failed(%d)\n",
					       order, err);
				}

				break;
			}

			list_add_tail(&block->link, &blocks);

			if (i915_buddy_block_order(block) != order) {
				pr_err("buddy_alloc order mismatch\n");
				err = -EINVAL;
				break;
			}

			total += i915_buddy_block_size(&mm, block);

			if (__igt_timeout(end_time, NULL)) {
				timeout = true;
				break;
			}
		} while (total < mm.size);

		if (!err)
			err = igt_check_blocks(&mm, &blocks, total, false);

		i915_buddy_free_list(&mm, &blocks);

		if (!err) {
			/* Freeing everything must restore the pristine mm */
			err = igt_check_mm(&mm);
			if (err)
				pr_err("post-mm check failed\n");
		}

		if (err || timeout)
			break;

		cond_resched();
	}

	/* Genuine memory exhaustion is tolerated, not a test failure */
	if (err == -ENOMEM)
		err = 0;

	kfree(order);
out_fini:
	i915_buddy_fini(&mm);

	return err;
}
404 
/*
 * Worst-case carve-up: fill a power-of-two sized mm with one block of
 * every order, prove it is then completely full, and check that freeing
 * in increasing size progressively re-enables larger allocations, until
 * the whole mm can finally be reallocated as one max_order block.
 */
static int igt_buddy_alloc_pessimistic(void *arg)
{
	const unsigned int max_order = 16;
	struct i915_buddy_block *block, *bn;
	struct i915_buddy_mm mm;
	unsigned int order;
	LIST_HEAD(blocks);
	int err;

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}
	GEM_BUG_ON(mm.max_order != max_order);

	/* One block of each order 0..max_order-1: 2^max_order - 1 pages */
	for (order = 0; order < max_order; order++) {
		block = i915_buddy_alloc(&mm, order);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
				order);
			err = PTR_ERR(block);
			goto err;
		}

		list_add_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	block = i915_buddy_alloc(&mm, 0);
	if (IS_ERR(block)) {
		pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
		err = PTR_ERR(block);
		goto err;
	}
	list_add_tail(&block->link, &blocks);

	/* Should be completely full! Any order must now fail to allocate. */
	for (order = max_order; order--; ) {
		block = i915_buddy_alloc(&mm, order);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

	/* Put back the single leftover page allocated last, above */
	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	i915_buddy_free(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		i915_buddy_free(&mm, block);

		/* The space freed so far must now satisfy the next order up */
		block = i915_buddy_alloc(&mm, order);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
				order);
			err = PTR_ERR(block);
			goto err;
		}
		i915_buddy_free(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	block = i915_buddy_alloc(&mm, max_order);
	if (IS_ERR(block)) {
		pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			max_order);
		err = PTR_ERR(block);
		goto err;
	}
	i915_buddy_free(&mm, block);

err:
	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);
	return err;
}
496 
igt_buddy_alloc_optimistic(void * arg)497 static int igt_buddy_alloc_optimistic(void *arg)
498 {
499 	const int max_order = 16;
500 	struct i915_buddy_block *block;
501 	struct i915_buddy_mm mm;
502 	LIST_HEAD(blocks);
503 	int order;
504 	int err;
505 
506 	/*
507 	 * Create a mm with one block of each order available, and
508 	 * try to allocate them all.
509 	 */
510 
511 	err = i915_buddy_init(&mm,
512 			      PAGE_SIZE * ((1 << (max_order + 1)) - 1),
513 			      PAGE_SIZE);
514 	if (err) {
515 		pr_err("buddy_init failed(%d)\n", err);
516 		return err;
517 	}
518 	GEM_BUG_ON(mm.max_order != max_order);
519 
520 	for (order = 0; order <= max_order; order++) {
521 		block = i915_buddy_alloc(&mm, order);
522 		if (IS_ERR(block)) {
523 			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
524 				order);
525 			err = PTR_ERR(block);
526 			goto err;
527 		}
528 
529 		list_add_tail(&block->link, &blocks);
530 	}
531 
532 	/* Should be completely full! */
533 	block = i915_buddy_alloc(&mm, 0);
534 	if (!IS_ERR(block)) {
535 		pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
536 		list_add_tail(&block->link, &blocks);
537 		err = -EINVAL;
538 		goto err;
539 	}
540 
541 err:
542 	i915_buddy_free_list(&mm, &blocks);
543 	i915_buddy_fini(&mm);
544 	return err;
545 }
546 
/*
 * Pathological fragmentation: on each pass fill the mm leaving a single
 * page "hole", free the largest block, and whittle down again. Once the
 * space is 50% fragmented, nothing larger than an order-0 (chunk_size)
 * block can ever be allocated.
 */
static int igt_buddy_alloc_pathological(void *arg)
{
	const int max_order = 16;
	struct i915_buddy_block *block;
	struct i915_buddy_mm mm;
	LIST_HEAD(blocks);	/* long-lived allocations */
	LIST_HEAD(holes);	/* one page retained per pass */
	int order, top;
	int err;

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}
	GEM_BUG_ON(mm.max_order != max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			i915_buddy_free(&mm, block);
		}

		/* Refill with one block of each order below the new top */
		for (order = top; order--; ) {
			block = i915_buddy_alloc(&mm, order);
			if (IS_ERR(block)) {
				pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					order, top);
				err = PTR_ERR(block);
				goto err;
			}
			list_add_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		block = i915_buddy_alloc(&mm, 0);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc hit -ENOMEM for hole\n");
			err = PTR_ERR(block);
			goto err;
		}
		list_add_tail(&block->link, &holes);

		/* Nothing as large as the just-freed top block may remain */
		block = i915_buddy_alloc(&mm, top);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				top, max_order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

	/* Release the hole pages retained on each pass */
	i915_buddy_free_list(&mm, &holes);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		block = i915_buddy_alloc(&mm, order);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

err:
	list_splice_tail(&holes, &blocks);
	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);
	return err;
}
629 
/*
 * Exercise i915_buddy_alloc_range() by claiming the whole address space
 * front to back in spans of prime page counts, checking each returned
 * span starts at the requested offset and forms a valid, contiguous set
 * of blocks summing to the requested size.
 */
static int igt_buddy_alloc_range(void *arg)
{
	struct i915_buddy_mm mm;
	unsigned long page_num;
	LIST_HEAD(blocks);
	u64 chunk_size;
	u64 offset;
	u64 size;
	u64 rem;
	int err;

	igt_mm_config(&size, &chunk_size);

	pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);

	err = i915_buddy_init(&mm, size, chunk_size);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}

	err = igt_check_mm(&mm);
	if (err) {
		pr_err("pre-mm check failed, abort, abort, abort!\n");
		goto err_fini;
	}

	rem = mm.size;
	offset = 0;

	for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
		struct i915_buddy_block *block;
		LIST_HEAD(tmp);

		/* Never request past the end of the space */
		size = min(page_num * mm.chunk_size, rem);

		err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("alloc_range hit -ENOMEM with size=%llx\n",
					size);
			} else {
				pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
				       offset, size, err);
			}

			break;
		}

		block = list_first_entry_or_null(&tmp,
						 struct i915_buddy_block,
						 link);
		if (!block) {
			pr_err("alloc_range has no blocks\n");
			err = -EINVAL;
			break;
		}

		/* The first block must begin exactly at the requested offset */
		if (i915_buddy_block_offset(block) != offset) {
			pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
			       i915_buddy_block_offset(block), offset);
			err = -EINVAL;
		}

		if (!err)
			err = igt_check_blocks(&mm, &tmp, size, true);

		/* Keep everything allocated until the walk completes */
		list_splice_tail(&tmp, &blocks);

		if (err)
			break;

		offset += size;

		rem -= size;
		if (!rem)
			break;

		cond_resched();
	}

	/* Genuine memory exhaustion is tolerated, not a test failure */
	if (err == -ENOMEM)
		err = 0;

	i915_buddy_free_list(&mm, &blocks);

	if (!err) {
		/* Everything freed: the mm must be back to pristine */
		err = igt_check_mm(&mm);
		if (err)
			pr_err("post-mm check failed\n");
	}

err_fini:
	i915_buddy_fini(&mm);

	return err;
}
727 
igt_buddy_alloc_limit(void * arg)728 static int igt_buddy_alloc_limit(void *arg)
729 {
730 	struct i915_buddy_block *block;
731 	struct i915_buddy_mm mm;
732 	const u64 size = U64_MAX;
733 	int err;
734 
735 	err = i915_buddy_init(&mm, size, PAGE_SIZE);
736 	if (err)
737 		return err;
738 
739 	if (mm.max_order != I915_BUDDY_MAX_ORDER) {
740 		pr_err("mm.max_order(%d) != %d\n",
741 		       mm.max_order, I915_BUDDY_MAX_ORDER);
742 		err = -EINVAL;
743 		goto out_fini;
744 	}
745 
746 	block = i915_buddy_alloc(&mm, mm.max_order);
747 	if (IS_ERR(block)) {
748 		err = PTR_ERR(block);
749 		goto out_fini;
750 	}
751 
752 	if (i915_buddy_block_order(block) != mm.max_order) {
753 		pr_err("block order(%d) != %d\n",
754 		       i915_buddy_block_order(block), mm.max_order);
755 		err = -EINVAL;
756 		goto out_free;
757 	}
758 
759 	if (i915_buddy_block_size(&mm, block) !=
760 	    BIT_ULL(mm.max_order) * PAGE_SIZE) {
761 		pr_err("block size(%llu) != %llu\n",
762 		       i915_buddy_block_size(&mm, block),
763 		       BIT_ULL(mm.max_order) * PAGE_SIZE);
764 		err = -EINVAL;
765 		goto out_free;
766 	}
767 
768 out_free:
769 	i915_buddy_free(&mm, block);
770 out_fini:
771 	i915_buddy_fini(&mm);
772 	return err;
773 }
774 
/*
 * Entry point for the mock (no hardware) buddy-allocator selftests;
 * each subtest runs in list order with a NULL private argument.
 */
int i915_buddy_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_buddy_alloc_pessimistic),
		SUBTEST(igt_buddy_alloc_optimistic),
		SUBTEST(igt_buddy_alloc_pathological),
		SUBTEST(igt_buddy_alloc_smoke),
		SUBTEST(igt_buddy_alloc_range),
		SUBTEST(igt_buddy_alloc_limit),
	};

	return i915_subtests(tests, NULL);
}
788