1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2016 Intel Corporation
5 */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gt/intel_engine_pm.h"
10 #include "gt/intel_gpu_commands.h"
11 #include "gt/intel_gt.h"
12 #include "gt/intel_gt_pm.h"
13 #include "gem/i915_gem_region.h"
14 #include "huge_gem_object.h"
15 #include "i915_selftest.h"
16 #include "selftests/i915_random.h"
17 #include "selftests/igt_flush_test.h"
18 #include "selftests/igt_mmap.h"
19
20 struct tile {
21 unsigned int width;
22 unsigned int height;
23 unsigned int stride;
24 unsigned int size;
25 unsigned int tiling;
26 unsigned int swizzle;
27 };
28
29 static u64 swizzle_bit(unsigned int bit, u64 offset)
30 {
31 return (offset & BIT_ULL(bit)) >> (bit - 6);
32 }
33
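/*
 * Convert a linear offset in the detiled (fenced) view of the object into
 * the offset of the same byte within the backing store, applying the X/Y
 * tile layout and the bit-6 swizzle, so that we can predict where a write
 * through the GTT should land.
 */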
34 static u64 tiled_offset(const struct tile *tile, u64 v)
35 {
36 u64 x, y;
37
38 if (tile->tiling == I915_TILING_NONE)
39 return v;
40
41 y = div64_u64_rem(v, tile->stride, &x);
42 v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
43
44 if (tile->tiling == I915_TILING_X) {
45 v += y * tile->width;
46 v += div64_u64_rem(x, tile->width, &x) << tile->size;
47 v += x;
48 } else if (tile->width == 128) {
49 const unsigned int ytile_span = 16;
50 const unsigned int ytile_height = 512;
51
52 v += y * ytile_span;
53 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
54 v += x;
55 } else {
56 const unsigned int ytile_span = 32;
57 const unsigned int ytile_height = 256;
58
59 v += y * ytile_span;
60 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
61 v += x;
62 }
63
64 switch (tile->swizzle) {
65 case I915_BIT_6_SWIZZLE_9:
66 v ^= swizzle_bit(9, v);
67 break;
68 case I915_BIT_6_SWIZZLE_9_10:
69 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
70 break;
71 case I915_BIT_6_SWIZZLE_9_11:
72 v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
73 break;
74 case I915_BIT_6_SWIZZLE_9_10_11:
75 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
76 break;
77 }
78
79 return v;
80 }
81
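/*
 * Write the page index through a small partial GGTT view covering a
 * randomly chosen page of the tiled object, then read it back with the CPU
 * from the swizzled offset in the backing store to confirm the write landed
 * in the expected page.
 */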
82 static int check_partial_mapping(struct drm_i915_gem_object *obj,
83 const struct tile *tile,
84 struct rnd_state *prng)
85 {
86 const unsigned long npages = obj->base.size / PAGE_SIZE;
87 struct i915_ggtt_view view;
88 struct i915_vma *vma;
89 unsigned long page;
90 u32 __iomem *io;
91 struct page *p;
92 unsigned int n;
93 u64 offset;
94 u32 *cpu;
95 int err;
96
97 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
98 if (err) {
99 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
100 tile->tiling, tile->stride, err);
101 return err;
102 }
103
104 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
105 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
106
107 i915_gem_object_lock(obj, NULL);
108 err = i915_gem_object_set_to_gtt_domain(obj, true);
109 i915_gem_object_unlock(obj);
110 if (err) {
111 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
112 return err;
113 }
114
115 page = i915_prandom_u32_max_state(npages, prng);
116 view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
117
118 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
119 if (IS_ERR(vma)) {
120 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
121 page, (int)PTR_ERR(vma));
122 return PTR_ERR(vma);
123 }
124
125 n = page - view.partial.offset;
126 GEM_BUG_ON(n >= view.partial.size);
127
128 io = i915_vma_pin_iomap(vma);
129 i915_vma_unpin(vma);
130 if (IS_ERR(io)) {
131 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
132 page, (int)PTR_ERR(io));
133 err = PTR_ERR(io);
134 goto out;
135 }
136
137 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
138 i915_vma_unpin_iomap(vma);
139
140 offset = tiled_offset(tile, page << PAGE_SHIFT);
141 if (offset >= obj->base.size)
142 goto out;
143
144 intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
145
146 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
147 cpu = kmap(p) + offset_in_page(offset);
148 drm_clflush_virt_range(cpu, sizeof(*cpu));
149 if (*cpu != (u32)page) {
150 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
151 page, n,
152 view.partial.offset,
153 view.partial.size,
154 vma->size >> PAGE_SHIFT,
155 tile->tiling ? tile_row_pages(obj) : 0,
156 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
157 offset >> PAGE_SHIFT,
158 (unsigned int)offset_in_page(offset),
159 offset,
160 (u32)page, *cpu);
161 err = -EINVAL;
162 }
163 *cpu = 0;
164 drm_clflush_virt_range(cpu, sizeof(*cpu));
165 kunmap(p);
166
167 out:
168 __i915_vma_put(vma);
169 return err;
170 }
171
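/*
 * As check_partial_mapping(), but exhaustively walk the partial views for
 * every prime-numbered page of the object, bailing out when the timeout
 * expires.
 */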
172 static int check_partial_mappings(struct drm_i915_gem_object *obj,
173 const struct tile *tile,
174 unsigned long end_time)
175 {
176 const unsigned int nreal = obj->scratch / PAGE_SIZE;
177 const unsigned long npages = obj->base.size / PAGE_SIZE;
178 struct i915_vma *vma;
179 unsigned long page;
180 int err;
181
182 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
183 if (err) {
184 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
185 tile->tiling, tile->stride, err);
186 return err;
187 }
188
189 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
190 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
191
192 i915_gem_object_lock(obj, NULL);
193 err = i915_gem_object_set_to_gtt_domain(obj, true);
194 i915_gem_object_unlock(obj);
195 if (err) {
196 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
197 return err;
198 }
199
200 for_each_prime_number_from(page, 1, npages) {
201 struct i915_ggtt_view view =
202 compute_partial_view(obj, page, MIN_CHUNK_PAGES);
203 u32 __iomem *io;
204 struct page *p;
205 unsigned int n;
206 u64 offset;
207 u32 *cpu;
208
209 GEM_BUG_ON(view.partial.size > nreal);
210 cond_resched();
211
212 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
213 if (IS_ERR(vma)) {
214 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
215 page, (int)PTR_ERR(vma));
216 return PTR_ERR(vma);
217 }
218
219 n = page - view.partial.offset;
220 GEM_BUG_ON(n >= view.partial.size);
221
222 io = i915_vma_pin_iomap(vma);
223 i915_vma_unpin(vma);
224 if (IS_ERR(io)) {
225 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
226 page, (int)PTR_ERR(io));
227 return PTR_ERR(io);
228 }
229
230 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
231 i915_vma_unpin_iomap(vma);
232
233 offset = tiled_offset(tile, page << PAGE_SHIFT);
234 if (offset >= obj->base.size)
235 continue;
236
237 intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
238
239 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
240 cpu = kmap(p) + offset_in_page(offset);
241 drm_clflush_virt_range(cpu, sizeof(*cpu));
242 if (*cpu != (u32)page) {
243 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
244 page, n,
245 view.partial.offset,
246 view.partial.size,
247 vma->size >> PAGE_SHIFT,
248 tile->tiling ? tile_row_pages(obj) : 0,
249 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
250 offset >> PAGE_SHIFT,
251 (unsigned int)offset_in_page(offset),
252 offset,
253 (u32)page, *cpu);
254 err = -EINVAL;
255 }
256 *cpu = 0;
257 drm_clflush_virt_range(cpu, sizeof(*cpu));
258 kunmap(p);
259 if (err)
260 return err;
261
262 __i915_vma_put(vma);
263
264 if (igt_timeout(end_time,
265 "%s: timed out after tiling=%d stride=%d\n",
266 __func__, tile->tiling, tile->stride))
267 return -EINTR;
268 }
269
270 return 0;
271 }
272
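/*
 * Fill in the tile geometry for this platform (tile width, height and the
 * log2 size of a tile in bytes) and return the maximum fence pitch in units
 * of tile widths.
 */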
273 static unsigned int
274 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
275 {
276 if (GRAPHICS_VER(i915) <= 2) {
277 tile->height = 16;
278 tile->width = 128;
279 tile->size = 11;
280 } else if (tile->tiling == I915_TILING_Y &&
281 HAS_128_BYTE_Y_TILING(i915)) {
282 tile->height = 32;
283 tile->width = 128;
284 tile->size = 12;
285 } else {
286 tile->height = 8;
287 tile->width = 512;
288 tile->size = 12;
289 }
290
291 if (GRAPHICS_VER(i915) < 4)
292 return 8192 / tile->width;
293 else if (GRAPHICS_VER(i915) < 7)
294 return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
295 else
296 return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
297 }
298
299 static int igt_partial_tiling(void *arg)
300 {
301 const unsigned int nreal = 1 << 12; /* largest tile row x2 */
302 struct drm_i915_private *i915 = arg;
303 struct drm_i915_gem_object *obj;
304 intel_wakeref_t wakeref;
305 int tiling;
306 int err;
307
308 if (!i915_ggtt_has_aperture(&i915->ggtt))
309 return 0;
310
311 /* We want to check the page mapping and fencing of a large object
312 * mmapped through the GTT. The object we create is larger than can
313 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
314 * We then check that a write through each partial GGTT vma ends up
315 * in the right set of pages within the object, and with the expected
316 * tiling, which we verify by manual swizzling.
317 */
318
319 obj = huge_gem_object(i915,
320 nreal << PAGE_SHIFT,
321 (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
322 if (IS_ERR(obj))
323 return PTR_ERR(obj);
324
325 err = i915_gem_object_pin_pages_unlocked(obj);
326 if (err) {
327 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
328 nreal, obj->base.size / PAGE_SIZE, err);
329 goto out;
330 }
331
332 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
333
334 if (1) {
335 IGT_TIMEOUT(end);
336 struct tile tile;
337
338 tile.height = 1;
339 tile.width = 1;
340 tile.size = 0;
341 tile.stride = 0;
342 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
343 tile.tiling = I915_TILING_NONE;
344
345 err = check_partial_mappings(obj, &tile, end);
346 if (err && err != -EINTR)
347 goto out_unlock;
348 }
349
350 for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
351 IGT_TIMEOUT(end);
352 unsigned int max_pitch;
353 unsigned int pitch;
354 struct tile tile;
355
356 if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
357 /*
358 * The swizzling pattern is actually unknown as it
359 * varies based on physical address of each page.
360 * See i915_gem_detect_bit_6_swizzle().
361 */
362 break;
363
364 tile.tiling = tiling;
365 switch (tiling) {
366 case I915_TILING_X:
367 tile.swizzle = i915->ggtt.bit_6_swizzle_x;
368 break;
369 case I915_TILING_Y:
370 tile.swizzle = i915->ggtt.bit_6_swizzle_y;
371 break;
372 }
373
374 GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
375 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
376 tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
377 continue;
378
379 max_pitch = setup_tile_size(&tile, i915);
380
381 for (pitch = max_pitch; pitch; pitch >>= 1) {
382 tile.stride = tile.width * pitch;
383 err = check_partial_mappings(obj, &tile, end);
384 if (err == -EINTR)
385 goto next_tiling;
386 if (err)
387 goto out_unlock;
388
389 if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
390 tile.stride = tile.width * (pitch - 1);
391 err = check_partial_mappings(obj, &tile, end);
392 if (err == -EINTR)
393 goto next_tiling;
394 if (err)
395 goto out_unlock;
396 }
397
398 if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
399 tile.stride = tile.width * (pitch + 1);
400 err = check_partial_mappings(obj, &tile, end);
401 if (err == -EINTR)
402 goto next_tiling;
403 if (err)
404 goto out_unlock;
405 }
406 }
407
408 if (GRAPHICS_VER(i915) >= 4) {
409 for_each_prime_number(pitch, max_pitch) {
410 tile.stride = tile.width * pitch;
411 err = check_partial_mappings(obj, &tile, end);
412 if (err == -EINTR)
413 goto next_tiling;
414 if (err)
415 goto out_unlock;
416 }
417 }
418
419 next_tiling: ;
420 }
421
422 out_unlock:
423 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
424 i915_gem_object_unpin_pages(obj);
425 out:
426 i915_gem_object_put(obj);
427 return err;
428 }
429
430 static int igt_smoke_tiling(void *arg)
431 {
432 const unsigned int nreal = 1 << 12; /* largest tile row x2 */
433 struct drm_i915_private *i915 = arg;
434 struct drm_i915_gem_object *obj;
435 intel_wakeref_t wakeref;
436 I915_RND_STATE(prng);
437 unsigned long count;
438 IGT_TIMEOUT(end);
439 int err;
440
441 if (!i915_ggtt_has_aperture(&i915->ggtt))
442 return 0;
443
444 /*
445 * igt_partial_tiling() does an exhaustive check of partial tiling
446 * chunking, but will undoubtedly run out of time. Here, we do a
447 * randomised search and hope over many runs of 1s with different
448 * seeds we will do a thorough check.
449 *
450 * Remember to look at the st_seed if we see a flip-flop in BAT!
451 */
452
453 if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
454 return 0;
455
456 obj = huge_gem_object(i915,
457 nreal << PAGE_SHIFT,
458 (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
459 if (IS_ERR(obj))
460 return PTR_ERR(obj);
461
462 err = i915_gem_object_pin_pages_unlocked(obj);
463 if (err) {
464 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
465 nreal, obj->base.size / PAGE_SIZE, err);
466 goto out;
467 }
468
469 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
470
471 count = 0;
472 do {
473 struct tile tile;
474
475 tile.tiling =
476 i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
477 switch (tile.tiling) {
478 case I915_TILING_NONE:
479 tile.height = 1;
480 tile.width = 1;
481 tile.size = 0;
482 tile.stride = 0;
483 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
484 break;
485
486 case I915_TILING_X:
487 tile.swizzle = i915->ggtt.bit_6_swizzle_x;
488 break;
489 case I915_TILING_Y:
490 tile.swizzle = i915->ggtt.bit_6_swizzle_y;
491 break;
492 }
493
494 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
495 tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
496 continue;
497
498 if (tile.tiling != I915_TILING_NONE) {
499 unsigned int max_pitch = setup_tile_size(&tile, i915);
500
501 tile.stride =
502 i915_prandom_u32_max_state(max_pitch, &prng);
503 tile.stride = (1 + tile.stride) * tile.width;
504 if (GRAPHICS_VER(i915) < 4)
505 tile.stride = rounddown_pow_of_two(tile.stride);
506 }
507
508 err = check_partial_mapping(obj, &tile, &prng);
509 if (err)
510 break;
511
512 count++;
513 } while (!__igt_timeout(end, NULL));
514
515 pr_info("%s: Completed %lu trials\n", __func__, count);
516
517 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
518 i915_gem_object_unpin_pages(obj);
519 out:
520 i915_gem_object_put(obj);
521 return err;
522 }
523
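/*
 * Keep the object busy by submitting an empty kernel request against it on
 * every engine, then drop our reference so that the object stays alive only
 * via its active references until those requests retire.
 */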
524 static int make_obj_busy(struct drm_i915_gem_object *obj)
525 {
526 struct drm_i915_private *i915 = to_i915(obj->base.dev);
527 struct intel_engine_cs *engine;
528
529 for_each_uabi_engine(engine, i915) {
530 struct i915_request *rq;
531 struct i915_vma *vma;
532 struct i915_gem_ww_ctx ww;
533 int err;
534
535 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
536 if (IS_ERR(vma))
537 return PTR_ERR(vma);
538
539 i915_gem_ww_ctx_init(&ww, false);
540 retry:
541 err = i915_gem_object_lock(obj, &ww);
542 if (!err)
543 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
544 if (err)
545 goto err;
546
547 rq = intel_engine_create_kernel_request(engine);
548 if (IS_ERR(rq)) {
549 err = PTR_ERR(rq);
550 goto err_unpin;
551 }
552
553 err = i915_request_await_object(rq, vma->obj, true);
554 if (err == 0)
555 err = i915_vma_move_to_active(vma, rq,
556 EXEC_OBJECT_WRITE);
557
558 i915_request_add(rq);
559 err_unpin:
560 i915_vma_unpin(vma);
561 err:
562 if (err == -EDEADLK) {
563 err = i915_gem_ww_ctx_backoff(&ww);
564 if (!err)
565 goto retry;
566 }
567 i915_gem_ww_ctx_fini(&ww);
568 if (err)
569 return err;
570 }
571
572 i915_gem_object_put(obj); /* leave it only alive via its active ref */
573 return 0;
574 }
575
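/*
 * Devices with local memory only support the fixed mmap type; everything
 * else defaults to the legacy GTT mmap.
 */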
576 static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
577 {
578 if (HAS_LMEM(i915))
579 return I915_MMAP_TYPE_FIXED;
580
581 return I915_MMAP_TYPE_GTT;
582 }
583
584 static struct drm_i915_gem_object *
585 create_sys_or_internal(struct drm_i915_private *i915,
586 unsigned long size)
587 {
588 if (HAS_LMEM(i915)) {
589 struct intel_memory_region *sys_region =
590 i915->mm.regions[INTEL_REGION_SMEM];
591
592 return __i915_gem_object_create_user(i915, size, &sys_region, 1);
593 }
594
595 return i915_gem_object_create_internal(i915, size);
596 }
597
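/*
 * Create a throwaway object of the given size and check that reserving an
 * mmap offset for it returns the expected error code.
 */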
598 static bool assert_mmap_offset(struct drm_i915_private *i915,
599 unsigned long size,
600 int expected)
601 {
602 struct drm_i915_gem_object *obj;
603 u64 offset;
604 int ret;
605
606 obj = create_sys_or_internal(i915, size);
607 if (IS_ERR(obj))
608 return expected && expected == PTR_ERR(obj);
609
610 ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
611 i915_gem_object_put(obj);
612
613 return ret == expected;
614 }
615
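/*
 * Hold a GT wakeref and park the shrinker and retire worker so that no
 * background retirement interferes while we manipulate the mmap offset
 * space.
 */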
616 static void disable_retire_worker(struct drm_i915_private *i915)
617 {
618 i915_gem_driver_unregister__shrinker(i915);
619 intel_gt_pm_get(&i915->gt);
620 cancel_delayed_work_sync(&i915->gt.requests.retire_work);
621 }
622
623 static void restore_retire_worker(struct drm_i915_private *i915)
624 {
625 igt_flush_test(i915);
626 intel_gt_pm_put(&i915->gt);
627 i915_gem_driver_register__shrinker(i915);
628 }
629
630 static void mmap_offset_lock(struct drm_i915_private *i915)
631 __acquires(&i915->drm.vma_offset_manager->vm_lock)
632 {
633 write_lock(&i915->drm.vma_offset_manager->vm_lock);
634 }
635
636 static void mmap_offset_unlock(struct drm_i915_private *i915)
637 __releases(&i915->drm.vma_offset_manager->vm_lock)
638 {
639 write_unlock(&i915->drm.vma_offset_manager->vm_lock);
640 }
641
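/*
 * Trim the mmap offset address space down to a single page and verify that
 * object placement within it succeeds and fails exactly as expected.
 */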
642 static int igt_mmap_offset_exhaustion(void *arg)
643 {
644 struct drm_i915_private *i915 = arg;
645 struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
646 struct drm_i915_gem_object *obj;
647 struct drm_mm_node *hole, *next;
648 int loop, err = 0;
649 u64 offset;
650 int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
651
652 /* Disable background reaper */
653 disable_retire_worker(i915);
654 GEM_BUG_ON(!i915->gt.awake);
655 intel_gt_retire_requests(&i915->gt);
656 i915_gem_drain_freed_objects(i915);
657
658 /* Trim the device mmap space to only a page */
659 mmap_offset_lock(i915);
660 loop = 1; /* PAGE_SIZE units */
661 list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
662 struct drm_mm_node *resv;
663
664 resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
665 if (!resv) {
666 err = -ENOMEM;
667 goto out_park;
668 }
669
670 resv->start = drm_mm_hole_node_start(hole) + loop;
671 resv->size = hole->hole_size - loop;
672 resv->color = -1ul;
673 loop = 0;
674
675 if (!resv->size) {
676 kfree(resv);
677 continue;
678 }
679
680 pr_debug("Reserving hole [%llx + %llx]\n",
681 resv->start, resv->size);
682
683 err = drm_mm_reserve_node(mm, resv);
684 if (err) {
685 pr_err("Failed to trim VMA manager, err=%d\n", err);
686 kfree(resv);
687 goto out_park;
688 }
689 }
690 GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
691 mmap_offset_unlock(i915);
692
693 /* Just fits! */
694 if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
695 pr_err("Unable to insert object into single page hole\n");
696 err = -EINVAL;
697 goto out;
698 }
699
700 /* Too large */
701 if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
702 pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
703 err = -EINVAL;
704 goto out;
705 }
706
707 /* Fill the hole, further allocation attempts should then fail */
708 obj = create_sys_or_internal(i915, PAGE_SIZE);
709 if (IS_ERR(obj)) {
710 err = PTR_ERR(obj);
711 pr_err("Unable to create object for reclaimed hole\n");
712 goto out;
713 }
714
715 err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
716 if (err) {
717 pr_err("Unable to insert object into reclaimed hole\n");
718 goto err_obj;
719 }
720
721 if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
722 pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
723 err = -EINVAL;
724 goto err_obj;
725 }
726
727 i915_gem_object_put(obj);
728
729 /* Now fill with busy dead objects that we expect to reap */
730 for (loop = 0; loop < 3; loop++) {
731 if (intel_gt_is_wedged(&i915->gt))
732 break;
733
734 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
735 if (IS_ERR(obj)) {
736 err = PTR_ERR(obj);
737 goto out;
738 }
739
740 err = make_obj_busy(obj);
741 if (err) {
742 pr_err("[loop %d] Failed to busy the object\n", loop);
743 goto err_obj;
744 }
745 }
746
747 out:
748 mmap_offset_lock(i915);
749 out_park:
750 drm_mm_for_each_node_safe(hole, next, mm) {
751 if (hole->color != -1ul)
752 continue;
753
754 drm_mm_remove_node(hole);
755 kfree(hole);
756 }
757 mmap_offset_unlock(i915);
758 restore_retire_worker(i915);
759 return err;
760 err_obj:
761 i915_gem_object_put(obj);
762 goto out;
763 }
764
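/*
 * gtt_set()/wc_set() fill the object with POISON_INUSE through a GGTT iomap
 * or a CPU WC mapping; gtt_check()/wc_check() later verify that every byte
 * reads back as POISON_FREE, i.e. that the values written through the user
 * mmap reached the backing store.
 */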
765 static int gtt_set(struct drm_i915_gem_object *obj)
766 {
767 struct i915_vma *vma;
768 void __iomem *map;
769 int err = 0;
770
771 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
772 if (IS_ERR(vma))
773 return PTR_ERR(vma);
774
775 intel_gt_pm_get(vma->vm->gt);
776 map = i915_vma_pin_iomap(vma);
777 i915_vma_unpin(vma);
778 if (IS_ERR(map)) {
779 err = PTR_ERR(map);
780 goto out;
781 }
782
783 memset_io(map, POISON_INUSE, obj->base.size);
784 i915_vma_unpin_iomap(vma);
785
786 out:
787 intel_gt_pm_put(vma->vm->gt);
788 return err;
789 }
790
791 static int gtt_check(struct drm_i915_gem_object *obj)
792 {
793 struct i915_vma *vma;
794 void __iomem *map;
795 int err = 0;
796
797 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
798 if (IS_ERR(vma))
799 return PTR_ERR(vma);
800
801 intel_gt_pm_get(vma->vm->gt);
802 map = i915_vma_pin_iomap(vma);
803 i915_vma_unpin(vma);
804 if (IS_ERR(map)) {
805 err = PTR_ERR(map);
806 goto out;
807 }
808
809 if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
810 pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
811 obj->mm.region->name);
812 err = -EINVAL;
813 }
814 i915_vma_unpin_iomap(vma);
815
816 out:
817 intel_gt_pm_put(vma->vm->gt);
818 return err;
819 }
820
821 static int wc_set(struct drm_i915_gem_object *obj)
822 {
823 void *vaddr;
824
825 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
826 if (IS_ERR(vaddr))
827 return PTR_ERR(vaddr);
828
829 memset(vaddr, POISON_INUSE, obj->base.size);
830 i915_gem_object_flush_map(obj);
831 i915_gem_object_unpin_map(obj);
832
833 return 0;
834 }
835
836 static int wc_check(struct drm_i915_gem_object *obj)
837 {
838 void *vaddr;
839 int err = 0;
840
841 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
842 if (IS_ERR(vaddr))
843 return PTR_ERR(vaddr);
844
845 if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
846 pr_err("%s: Write via mmap did not land in backing store (WC)\n",
847 obj->mm.region->name);
848 err = -EINVAL;
849 }
850 i915_gem_object_unpin_map(obj);
851
852 return err;
853 }
854
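/*
 * Work out whether the object supports the requested mmap type: objects
 * providing their own mmap_offset hook only expose the fixed mode, GTT
 * mmaps require a mappable aperture, and the remaining CPU modes need
 * struct-page or iomem backing.
 */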
855 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
856 {
857 bool no_map;
858
859 if (obj->ops->mmap_offset)
860 return type == I915_MMAP_TYPE_FIXED;
861 else if (type == I915_MMAP_TYPE_FIXED)
862 return false;
863
864 if (type == I915_MMAP_TYPE_GTT &&
865 !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
866 return false;
867
868 i915_gem_object_lock(obj, NULL);
869 no_map = (type != I915_MMAP_TYPE_GTT &&
870 !i915_gem_object_has_struct_page(obj) &&
871 !i915_gem_object_has_iomem(obj));
872 i915_gem_object_unlock(obj);
873
874 return !no_map;
875 }
876
877 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
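/*
 * Poison the backing store, map the object into userspace with the
 * requested mmap type, and check that the CPU reads back the poison, that
 * the values we write through the mmap stick, and that they finally reach
 * the backing store.
 */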
878 static int __igt_mmap(struct drm_i915_private *i915,
879 struct drm_i915_gem_object *obj,
880 enum i915_mmap_type type)
881 {
882 struct vm_area_struct *area;
883 unsigned long addr;
884 int err, i;
885 u64 offset;
886
887 if (!can_mmap(obj, type))
888 return 0;
889
890 err = wc_set(obj);
891 if (err == -ENXIO)
892 err = gtt_set(obj);
893 if (err)
894 return err;
895
896 err = __assign_mmap_offset(obj, type, &offset, NULL);
897 if (err)
898 return err;
899
900 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
901 if (IS_ERR_VALUE(addr))
902 return addr;
903
904 pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
905
906 mmap_read_lock(current->mm);
907 area = vma_lookup(current->mm, addr);
908 mmap_read_unlock(current->mm);
909 if (!area) {
910 pr_err("%s: Did not create a vm_area_struct for the mmap\n",
911 obj->mm.region->name);
912 err = -EINVAL;
913 goto out_unmap;
914 }
915
916 for (i = 0; i < obj->base.size / sizeof(u32); i++) {
917 u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
918 u32 x;
919
920 if (get_user(x, ux)) {
921 pr_err("%s: Unable to read from mmap, offset:%zd\n",
922 obj->mm.region->name, i * sizeof(x));
923 err = -EFAULT;
924 goto out_unmap;
925 }
926
927 if (x != expand32(POISON_INUSE)) {
928 pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
929 obj->mm.region->name,
930 i * sizeof(x), x, expand32(POISON_INUSE));
931 err = -EINVAL;
932 goto out_unmap;
933 }
934
935 x = expand32(POISON_FREE);
936 if (put_user(x, ux)) {
937 pr_err("%s: Unable to write to mmap, offset:%zd\n",
938 obj->mm.region->name, i * sizeof(x));
939 err = -EFAULT;
940 goto out_unmap;
941 }
942 }
943
944 if (type == I915_MMAP_TYPE_GTT)
945 intel_gt_flush_ggtt_writes(&i915->gt);
946
947 err = wc_check(obj);
948 if (err == -ENXIO)
949 err = gtt_check(obj);
950 out_unmap:
951 vm_munmap(addr, obj->base.size);
952 return err;
953 }
954
955 static int igt_mmap(void *arg)
956 {
957 struct drm_i915_private *i915 = arg;
958 struct intel_memory_region *mr;
959 enum intel_region_id id;
960
961 for_each_memory_region(mr, i915, id) {
962 unsigned long sizes[] = {
963 PAGE_SIZE,
964 mr->min_page_size,
965 SZ_4M,
966 };
967 int i;
968
969 for (i = 0; i < ARRAY_SIZE(sizes); i++) {
970 struct drm_i915_gem_object *obj;
971 int err;
972
973 obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
974 if (obj == ERR_PTR(-ENODEV))
975 continue;
976
977 if (IS_ERR(obj))
978 return PTR_ERR(obj);
979
980 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
981 if (err == 0)
982 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
983 if (err == 0)
984 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
985
986 i915_gem_object_put(obj);
987 if (err)
988 return err;
989 }
990 }
991
992 return 0;
993 }
994
995 static const char *repr_mmap_type(enum i915_mmap_type type)
996 {
997 switch (type) {
998 case I915_MMAP_TYPE_GTT: return "gtt";
999 case I915_MMAP_TYPE_WB: return "wb";
1000 case I915_MMAP_TYPE_WC: return "wc";
1001 case I915_MMAP_TYPE_UC: return "uc";
1002 case I915_MMAP_TYPE_FIXED: return "fixed";
1003 default: return "unknown";
1004 }
1005 }
1006
1007 static bool can_access(struct drm_i915_gem_object *obj)
1008 {
1009 bool access;
1010
1011 i915_gem_object_lock(obj, NULL);
1012 access = i915_gem_object_has_struct_page(obj) ||
1013 i915_gem_object_has_iomem(obj);
1014 i915_gem_object_unlock(obj);
1015
1016 return access;
1017 }
1018
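/*
 * Check that the mmap can also be reached via access_process_vm() (as used
 * e.g. by ptrace): write through the user pointer and read it back with
 * access_process_vm(), then write with access_process_vm() and read it back
 * through the user pointer.
 */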
1019 static int __igt_mmap_access(struct drm_i915_private *i915,
1020 struct drm_i915_gem_object *obj,
1021 enum i915_mmap_type type)
1022 {
1023 unsigned long __user *ptr;
1024 unsigned long A, B;
1025 unsigned long x, y;
1026 unsigned long addr;
1027 int err;
1028 u64 offset;
1029
1030 memset(&A, 0xAA, sizeof(A));
1031 memset(&B, 0xBB, sizeof(B));
1032
1033 if (!can_mmap(obj, type) || !can_access(obj))
1034 return 0;
1035
1036 err = __assign_mmap_offset(obj, type, &offset, NULL);
1037 if (err)
1038 return err;
1039
1040 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1041 if (IS_ERR_VALUE(addr))
1042 return addr;
1043 ptr = (unsigned long __user *)addr;
1044
1045 err = __put_user(A, ptr);
1046 if (err) {
1047 pr_err("%s(%s): failed to write into user mmap\n",
1048 obj->mm.region->name, repr_mmap_type(type));
1049 goto out_unmap;
1050 }
1051
1052 intel_gt_flush_ggtt_writes(&i915->gt);
1053
1054 err = access_process_vm(current, addr, &x, sizeof(x), 0);
1055 if (err != sizeof(x)) {
1056 pr_err("%s(%s): access_process_vm() read failed\n",
1057 obj->mm.region->name, repr_mmap_type(type));
1058 goto out_unmap;
1059 }
1060
1061 err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1062 if (err != sizeof(B)) {
1063 pr_err("%s(%s): access_process_vm() write failed\n",
1064 obj->mm.region->name, repr_mmap_type(type));
1065 goto out_unmap;
1066 }
1067
1068 intel_gt_flush_ggtt_writes(&i915->gt);
1069
1070 err = __get_user(y, ptr);
1071 if (err) {
1072 pr_err("%s(%s): failed to read from user mmap\n",
1073 obj->mm.region->name, repr_mmap_type(type));
1074 goto out_unmap;
1075 }
1076
1077 if (x != A || y != B) {
1078 pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1079 obj->mm.region->name, repr_mmap_type(type),
1080 x, y);
1081 err = -EINVAL;
1082 goto out_unmap;
1083 }
1084
1085 out_unmap:
1086 vm_munmap(addr, obj->base.size);
1087 return err;
1088 }
1089
1090 static int igt_mmap_access(void *arg)
1091 {
1092 struct drm_i915_private *i915 = arg;
1093 struct intel_memory_region *mr;
1094 enum intel_region_id id;
1095
1096 for_each_memory_region(mr, i915, id) {
1097 struct drm_i915_gem_object *obj;
1098 int err;
1099
1100 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1101 if (obj == ERR_PTR(-ENODEV))
1102 continue;
1103
1104 if (IS_ERR(obj))
1105 return PTR_ERR(obj);
1106
1107 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1108 if (err == 0)
1109 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1110 if (err == 0)
1111 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1112 if (err == 0)
1113 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1114 if (err == 0)
1115 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1116
1117 i915_gem_object_put(obj);
1118 if (err)
1119 return err;
1120 }
1121
1122 return 0;
1123 }
1124
1125 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1126 struct drm_i915_gem_object *obj,
1127 enum i915_mmap_type type)
1128 {
1129 struct intel_engine_cs *engine;
1130 unsigned long addr;
1131 u32 __user *ux;
1132 u32 bbe;
1133 int err;
1134 u64 offset;
1135
1136 /*
1137 * Verify that the mmap access into the backing store aligns with
1138 * that of the GPU, i.e. that mmap is indeed writing into the same
1139 * page as being read by the GPU.
1140 */
1141
1142 if (!can_mmap(obj, type))
1143 return 0;
1144
1145 err = wc_set(obj);
1146 if (err == -ENXIO)
1147 err = gtt_set(obj);
1148 if (err)
1149 return err;
1150
1151 err = __assign_mmap_offset(obj, type, &offset, NULL);
1152 if (err)
1153 return err;
1154
1155 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1156 if (IS_ERR_VALUE(addr))
1157 return addr;
1158
1159 ux = u64_to_user_ptr((u64)addr);
1160 bbe = MI_BATCH_BUFFER_END;
1161 if (put_user(bbe, ux)) {
1162 pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1163 err = -EFAULT;
1164 goto out_unmap;
1165 }
1166
1167 if (type == I915_MMAP_TYPE_GTT)
1168 intel_gt_flush_ggtt_writes(&i915->gt);
1169
1170 for_each_uabi_engine(engine, i915) {
1171 struct i915_request *rq;
1172 struct i915_vma *vma;
1173 struct i915_gem_ww_ctx ww;
1174
1175 vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1176 if (IS_ERR(vma)) {
1177 err = PTR_ERR(vma);
1178 goto out_unmap;
1179 }
1180
1181 i915_gem_ww_ctx_init(&ww, false);
1182 retry:
1183 err = i915_gem_object_lock(obj, &ww);
1184 if (!err)
1185 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1186 if (err)
1187 goto out_ww;
1188
1189 rq = i915_request_create(engine->kernel_context);
1190 if (IS_ERR(rq)) {
1191 err = PTR_ERR(rq);
1192 goto out_unpin;
1193 }
1194
1195 err = i915_request_await_object(rq, vma->obj, false);
1196 if (err == 0)
1197 err = i915_vma_move_to_active(vma, rq, 0);
1198
1199 err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
1200 i915_request_get(rq);
1201 i915_request_add(rq);
1202
1203 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1204 struct drm_printer p =
1205 drm_info_printer(engine->i915->drm.dev);
1206
1207 pr_err("%s(%s, %s): Failed to execute batch\n",
1208 __func__, engine->name, obj->mm.region->name);
1209 intel_engine_dump(engine, &p,
1210 "%s\n", engine->name);
1211
1212 intel_gt_set_wedged(engine->gt);
1213 err = -EIO;
1214 }
1215 i915_request_put(rq);
1216
1217 out_unpin:
1218 i915_vma_unpin(vma);
1219 out_ww:
1220 if (err == -EDEADLK) {
1221 err = i915_gem_ww_ctx_backoff(&ww);
1222 if (!err)
1223 goto retry;
1224 }
1225 i915_gem_ww_ctx_fini(&ww);
1226 if (err)
1227 goto out_unmap;
1228 }
1229
1230 out_unmap:
1231 vm_munmap(addr, obj->base.size);
1232 return err;
1233 }
1234
1235 static int igt_mmap_gpu(void *arg)
1236 {
1237 struct drm_i915_private *i915 = arg;
1238 struct intel_memory_region *mr;
1239 enum intel_region_id id;
1240
1241 for_each_memory_region(mr, i915, id) {
1242 struct drm_i915_gem_object *obj;
1243 int err;
1244
1245 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1246 if (obj == ERR_PTR(-ENODEV))
1247 continue;
1248
1249 if (IS_ERR(obj))
1250 return PTR_ERR(obj);
1251
1252 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1253 if (err == 0)
1254 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1255 if (err == 0)
1256 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1257
1258 i915_gem_object_put(obj);
1259 if (err)
1260 return err;
1261 }
1262
1263 return 0;
1264 }
1265
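/*
 * PTE walkers for apply_to_page_range(): check_present_pte() insists that
 * every page of the mapping is populated, while check_absent_pte() insists
 * that every PTE has been cleared once the mmap has been revoked.
 */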
1266 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1267 {
1268 if (!pte_present(*pte) || pte_none(*pte)) {
1269 pr_err("missing PTE:%lx\n",
1270 (addr - (unsigned long)data) >> PAGE_SHIFT);
1271 return -EINVAL;
1272 }
1273
1274 return 0;
1275 }
1276
1277 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1278 {
1279 if (pte_present(*pte) && !pte_none(*pte)) {
1280 pr_err("present PTE:%lx; expected to be revoked\n",
1281 (addr - (unsigned long)data) >> PAGE_SHIFT);
1282 return -EINVAL;
1283 }
1284
1285 return 0;
1286 }
1287
1288 static int check_present(unsigned long addr, unsigned long len)
1289 {
1290 return apply_to_page_range(current->mm, addr, len,
1291 check_present_pte, (void *)addr);
1292 }
1293
1294 static int check_absent(unsigned long addr, unsigned long len)
1295 {
1296 return apply_to_page_range(current->mm, addr, len,
1297 check_absent_pte, (void *)addr);
1298 }
1299
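/*
 * Touch every page of the user mapping (and the final byte) so that all of
 * its PTEs are populated before we inspect them.
 */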
1300 static int prefault_range(u64 start, u64 len)
1301 {
1302 const char __user *addr, *end;
1303 char __maybe_unused c;
1304 int err;
1305
1306 addr = u64_to_user_ptr(start);
1307 end = addr + len;
1308
1309 for (; addr < end; addr += PAGE_SIZE) {
1310 err = __get_user(c, addr);
1311 if (err)
1312 return err;
1313 }
1314
1315 return __get_user(c, end - 1);
1316 }
1317
1318 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1319 struct drm_i915_gem_object *obj,
1320 enum i915_mmap_type type)
1321 {
1322 unsigned long addr;
1323 int err;
1324 u64 offset;
1325
1326 if (!can_mmap(obj, type))
1327 return 0;
1328
1329 err = __assign_mmap_offset(obj, type, &offset, NULL);
1330 if (err)
1331 return err;
1332
1333 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1334 if (IS_ERR_VALUE(addr))
1335 return addr;
1336
1337 err = prefault_range(addr, obj->base.size);
1338 if (err)
1339 goto out_unmap;
1340
1341 err = check_present(addr, obj->base.size);
1342 if (err) {
1343 pr_err("%s: was not present\n", obj->mm.region->name);
1344 goto out_unmap;
1345 }
1346
1347 /*
1348 * After unbinding the object from the GGTT, its address may be reused
1349 * for other objects. Ergo we have to revoke the previous mmap PTE
1350 * access as it no longer points to the same object.
1351 */
1352 err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1353 if (err) {
1354 pr_err("Failed to unbind object!\n");
1355 goto out_unmap;
1356 }
1357
1358 if (type != I915_MMAP_TYPE_GTT) {
1359 i915_gem_object_lock(obj, NULL);
1360 __i915_gem_object_put_pages(obj);
1361 i915_gem_object_unlock(obj);
1362 if (i915_gem_object_has_pages(obj)) {
1363 pr_err("Failed to put-pages object!\n");
1364 err = -EINVAL;
1365 goto out_unmap;
1366 }
1367 }
1368
1369 if (!obj->ops->mmap_ops) {
1370 err = check_absent(addr, obj->base.size);
1371 if (err) {
1372 pr_err("%s: was not absent\n", obj->mm.region->name);
1373 goto out_unmap;
1374 }
1375 } else {
1376 /* ttm allows access to evicted regions by design */
1377
1378 err = check_present(addr, obj->base.size);
1379 if (err) {
1380 pr_err("%s: was not present\n", obj->mm.region->name);
1381 goto out_unmap;
1382 }
1383 }
1384
1385 out_unmap:
1386 vm_munmap(addr, obj->base.size);
1387 return err;
1388 }
1389
1390 static int igt_mmap_revoke(void *arg)
1391 {
1392 struct drm_i915_private *i915 = arg;
1393 struct intel_memory_region *mr;
1394 enum intel_region_id id;
1395
1396 for_each_memory_region(mr, i915, id) {
1397 struct drm_i915_gem_object *obj;
1398 int err;
1399
1400 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1401 if (obj == ERR_PTR(-ENODEV))
1402 continue;
1403
1404 if (IS_ERR(obj))
1405 return PTR_ERR(obj);
1406
1407 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1408 if (err == 0)
1409 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1410 if (err == 0)
1411 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1412
1413 i915_gem_object_put(obj);
1414 if (err)
1415 return err;
1416 }
1417
1418 return 0;
1419 }
1420
1421 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1422 {
1423 static const struct i915_subtest tests[] = {
1424 SUBTEST(igt_partial_tiling),
1425 SUBTEST(igt_smoke_tiling),
1426 SUBTEST(igt_mmap_offset_exhaustion),
1427 SUBTEST(igt_mmap),
1428 SUBTEST(igt_mmap_access),
1429 SUBTEST(igt_mmap_revoke),
1430 SUBTEST(igt_mmap_gpu),
1431 };
1432
1433 return i915_subtests(tests, i915);
1434 }
1435