1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2011-2012 Intel Corporation
5 */
6
7 /*
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and
11 * exits RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
14 *
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current context, in order to invoke a save of the context we actually care about. In fact, the
22 * code could likely be constructed, albeit in a more complicated fashion, to
23 * never use the default context, though that limits the driver's ability to
24 * swap out, and/or destroy other contexts.
25 *
26 * All other contexts are created as a request by the GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
30 *
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware and object
33 * tracking work. Below is a very crude representation of the state machine
34 * describing the context life.
35 * refcount pincount active
36 * S0: initial state 0 0 0
37 * S1: context created 1 0 0
38 * S2: context is currently running 2 1 X
39 * S3: GPU referenced, but not current 2 0 1
40 * S4: context is current, but destroyed 1 1 0
41 * S5: like S3, but destroyed 1 0 1
42 *
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: another client submits an execbuf with a different context
47 * S3->S1: context object was retired
48 * S3->S2: the client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
52 *
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
59 *
60 * An "active context' is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible to destroy a context while it is still active.
64 *
65 */
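
/*
 * A minimal userspace sketch of the S0->S1 and destroy transitions above,
 * written against the uapi i915_drm.h (fd is assumed to be an already open
 * i915 DRM node, error handling is elided):
 *
 *	struct drm_i915_gem_context_create_ext create = { };
 *	struct drm_i915_gem_context_destroy destroy = { };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	... submit execbufs against create.ctx_id (S1->S2->S3) ...
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *
 * Note that the destroy ioctl only drops the file's reference; as described
 * above, the backing state may outlive it while the context is still active.
 */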
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include <drm/drm_syncobj.h>
71
72 #include "gt/gen6_ppgtt.h"
73 #include "gt/intel_context.h"
74 #include "gt/intel_context_param.h"
75 #include "gt/intel_engine_heartbeat.h"
76 #include "gt/intel_engine_user.h"
77 #include "gt/intel_gpu_commands.h"
78 #include "gt/intel_ring.h"
79
80 #include "pxp/intel_pxp.h"
81
82 #include "i915_gem_context.h"
83 #include "i915_trace.h"
84 #include "i915_user_extensions.h"
85
86 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
87
88 static struct kmem_cache *slab_luts;
89
90 struct i915_lut_handle *i915_lut_handle_alloc(void)
91 {
92 return kmem_cache_alloc(slab_luts, GFP_KERNEL);
93 }
94
95 void i915_lut_handle_free(struct i915_lut_handle *lut)
96 {
97 return kmem_cache_free(slab_luts, lut);
98 }
99
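/*
 * Tear down the per-context handle->vma lookup table: for each handle still
 * present in ctx->handles_vma, unlink the matching i915_lut_handle from the
 * object's lut_list, close the VMA and drop the reference the LUT held on
 * the object. Called from context_close() before the ppgtt (and with it all
 * of the VMA) is torn down.
 */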
100 static void lut_close(struct i915_gem_context *ctx)
101 {
102 struct radix_tree_iter iter;
103 void __rcu **slot;
104
105 mutex_lock(&ctx->lut_mutex);
106 rcu_read_lock();
107 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
108 struct i915_vma *vma = rcu_dereference_raw(*slot);
109 struct drm_i915_gem_object *obj = vma->obj;
110 struct i915_lut_handle *lut;
111
112 if (!kref_get_unless_zero(&obj->base.refcount))
113 continue;
114
115 spin_lock(&obj->lut_lock);
116 list_for_each_entry(lut, &obj->lut_list, obj_link) {
117 if (lut->ctx != ctx)
118 continue;
119
120 if (lut->handle != iter.index)
121 continue;
122
123 list_del(&lut->obj_link);
124 break;
125 }
126 spin_unlock(&obj->lut_lock);
127
128 if (&lut->obj_link != &obj->lut_list) {
129 i915_lut_handle_free(lut);
130 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
131 i915_vma_close(vma);
132 i915_gem_object_put(obj);
133 }
134
135 i915_gem_object_put(obj);
136 }
137 rcu_read_unlock();
138 mutex_unlock(&ctx->lut_mutex);
139 }
140
141 static struct intel_context *
142 lookup_user_engine(struct i915_gem_context *ctx,
143 unsigned long flags,
144 const struct i915_engine_class_instance *ci)
145 #define LOOKUP_USER_INDEX BIT(0)
146 {
147 int idx;
148
149 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
150 return ERR_PTR(-EINVAL);
151
152 if (!i915_gem_context_user_engines(ctx)) {
153 struct intel_engine_cs *engine;
154
155 engine = intel_engine_lookup_user(ctx->i915,
156 ci->engine_class,
157 ci->engine_instance);
158 if (!engine)
159 return ERR_PTR(-EINVAL);
160
161 idx = engine->legacy_idx;
162 } else {
163 idx = ci->engine_instance;
164 }
165
166 return i915_gem_context_get_engine(ctx, idx);
167 }
168
169 static int validate_priority(struct drm_i915_private *i915,
170 const struct drm_i915_gem_context_param *args)
171 {
172 s64 priority = args->value;
173
174 if (args->size)
175 return -EINVAL;
176
177 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
178 return -ENODEV;
179
180 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
181 priority < I915_CONTEXT_MIN_USER_PRIORITY)
182 return -EINVAL;
183
184 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
185 !capable(CAP_SYS_NICE))
186 return -EPERM;
187
188 return 0;
189 }
190
191 static void proto_context_close(struct drm_i915_private *i915,
192 struct i915_gem_proto_context *pc)
193 {
194 int i;
195
196 if (pc->pxp_wakeref)
197 intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
198 if (pc->vm)
199 i915_vm_put(pc->vm);
200 if (pc->user_engines) {
201 for (i = 0; i < pc->num_user_engines; i++)
202 kfree(pc->user_engines[i].siblings);
203 kfree(pc->user_engines);
204 }
205 kfree(pc);
206 }
207
208 static int proto_context_set_persistence(struct drm_i915_private *i915,
209 struct i915_gem_proto_context *pc,
210 bool persist)
211 {
212 if (persist) {
213 /*
214 * Only contexts that are short-lived [that will expire or be
215 * reset] are allowed to survive past termination. We require
216 * hangcheck to ensure that the persistent requests are healthy.
217 */
218 if (!i915->params.enable_hangcheck)
219 return -EINVAL;
220
221 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
222 } else {
223 /* To cancel a context we use "preempt-to-idle" */
224 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
225 return -ENODEV;
226
227 /*
228 * If the cancel fails, we then need to reset, cleanly!
229 *
230 * If the per-engine reset fails, all hope is lost! We resort
231 * to a full GPU reset in that unlikely case, but realistically
232 * if the engine could not reset, the full reset does not fare
233 * much better. The damage has been done.
234 *
235 * However, if we cannot reset an engine by itself, we cannot
236 * clean up a hanging persistent context without causing
237 * collateral damage, and we should not pretend we can by
238 * exposing the interface.
239 */
240 if (!intel_has_reset_engine(&i915->gt))
241 return -ENODEV;
242
243 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
244 }
245
246 return 0;
247 }
248
249 static int proto_context_set_protected(struct drm_i915_private *i915,
250 struct i915_gem_proto_context *pc,
251 bool protected)
252 {
253 int ret = 0;
254
255 if (!protected) {
256 pc->uses_protected_content = false;
257 } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
258 ret = -ENODEV;
259 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
260 !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
261 ret = -EPERM;
262 } else {
263 pc->uses_protected_content = true;
264
265 /*
266 * protected context usage requires the PXP session to be up,
267 * which in turn requires the device to be active.
268 */
269 pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
270
271 if (!intel_pxp_is_active(&i915->gt.pxp))
272 ret = intel_pxp_start(&i915->gt.pxp);
273 }
274
275 return ret;
276 }
277
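/*
 * A proto-context stages the creation parameters of a context (VM, engine
 * map, flags, scheduling attributes) before any struct i915_gem_context
 * exists. CONTEXT_SETPARAM calls made before the context is first used are
 * applied here via set_proto_ctx_param() and are only turned into a real
 * context by i915_gem_create_context() when the context is finalized.
 */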
278 static struct i915_gem_proto_context *
279 proto_context_create(struct drm_i915_private *i915, unsigned int flags)
280 {
281 struct i915_gem_proto_context *pc, *err;
282
283 pc = kzalloc(sizeof(*pc), GFP_KERNEL);
284 if (!pc)
285 return ERR_PTR(-ENOMEM);
286
287 pc->num_user_engines = -1;
288 pc->user_engines = NULL;
289 pc->user_flags = BIT(UCONTEXT_BANNABLE) |
290 BIT(UCONTEXT_RECOVERABLE);
291 if (i915->params.enable_hangcheck)
292 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
293 pc->sched.priority = I915_PRIORITY_NORMAL;
294
295 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
296 if (!HAS_EXECLISTS(i915)) {
297 err = ERR_PTR(-EINVAL);
298 goto proto_close;
299 }
300 pc->single_timeline = true;
301 }
302
303 return pc;
304
305 proto_close:
306 proto_context_close(i915, pc);
307 return err;
308 }
309
310 static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
311 struct i915_gem_proto_context *pc,
312 u32 *id)
313 {
314 int ret;
315 void *old;
316
317 lockdep_assert_held(&fpriv->proto_context_lock);
318
319 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
320 if (ret)
321 return ret;
322
323 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
324 if (xa_is_err(old)) {
325 xa_erase(&fpriv->context_xa, *id);
326 return xa_err(old);
327 }
328 WARN_ON(old);
329
330 return 0;
331 }
332
333 static int proto_context_register(struct drm_i915_file_private *fpriv,
334 struct i915_gem_proto_context *pc,
335 u32 *id)
336 {
337 int ret;
338
339 mutex_lock(&fpriv->proto_context_lock);
340 ret = proto_context_register_locked(fpriv, pc, id);
341 mutex_unlock(&fpriv->proto_context_lock);
342
343 return ret;
344 }
345
346 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
347 struct i915_gem_proto_context *pc,
348 const struct drm_i915_gem_context_param *args)
349 {
350 struct drm_i915_private *i915 = fpriv->dev_priv;
351 struct i915_address_space *vm;
352
353 if (args->size)
354 return -EINVAL;
355
356 if (!HAS_FULL_PPGTT(i915))
357 return -ENODEV;
358
359 if (upper_32_bits(args->value))
360 return -ENOENT;
361
362 vm = i915_gem_vm_lookup(fpriv, args->value);
363 if (!vm)
364 return -ENOENT;
365
366 if (pc->vm)
367 i915_vm_put(pc->vm);
368 pc->vm = vm;
369
370 return 0;
371 }
372
373 struct set_proto_ctx_engines {
374 struct drm_i915_private *i915;
375 unsigned num_engines;
376 struct i915_gem_proto_engine *engines;
377 };
378
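/*
 * Userspace describes a load-balanced (virtual) engine with this extension,
 * chained off the I915_CONTEXT_PARAM_ENGINES payload. A sketch using the
 * helper macros from the uapi i915_drm.h, assuming a part with two video
 * engines and supplying the engine map at context creation (slot 0 of the
 * map is left invalid; the extension below fills it in):
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
 *		.engine_index = 0,
 *		.num_siblings = 2,
 *		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
 *			     { I915_ENGINE_CLASS_VIDEO, 1 } },
 *	};
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
 *		.extensions = (uintptr_t)&balance,
 *		.engines = { { I915_ENGINE_CLASS_INVALID,
 *			       I915_ENGINE_CLASS_INVALID_NONE } },
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam p_engines = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_ENGINES,
 *			.value = (uintptr_t)&engines,
 *			.size = sizeof(engines),
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_engines,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */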
379 static int
380 set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
381 void *data)
382 {
383 struct i915_context_engines_load_balance __user *ext =
384 container_of_user(base, typeof(*ext), base);
385 const struct set_proto_ctx_engines *set = data;
386 struct drm_i915_private *i915 = set->i915;
387 struct intel_engine_cs **siblings;
388 u16 num_siblings, idx;
389 unsigned int n;
390 int err;
391
392 if (!HAS_EXECLISTS(i915))
393 return -ENODEV;
394
395 if (get_user(idx, &ext->engine_index))
396 return -EFAULT;
397
398 if (idx >= set->num_engines) {
399 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
400 idx, set->num_engines);
401 return -EINVAL;
402 }
403
404 idx = array_index_nospec(idx, set->num_engines);
405 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
406 drm_dbg(&i915->drm,
407 "Invalid placement[%d], already occupied\n", idx);
408 return -EEXIST;
409 }
410
411 if (get_user(num_siblings, &ext->num_siblings))
412 return -EFAULT;
413
414 err = check_user_mbz(&ext->flags);
415 if (err)
416 return err;
417
418 err = check_user_mbz(&ext->mbz64);
419 if (err)
420 return err;
421
422 if (num_siblings == 0)
423 return 0;
424
425 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
426 if (!siblings)
427 return -ENOMEM;
428
429 for (n = 0; n < num_siblings; n++) {
430 struct i915_engine_class_instance ci;
431
432 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
433 err = -EFAULT;
434 goto err_siblings;
435 }
436
437 siblings[n] = intel_engine_lookup_user(i915,
438 ci.engine_class,
439 ci.engine_instance);
440 if (!siblings[n]) {
441 drm_dbg(&i915->drm,
442 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
443 n, ci.engine_class, ci.engine_instance);
444 err = -EINVAL;
445 goto err_siblings;
446 }
447 }
448
449 if (num_siblings == 1) {
450 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
451 set->engines[idx].engine = siblings[0];
452 kfree(siblings);
453 } else {
454 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
455 set->engines[idx].num_siblings = num_siblings;
456 set->engines[idx].siblings = siblings;
457 }
458
459 return 0;
460
461 err_siblings:
462 kfree(siblings);
463
464 return err;
465 }
466
467 static int
468 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
469 {
470 struct i915_context_engines_bond __user *ext =
471 container_of_user(base, typeof(*ext), base);
472 const struct set_proto_ctx_engines *set = data;
473 struct drm_i915_private *i915 = set->i915;
474 struct i915_engine_class_instance ci;
475 struct intel_engine_cs *master;
476 u16 idx, num_bonds;
477 int err, n;
478
479 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
480 !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
481 drm_dbg(&i915->drm,
482 "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n");
483 return -ENODEV;
484 }
485
486 if (get_user(idx, &ext->virtual_index))
487 return -EFAULT;
488
489 if (idx >= set->num_engines) {
490 drm_dbg(&i915->drm,
491 "Invalid index for virtual engine: %d >= %d\n",
492 idx, set->num_engines);
493 return -EINVAL;
494 }
495
496 idx = array_index_nospec(idx, set->num_engines);
497 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
498 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
499 return -EINVAL;
500 }
501
502 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
503 drm_dbg(&i915->drm,
504 "Bonding with virtual engines not allowed\n");
505 return -EINVAL;
506 }
507
508 err = check_user_mbz(&ext->flags);
509 if (err)
510 return err;
511
512 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
513 err = check_user_mbz(&ext->mbz64[n]);
514 if (err)
515 return err;
516 }
517
518 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
519 return -EFAULT;
520
521 master = intel_engine_lookup_user(i915,
522 ci.engine_class,
523 ci.engine_instance);
524 if (!master) {
525 drm_dbg(&i915->drm,
526 "Unrecognised master engine: { class:%u, instance:%u }\n",
527 ci.engine_class, ci.engine_instance);
528 return -EINVAL;
529 }
530
531 if (intel_engine_uses_guc(master)) {
532 DRM_DEBUG("bonding extension not supported with GuC submission");
533 return -ENODEV;
534 }
535
536 if (get_user(num_bonds, &ext->num_bonds))
537 return -EFAULT;
538
539 for (n = 0; n < num_bonds; n++) {
540 struct intel_engine_cs *bond;
541
542 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
543 return -EFAULT;
544
545 bond = intel_engine_lookup_user(i915,
546 ci.engine_class,
547 ci.engine_instance);
548 if (!bond) {
549 drm_dbg(&i915->drm,
550 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
551 n, ci.engine_class, ci.engine_instance);
552 return -EINVAL;
553 }
554 }
555
556 return 0;
557 }
558
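/*
 * For the parallel submit extension the engines array is a width x
 * num_siblings matrix flattened in row-major order: placement j for parallel
 * position i lives at index i * num_siblings + j, which is how the nested
 * loop below walks it. All entries must share one engine class and the
 * logical masks of consecutive positions must be contiguous.
 */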
559 static int
560 set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
561 void *data)
562 {
563 struct i915_context_engines_parallel_submit __user *ext =
564 container_of_user(base, typeof(*ext), base);
565 const struct set_proto_ctx_engines *set = data;
566 struct drm_i915_private *i915 = set->i915;
567 struct i915_engine_class_instance prev_engine;
568 u64 flags;
569 int err = 0, n, i, j;
570 u16 slot, width, num_siblings;
571 struct intel_engine_cs **siblings = NULL;
572 intel_engine_mask_t prev_mask;
573
574 /* FIXME: This is NIY for execlists */
575 if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
576 return -ENODEV;
577
578 if (get_user(slot, &ext->engine_index))
579 return -EFAULT;
580
581 if (get_user(width, &ext->width))
582 return -EFAULT;
583
584 if (get_user(num_siblings, &ext->num_siblings))
585 return -EFAULT;
586
587 if (slot >= set->num_engines) {
588 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
589 slot, set->num_engines);
590 return -EINVAL;
591 }
592
593 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
594 drm_dbg(&i915->drm,
595 "Invalid placement[%d], already occupied\n", slot);
596 return -EINVAL;
597 }
598
599 if (get_user(flags, &ext->flags))
600 return -EFAULT;
601
602 if (flags) {
603 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
604 return -EINVAL;
605 }
606
607 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
608 err = check_user_mbz(&ext->mbz64[n]);
609 if (err)
610 return err;
611 }
612
613 if (width < 2) {
614 drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
615 return -EINVAL;
616 }
617
618 if (num_siblings < 1) {
619 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
620 num_siblings);
621 return -EINVAL;
622 }
623
624 siblings = kmalloc_array(num_siblings * width,
625 sizeof(*siblings),
626 GFP_KERNEL);
627 if (!siblings)
628 return -ENOMEM;
629
630 /* Create contexts / engines */
631 for (i = 0; i < width; ++i) {
632 intel_engine_mask_t current_mask = 0;
633
634 for (j = 0; j < num_siblings; ++j) {
635 struct i915_engine_class_instance ci;
636
637 n = i * num_siblings + j;
638 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
639 err = -EFAULT;
640 goto out_err;
641 }
642
643 siblings[n] =
644 intel_engine_lookup_user(i915, ci.engine_class,
645 ci.engine_instance);
646 if (!siblings[n]) {
647 drm_dbg(&i915->drm,
648 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
649 n, ci.engine_class, ci.engine_instance);
650 err = -EINVAL;
651 goto out_err;
652 }
653
654 if (n) {
655 if (prev_engine.engine_class !=
656 ci.engine_class) {
657 drm_dbg(&i915->drm,
658 "Mismatched class %d, %d\n",
659 prev_engine.engine_class,
660 ci.engine_class);
661 err = -EINVAL;
662 goto out_err;
663 }
664 }
665
666 prev_engine = ci;
667 current_mask |= siblings[n]->logical_mask;
668 }
669
670 if (i > 0) {
671 if (current_mask != prev_mask << 1) {
672 drm_dbg(&i915->drm,
673 "Non contiguous logical mask 0x%x, 0x%x\n",
674 prev_mask, current_mask);
675 err = -EINVAL;
676 goto out_err;
677 }
678 }
679 prev_mask = current_mask;
680 }
681
682 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
683 set->engines[slot].num_siblings = num_siblings;
684 set->engines[slot].width = width;
685 set->engines[slot].siblings = siblings;
686
687 return 0;
688
689 out_err:
690 kfree(siblings);
691
692 return err;
693 }
694
695 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
696 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
697 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
698 [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
699 set_proto_ctx_engines_parallel_submit,
700 };
701
702 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
703 struct i915_gem_proto_context *pc,
704 const struct drm_i915_gem_context_param *args)
705 {
706 struct drm_i915_private *i915 = fpriv->dev_priv;
707 struct set_proto_ctx_engines set = { .i915 = i915 };
708 struct i915_context_param_engines __user *user =
709 u64_to_user_ptr(args->value);
710 unsigned int n;
711 u64 extensions;
712 int err;
713
714 if (pc->num_user_engines >= 0) {
715 drm_dbg(&i915->drm, "Cannot set engines twice");
716 return -EINVAL;
717 }
718
719 if (args->size < sizeof(*user) ||
720 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
721 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
722 args->size);
723 return -EINVAL;
724 }
725
726 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
727 /* RING_MASK has no shift so we can use it directly here */
728 if (set.num_engines > I915_EXEC_RING_MASK + 1)
729 return -EINVAL;
730
731 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
732 if (!set.engines)
733 return -ENOMEM;
734
735 for (n = 0; n < set.num_engines; n++) {
736 struct i915_engine_class_instance ci;
737 struct intel_engine_cs *engine;
738
739 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
740 kfree(set.engines);
741 return -EFAULT;
742 }
743
744 memset(&set.engines[n], 0, sizeof(set.engines[n]));
745
746 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
747 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
748 continue;
749
750 engine = intel_engine_lookup_user(i915,
751 ci.engine_class,
752 ci.engine_instance);
753 if (!engine) {
754 drm_dbg(&i915->drm,
755 "Invalid engine[%d]: { class:%d, instance:%d }\n",
756 n, ci.engine_class, ci.engine_instance);
757 kfree(set.engines);
758 return -ENOENT;
759 }
760
761 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
762 set.engines[n].engine = engine;
763 }
764
765 err = -EFAULT;
766 if (!get_user(extensions, &user->extensions))
767 err = i915_user_extensions(u64_to_user_ptr(extensions),
768 set_proto_ctx_engines_extensions,
769 ARRAY_SIZE(set_proto_ctx_engines_extensions),
770 &set);
771 if (err) {
772 kfree(set.engines);
773 return err;
774 }
775
776 pc->num_user_engines = set.num_engines;
777 pc->user_engines = set.engines;
778
779 return 0;
780 }
781
782 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
783 struct i915_gem_proto_context *pc,
784 struct drm_i915_gem_context_param *args)
785 {
786 struct drm_i915_private *i915 = fpriv->dev_priv;
787 struct drm_i915_gem_context_param_sseu user_sseu;
788 struct intel_sseu *sseu;
789 int ret;
790
791 if (args->size < sizeof(user_sseu))
792 return -EINVAL;
793
794 if (GRAPHICS_VER(i915) != 11)
795 return -ENODEV;
796
797 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
798 sizeof(user_sseu)))
799 return -EFAULT;
800
801 if (user_sseu.rsvd)
802 return -EINVAL;
803
804 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
805 return -EINVAL;
806
807 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
808 return -EINVAL;
809
810 if (pc->num_user_engines >= 0) {
811 int idx = user_sseu.engine.engine_instance;
812 struct i915_gem_proto_engine *pe;
813
814 if (idx >= pc->num_user_engines)
815 return -EINVAL;
816
817 pe = &pc->user_engines[idx];
818
819 /* Only render engine supports RPCS configuration. */
820 if (pe->engine->class != RENDER_CLASS)
821 return -EINVAL;
822
823 sseu = &pe->sseu;
824 } else {
825 /* Only render engine supports RPCS configuration. */
826 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
827 return -EINVAL;
828
829 /* There is only one render engine */
830 if (user_sseu.engine.engine_instance != 0)
831 return -EINVAL;
832
833 sseu = &pc->legacy_rcs_sseu;
834 }
835
836 ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
837 if (ret)
838 return ret;
839
840 args->size = sizeof(user_sseu);
841
842 return 0;
843 }
844
845 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
846 struct i915_gem_proto_context *pc,
847 struct drm_i915_gem_context_param *args)
848 {
849 int ret = 0;
850
851 switch (args->param) {
852 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
853 if (args->size)
854 ret = -EINVAL;
855 else if (args->value)
856 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
857 else
858 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
859 break;
860
861 case I915_CONTEXT_PARAM_BANNABLE:
862 if (args->size)
863 ret = -EINVAL;
864 else if (!capable(CAP_SYS_ADMIN) && !args->value)
865 ret = -EPERM;
866 else if (args->value)
867 pc->user_flags |= BIT(UCONTEXT_BANNABLE);
868 else if (pc->uses_protected_content)
869 ret = -EPERM;
870 else
871 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
872 break;
873
874 case I915_CONTEXT_PARAM_RECOVERABLE:
875 if (args->size)
876 ret = -EINVAL;
877 else if (!args->value)
878 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
879 else if (pc->uses_protected_content)
880 ret = -EPERM;
881 else
882 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
883 break;
884
885 case I915_CONTEXT_PARAM_PRIORITY:
886 ret = validate_priority(fpriv->dev_priv, args);
887 if (!ret)
888 pc->sched.priority = args->value;
889 break;
890
891 case I915_CONTEXT_PARAM_SSEU:
892 ret = set_proto_ctx_sseu(fpriv, pc, args);
893 break;
894
895 case I915_CONTEXT_PARAM_VM:
896 ret = set_proto_ctx_vm(fpriv, pc, args);
897 break;
898
899 case I915_CONTEXT_PARAM_ENGINES:
900 ret = set_proto_ctx_engines(fpriv, pc, args);
901 break;
902
903 case I915_CONTEXT_PARAM_PERSISTENCE:
904 if (args->size)
905 ret = -EINVAL;
906 else
ret = proto_context_set_persistence(fpriv->dev_priv, pc,
907 args->value);
908 break;
909
910 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
911 ret = proto_context_set_protected(fpriv->dev_priv, pc,
912 args->value);
913 break;
914
915 case I915_CONTEXT_PARAM_NO_ZEROMAP:
916 case I915_CONTEXT_PARAM_BAN_PERIOD:
917 case I915_CONTEXT_PARAM_RINGSIZE:
918 default:
919 ret = -EINVAL;
920 break;
921 }
922
923 return ret;
924 }
925
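/*
 * Bind a newly created intel_context to its owning GEM context: adopt the
 * context's VM and ring size, enable semaphores for engines that can
 * timeslice when the priority allows it, arm the request watchdog from
 * request_timeout_ms and, for the render engine, apply any non-default SSEU
 * configuration.
 */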
926 static int intel_context_set_gem(struct intel_context *ce,
927 struct i915_gem_context *ctx,
928 struct intel_sseu sseu)
929 {
930 int ret = 0;
931
932 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
933 RCU_INIT_POINTER(ce->gem_context, ctx);
934
935 GEM_BUG_ON(intel_context_is_pinned(ce));
936 ce->ring_size = SZ_16K;
937
938 i915_vm_put(ce->vm);
939 ce->vm = i915_gem_context_get_eb_vm(ctx);
940
941 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
942 intel_engine_has_timeslices(ce->engine) &&
943 intel_engine_has_semaphores(ce->engine))
944 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
945
946 if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
947 ctx->i915->params.request_timeout_ms) {
948 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
949
950 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
951 }
952
953 /* A valid SSEU has no zero fields */
954 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
955 ret = intel_context_reconfigure_sseu(ce, sseu);
956
957 return ret;
958 }
959
960 static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
961 {
962 while (count--) {
963 struct intel_context *ce = e->engines[count], *child;
964
965 if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
966 continue;
967
968 for_each_child(ce, child)
969 intel_context_unpin(child);
970 intel_context_unpin(ce);
971 }
972 }
973
974 static void unpin_engines(struct i915_gem_engines *e)
975 {
976 __unpin_engines(e, e->num_engines);
977 }
978
979 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
980 {
981 while (count--) {
982 if (!e->engines[count])
983 continue;
984
985 intel_context_put(e->engines[count]);
986 }
987 kfree(e);
988 }
989
990 static void free_engines(struct i915_gem_engines *e)
991 {
992 __free_engines(e, e->num_engines);
993 }
994
995 static void free_engines_rcu(struct rcu_head *rcu)
996 {
997 struct i915_gem_engines *engines =
998 container_of(rcu, struct i915_gem_engines, rcu);
999
1000 i915_sw_fence_fini(&engines->fence);
1001 free_engines(engines);
1002 }
1003
1004 static int __i915_sw_fence_call
1005 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1006 {
1007 struct i915_gem_engines *engines =
1008 container_of(fence, typeof(*engines), fence);
1009
1010 switch (state) {
1011 case FENCE_COMPLETE:
1012 if (!list_empty(&engines->link)) {
1013 struct i915_gem_context *ctx = engines->ctx;
1014 unsigned long flags;
1015
1016 spin_lock_irqsave(&ctx->stale.lock, flags);
1017 list_del(&engines->link);
1018 spin_unlock_irqrestore(&ctx->stale.lock, flags);
1019 }
1020 i915_gem_context_put(engines->ctx);
1021 break;
1022
1023 case FENCE_FREE:
1024 init_rcu_head(&engines->rcu);
1025 call_rcu(&engines->rcu, free_engines_rcu);
1026 break;
1027 }
1028
1029 return NOTIFY_DONE;
1030 }
1031
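/*
 * An i915_gem_engines array carries an i915_sw_fence which completes once
 * every context in the array has been flushed from the GPU; engines_notify()
 * above then unlinks the array from ctx->stale on FENCE_COMPLETE and defers
 * the actual free to RCU on FENCE_FREE, so readers that raced with a
 * replacement of ctx->engines can still walk the old array under RCU.
 */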
1032 static struct i915_gem_engines *alloc_engines(unsigned int count)
1033 {
1034 struct i915_gem_engines *e;
1035
1036 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
1037 if (!e)
1038 return NULL;
1039
1040 i915_sw_fence_init(&e->fence, engines_notify);
1041 return e;
1042 }
1043
1044 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1045 struct intel_sseu rcs_sseu)
1046 {
1047 const struct intel_gt *gt = &ctx->i915->gt;
1048 struct intel_engine_cs *engine;
1049 struct i915_gem_engines *e, *err;
1050 enum intel_engine_id id;
1051
1052 e = alloc_engines(I915_NUM_ENGINES);
1053 if (!e)
1054 return ERR_PTR(-ENOMEM);
1055
1056 for_each_engine(engine, gt, id) {
1057 struct intel_context *ce;
1058 struct intel_sseu sseu = {};
1059 int ret;
1060
1061 if (engine->legacy_idx == INVALID_ENGINE)
1062 continue;
1063
1064 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
1065 GEM_BUG_ON(e->engines[engine->legacy_idx]);
1066
1067 ce = intel_context_create(engine);
1068 if (IS_ERR(ce)) {
1069 err = ERR_CAST(ce);
1070 goto free_engines;
1071 }
1072
1073 e->engines[engine->legacy_idx] = ce;
1074 e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1075
1076 if (engine->class == RENDER_CLASS)
1077 sseu = rcs_sseu;
1078
1079 ret = intel_context_set_gem(ce, ctx, sseu);
1080 if (ret) {
1081 err = ERR_PTR(ret);
1082 goto free_engines;
1083 }
1084
1085 }
1086
1087 return e;
1088
1089 free_engines:
1090 free_engines(e);
1091 return err;
1092 }
1093
1094 static int perma_pin_contexts(struct intel_context *ce)
1095 {
1096 struct intel_context *child;
1097 int i = 0, j = 0, ret;
1098
1099 GEM_BUG_ON(!intel_context_is_parent(ce));
1100
1101 ret = intel_context_pin(ce);
1102 if (unlikely(ret))
1103 return ret;
1104
1105 for_each_child(ce, child) {
1106 ret = intel_context_pin(child);
1107 if (unlikely(ret))
1108 goto unwind;
1109 ++i;
1110 }
1111
1112 set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1113
1114 return 0;
1115
1116 unwind:
1117 intel_context_unpin(ce);
1118 for_each_child(ce, child) {
1119 if (j++ < i)
1120 intel_context_unpin(child);
1121 else
1122 break;
1123 }
1124
1125 return ret;
1126 }
1127
1128 static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1129 unsigned int num_engines,
1130 struct i915_gem_proto_engine *pe)
1131 {
1132 struct i915_gem_engines *e, *err;
1133 unsigned int n;
1134
1135 e = alloc_engines(num_engines);
1136 if (!e)
1137 return ERR_PTR(-ENOMEM);
1138 e->num_engines = num_engines;
1139
1140 for (n = 0; n < num_engines; n++) {
1141 struct intel_context *ce, *child;
1142 int ret;
1143
1144 switch (pe[n].type) {
1145 case I915_GEM_ENGINE_TYPE_PHYSICAL:
1146 ce = intel_context_create(pe[n].engine);
1147 break;
1148
1149 case I915_GEM_ENGINE_TYPE_BALANCED:
1150 ce = intel_engine_create_virtual(pe[n].siblings,
1151 pe[n].num_siblings, 0);
1152 break;
1153
1154 case I915_GEM_ENGINE_TYPE_PARALLEL:
1155 ce = intel_engine_create_parallel(pe[n].siblings,
1156 pe[n].num_siblings,
1157 pe[n].width);
1158 break;
1159
1160 case I915_GEM_ENGINE_TYPE_INVALID:
1161 default:
1162 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1163 continue;
1164 }
1165
1166 if (IS_ERR(ce)) {
1167 err = ERR_CAST(ce);
1168 goto free_engines;
1169 }
1170
1171 e->engines[n] = ce;
1172
1173 ret = intel_context_set_gem(ce, ctx, pe[n].sseu);
1174 if (ret) {
1175 err = ERR_PTR(ret);
1176 goto free_engines;
1177 }
1178 for_each_child(ce, child) {
1179 ret = intel_context_set_gem(child, ctx, pe[n].sseu);
1180 if (ret) {
1181 err = ERR_PTR(ret);
1182 goto free_engines;
1183 }
1184 }
1185
1186 /*
1187 * XXX: Must be done after calling intel_context_set_gem as that
1188 * function changes the ring size. The ring is allocated when
1189 * the context is pinned. If the ring size is changed after
1190 * allocation we have a mismatch of the ring size and will cause
1191 * the context to hang. Presumably with a bit of reordering we
1192 * could move the perma-pin step to the backend function
1193 * intel_engine_create_parallel.
1194 */
1195 if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1196 ret = perma_pin_contexts(ce);
1197 if (ret) {
1198 err = ERR_PTR(ret);
1199 goto free_engines;
1200 }
1201 }
1202 }
1203
1204 return e;
1205
1206 free_engines:
1207 free_engines(e);
1208 return err;
1209 }
1210
1211 static void i915_gem_context_release_work(struct work_struct *work)
1212 {
1213 struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
1214 release_work);
1215 struct i915_address_space *vm;
1216
1217 trace_i915_context_free(ctx);
1218 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1219
1220 if (ctx->syncobj)
1221 drm_syncobj_put(ctx->syncobj);
1222
1223 vm = ctx->vm;
1224 if (vm)
1225 i915_vm_put(vm);
1226
1227 if (ctx->pxp_wakeref)
1228 intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1229
1230 mutex_destroy(&ctx->engines_mutex);
1231 mutex_destroy(&ctx->lut_mutex);
1232
1233 put_pid(ctx->pid);
1234 mutex_destroy(&ctx->mutex);
1235
1236 kfree_rcu(ctx, rcu);
1237 }
1238
1239 void i915_gem_context_release(struct kref *ref)
1240 {
1241 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
1242
1243 queue_work(ctx->i915->wq, &ctx->release_work);
1244 }
1245
1246 static inline struct i915_gem_engines *
1247 __context_engines_static(const struct i915_gem_context *ctx)
1248 {
1249 return rcu_dereference_protected(ctx->engines, true);
1250 }
1251
1252 static void __reset_context(struct i915_gem_context *ctx,
1253 struct intel_engine_cs *engine)
1254 {
1255 intel_gt_handle_error(engine->gt, engine->mask, 0,
1256 "context closure in %s", ctx->name);
1257 }
1258
1259 static bool __cancel_engine(struct intel_engine_cs *engine)
1260 {
1261 /*
1262 * Send a "high priority pulse" down the engine to cause the
1263 * current request to be momentarily preempted. (If it fails to
1264 * be preempted, it will be reset). As we have marked our context
1265 * as banned, any incomplete request, including any running, will
1266 * be skipped following the preemption.
1267 *
1268 * If there is no hangchecking (one of the reasons why we try to
1269 * cancel the context) and no forced preemption, there may be no
1270 * means by which we reset the GPU and evict the persistent hog.
1271 * Ergo if we are unable to inject a preemptive pulse that can
1272 * kill the banned context, we fallback to doing a local reset
1273 * instead.
1274 */
1275 return intel_engine_pulse(engine) == 0;
1276 }
1277
1278 static struct intel_engine_cs *active_engine(struct intel_context *ce)
1279 {
1280 struct intel_engine_cs *engine = NULL;
1281 struct i915_request *rq;
1282
1283 if (intel_context_has_inflight(ce))
1284 return intel_context_inflight(ce);
1285
1286 if (!ce->timeline)
1287 return NULL;
1288
1289 /*
1290 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1291 * to the request to prevent it being transferred to a new timeline
1292 * (and onto a new timeline->requests list).
1293 */
1294 rcu_read_lock();
1295 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1296 bool found;
1297
1298 /* timeline is already completed up to this point? */
1299 if (!i915_request_get_rcu(rq))
1300 break;
1301
1302 /* Check with the backend if the request is inflight */
1303 found = true;
1304 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1305 found = i915_request_active_engine(rq, &engine);
1306
1307 i915_request_put(rq);
1308 if (found)
1309 break;
1310 }
1311 rcu_read_unlock();
1312
1313 return engine;
1314 }
1315
1316 static void kill_engines(struct i915_gem_engines *engines, bool ban)
1317 {
1318 struct i915_gem_engines_iter it;
1319 struct intel_context *ce;
1320
1321 /*
1322 * Map the user's engine back to the actual engines; one virtual
1323 * engine will be mapped to multiple engines, and using ctx->engine[]
1324 * the same engine may have multiple instances in the user's map.
1325 * However, we only care about pending requests, so only include
1326 * engines on which there are incomplete requests.
1327 */
1328 for_each_gem_engine(ce, engines, it) {
1329 struct intel_engine_cs *engine;
1330
1331 if (ban && intel_context_ban(ce, NULL))
1332 continue;
1333
1334 /*
1335 * Check the current active state of this context; if we
1336 * are currently executing on the GPU we need to evict
1337 * ourselves. On the other hand, if we haven't yet been
1338 * submitted to the GPU or if everything is complete,
1339 * we have nothing to do.
1340 */
1341 engine = active_engine(ce);
1342
1343 /* First attempt to gracefully cancel the context */
1344 if (engine && !__cancel_engine(engine) && ban)
1345 /*
1346 * If we are unable to send a preemptive pulse to bump
1347 * the context from the GPU, we have to resort to a full
1348 * reset. We hope the collateral damage is worth it.
1349 */
1350 __reset_context(engines->ctx, engine);
1351 }
1352 }
1353
1354 static void kill_context(struct i915_gem_context *ctx)
1355 {
1356 bool ban = (!i915_gem_context_is_persistent(ctx) ||
1357 !ctx->i915->params.enable_hangcheck);
1358 struct i915_gem_engines *pos, *next;
1359
1360 spin_lock_irq(&ctx->stale.lock);
1361 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1362 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1363 if (!i915_sw_fence_await(&pos->fence)) {
1364 list_del_init(&pos->link);
1365 continue;
1366 }
1367
1368 spin_unlock_irq(&ctx->stale.lock);
1369
1370 kill_engines(pos, ban);
1371
1372 spin_lock_irq(&ctx->stale.lock);
1373 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1374 list_safe_reset_next(pos, next, link);
1375 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1376
1377 i915_sw_fence_complete(&pos->fence);
1378 }
1379 spin_unlock_irq(&ctx->stale.lock);
1380 }
1381
1382 static void engines_idle_release(struct i915_gem_context *ctx,
1383 struct i915_gem_engines *engines)
1384 {
1385 struct i915_gem_engines_iter it;
1386 struct intel_context *ce;
1387
1388 INIT_LIST_HEAD(&engines->link);
1389
1390 engines->ctx = i915_gem_context_get(ctx);
1391
1392 for_each_gem_engine(ce, engines, it) {
1393 int err;
1394
1395 /* serialises with execbuf */
1396 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
1397 if (!intel_context_pin_if_active(ce))
1398 continue;
1399
1400 /* Wait until context is finally scheduled out and retired */
1401 err = i915_sw_fence_await_active(&engines->fence,
1402 &ce->active,
1403 I915_ACTIVE_AWAIT_BARRIER);
1404 intel_context_unpin(ce);
1405 if (err)
1406 goto kill;
1407 }
1408
1409 spin_lock_irq(&ctx->stale.lock);
1410 if (!i915_gem_context_is_closed(ctx))
1411 list_add_tail(&engines->link, &ctx->stale.engines);
1412 spin_unlock_irq(&ctx->stale.lock);
1413
1414 kill:
1415 if (list_empty(&engines->link)) /* raced, already closed */
1416 kill_engines(engines, true);
1417
1418 i915_sw_fence_commit(&engines->fence);
1419 }
1420
1421 static void set_closed_name(struct i915_gem_context *ctx)
1422 {
1423 char *s;
1424
1425 /* Replace '[]' with '<>' to indicate closed in debug prints */
1426
1427 s = strrchr(ctx->name, '[');
1428 if (!s)
1429 return;
1430
1431 *s = '<';
1432
1433 s = strchr(s + 1, ']');
1434 if (s)
1435 *s = '>';
1436 }
1437
1438 static void context_close(struct i915_gem_context *ctx)
1439 {
1440 struct i915_address_space *vm;
1441
1442 /* Flush any concurrent set_engines() */
1443 mutex_lock(&ctx->engines_mutex);
1444 unpin_engines(__context_engines_static(ctx));
1445 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1446 i915_gem_context_set_closed(ctx);
1447 mutex_unlock(&ctx->engines_mutex);
1448
1449 mutex_lock(&ctx->mutex);
1450
1451 set_closed_name(ctx);
1452
1453 vm = ctx->vm;
1454 if (vm) {
1455 /* i915_vm_close drops the final reference, which is a bit too
1456 * early and could result in surprises with concurrent
1457 * operations racing with this ctx close. Keep a full reference
1458 * until the end.
1459 */
1460 i915_vm_get(vm);
1461 i915_vm_close(vm);
1462 }
1463
1464 ctx->file_priv = ERR_PTR(-EBADF);
1465
1466 /*
1467 * The LUT uses the VMA as a backpointer to unref the object,
1468 * so we need to clear the LUT before we close all the VMA (inside
1469 * the ppgtt).
1470 */
1471 lut_close(ctx);
1472
1473 spin_lock(&ctx->i915->gem.contexts.lock);
1474 list_del(&ctx->link);
1475 spin_unlock(&ctx->i915->gem.contexts.lock);
1476
1477 mutex_unlock(&ctx->mutex);
1478
1479 /*
1480 * If the user has disabled hangchecking, we can not be sure that
1481 * the batches will ever complete after the context is closed,
1482 * keeping the context and all resources pinned forever. So in this
1483 * case we opt to forcibly kill off all remaining requests on
1484 * context close.
1485 */
1486 kill_context(ctx);
1487
1488 i915_gem_context_put(ctx);
1489 }
1490
1491 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1492 {
1493 if (i915_gem_context_is_persistent(ctx) == state)
1494 return 0;
1495
1496 if (state) {
1497 /*
1498 * Only contexts that are short-lived [that will expire or be
1499 * reset] are allowed to survive past termination. We require
1500 * hangcheck to ensure that the persistent requests are healthy.
1501 */
1502 if (!ctx->i915->params.enable_hangcheck)
1503 return -EINVAL;
1504
1505 i915_gem_context_set_persistence(ctx);
1506 } else {
1507 /* To cancel a context we use "preempt-to-idle" */
1508 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1509 return -ENODEV;
1510
1511 /*
1512 * If the cancel fails, we then need to reset, cleanly!
1513 *
1514 * If the per-engine reset fails, all hope is lost! We resort
1515 * to a full GPU reset in that unlikely case, but realistically
1516 * if the engine could not reset, the full reset does not fare
1517 * much better. The damage has been done.
1518 *
1519 * However, if we cannot reset an engine by itself, we cannot
1520 * clean up a hanging persistent context without causing
1521 * collateral damage, and we should not pretend we can by
1522 * exposing the interface.
1523 */
1524 if (!intel_has_reset_engine(&ctx->i915->gt))
1525 return -ENODEV;
1526
1527 i915_gem_context_clear_persistence(ctx);
1528 }
1529
1530 return 0;
1531 }
1532
1533 static struct i915_gem_context *
1534 i915_gem_create_context(struct drm_i915_private *i915,
1535 const struct i915_gem_proto_context *pc)
1536 {
1537 struct i915_gem_context *ctx;
1538 struct i915_address_space *vm = NULL;
1539 struct i915_gem_engines *e;
1540 int err;
1541 int i;
1542
1543 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1544 if (!ctx)
1545 return ERR_PTR(-ENOMEM);
1546
1547 kref_init(&ctx->ref);
1548 ctx->i915 = i915;
1549 ctx->sched = pc->sched;
1550 mutex_init(&ctx->mutex);
1551 INIT_LIST_HEAD(&ctx->link);
1552 INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1553
1554 spin_lock_init(&ctx->stale.lock);
1555 INIT_LIST_HEAD(&ctx->stale.engines);
1556
1557 if (pc->vm) {
1558 vm = i915_vm_get(pc->vm);
1559 } else if (HAS_FULL_PPGTT(i915)) {
1560 struct i915_ppgtt *ppgtt;
1561
1562 ppgtt = i915_ppgtt_create(&i915->gt, 0);
1563 if (IS_ERR(ppgtt)) {
1564 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1565 PTR_ERR(ppgtt));
1566 err = PTR_ERR(ppgtt);
1567 goto err_ctx;
1568 }
1569 vm = &ppgtt->vm;
1570 }
1571 if (vm) {
1572 ctx->vm = i915_vm_open(vm);
1573
1574 /* i915_vm_open() takes a reference */
1575 i915_vm_put(vm);
1576 }
1577
1578 mutex_init(&ctx->engines_mutex);
1579 if (pc->num_user_engines >= 0) {
1580 i915_gem_context_set_user_engines(ctx);
1581 e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1582 } else {
1583 i915_gem_context_clear_user_engines(ctx);
1584 e = default_engines(ctx, pc->legacy_rcs_sseu);
1585 }
1586 if (IS_ERR(e)) {
1587 err = PTR_ERR(e);
1588 goto err_vm;
1589 }
1590 RCU_INIT_POINTER(ctx->engines, e);
1591
1592 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1593 mutex_init(&ctx->lut_mutex);
1594
1595 /* NB: Mark all slices as needing a remap so that when the context first
1596 * loads it will restore whatever remap state already exists. If there
1597 * is no remap info, it will be a NOP. */
1598 ctx->remap_slice = ALL_L3_SLICES(i915);
1599
1600 ctx->user_flags = pc->user_flags;
1601
1602 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1603 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1604
1605 if (pc->single_timeline) {
1606 err = drm_syncobj_create(&ctx->syncobj,
1607 DRM_SYNCOBJ_CREATE_SIGNALED,
1608 NULL);
1609 if (err)
1610 goto err_engines;
1611 }
1612
1613 if (pc->uses_protected_content) {
1614 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1615 ctx->uses_protected_content = true;
1616 }
1617
1618 trace_i915_context_create(ctx);
1619
1620 return ctx;
1621
1622 err_engines:
1623 free_engines(e);
1624 err_vm:
1625 if (ctx->vm)
1626 i915_vm_close(ctx->vm);
1627 err_ctx:
1628 kfree(ctx);
1629 return ERR_PTR(err);
1630 }
1631
1632 static void init_contexts(struct i915_gem_contexts *gc)
1633 {
1634 spin_lock_init(&gc->lock);
1635 INIT_LIST_HEAD(&gc->list);
1636 }
1637
1638 void i915_gem_init__contexts(struct drm_i915_private *i915)
1639 {
1640 init_contexts(&i915->gem.contexts);
1641 }
1642
1643 static void gem_context_register(struct i915_gem_context *ctx,
1644 struct drm_i915_file_private *fpriv,
1645 u32 id)
1646 {
1647 struct drm_i915_private *i915 = ctx->i915;
1648 void *old;
1649
1650 ctx->file_priv = fpriv;
1651
1652 ctx->pid = get_task_pid(current, PIDTYPE_PID);
1653 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1654 current->comm, pid_nr(ctx->pid));
1655
1656 /* And finally expose ourselves to userspace via the idr */
1657 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1658 WARN_ON(old);
1659
1660 spin_lock(&i915->gem.contexts.lock);
1661 list_add_tail(&ctx->link, &i915->gem.contexts.list);
1662 spin_unlock(&i915->gem.contexts.lock);
1663 }
1664
1665 int i915_gem_context_open(struct drm_i915_private *i915,
1666 struct drm_file *file)
1667 {
1668 struct drm_i915_file_private *file_priv = file->driver_priv;
1669 struct i915_gem_proto_context *pc;
1670 struct i915_gem_context *ctx;
1671 int err;
1672
1673 mutex_init(&file_priv->proto_context_lock);
1674 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1675
1676 /* 0 reserved for the default context */
1677 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1678
1679 /* 0 reserved for invalid/unassigned ppgtt */
1680 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1681
1682 pc = proto_context_create(i915, 0);
1683 if (IS_ERR(pc)) {
1684 err = PTR_ERR(pc);
1685 goto err;
1686 }
1687
1688 ctx = i915_gem_create_context(i915, pc);
1689 proto_context_close(i915, pc);
1690 if (IS_ERR(ctx)) {
1691 err = PTR_ERR(ctx);
1692 goto err;
1693 }
1694
1695 gem_context_register(ctx, file_priv, 0);
1696
1697 return 0;
1698
1699 err:
1700 xa_destroy(&file_priv->vm_xa);
1701 xa_destroy(&file_priv->context_xa);
1702 xa_destroy(&file_priv->proto_context_xa);
1703 mutex_destroy(&file_priv->proto_context_lock);
1704 return err;
1705 }
1706
1707 void i915_gem_context_close(struct drm_file *file)
1708 {
1709 struct drm_i915_file_private *file_priv = file->driver_priv;
1710 struct i915_gem_proto_context *pc;
1711 struct i915_address_space *vm;
1712 struct i915_gem_context *ctx;
1713 unsigned long idx;
1714
1715 xa_for_each(&file_priv->proto_context_xa, idx, pc)
1716 proto_context_close(file_priv->dev_priv, pc);
1717 xa_destroy(&file_priv->proto_context_xa);
1718 mutex_destroy(&file_priv->proto_context_lock);
1719
1720 xa_for_each(&file_priv->context_xa, idx, ctx)
1721 context_close(ctx);
1722 xa_destroy(&file_priv->context_xa);
1723
1724 xa_for_each(&file_priv->vm_xa, idx, vm)
1725 i915_vm_put(vm);
1726 xa_destroy(&file_priv->vm_xa);
1727 }
1728
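/*
 * A minimal userspace round trip for the VM ioctls below (fd is an assumed,
 * already open i915 DRM node, error handling elided). The returned vm_id can
 * be assigned to a context with I915_CONTEXT_PARAM_VM before its first use:
 *
 *	struct drm_i915_gem_vm_control vm = { };
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *	p.value = vm.vm_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 *
 * VM_DESTROY only drops the file's reference; a context that adopted the VM
 * keeps its own reference until it is closed.
 */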
1729 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1730 struct drm_file *file)
1731 {
1732 struct drm_i915_private *i915 = to_i915(dev);
1733 struct drm_i915_gem_vm_control *args = data;
1734 struct drm_i915_file_private *file_priv = file->driver_priv;
1735 struct i915_ppgtt *ppgtt;
1736 u32 id;
1737 int err;
1738
1739 if (!HAS_FULL_PPGTT(i915))
1740 return -ENODEV;
1741
1742 if (args->flags)
1743 return -EINVAL;
1744
1745 ppgtt = i915_ppgtt_create(&i915->gt, 0);
1746 if (IS_ERR(ppgtt))
1747 return PTR_ERR(ppgtt);
1748
1749 if (args->extensions) {
1750 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1751 NULL, 0,
1752 ppgtt);
1753 if (err)
1754 goto err_put;
1755 }
1756
1757 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1758 xa_limit_32b, GFP_KERNEL);
1759 if (err)
1760 goto err_put;
1761
1762 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1763 args->vm_id = id;
1764 return 0;
1765
1766 err_put:
1767 i915_vm_put(&ppgtt->vm);
1768 return err;
1769 }
1770
1771 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1772 struct drm_file *file)
1773 {
1774 struct drm_i915_file_private *file_priv = file->driver_priv;
1775 struct drm_i915_gem_vm_control *args = data;
1776 struct i915_address_space *vm;
1777
1778 if (args->flags)
1779 return -EINVAL;
1780
1781 if (args->extensions)
1782 return -EINVAL;
1783
1784 vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1785 if (!vm)
1786 return -ENOENT;
1787
1788 i915_vm_put(vm);
1789 return 0;
1790 }
1791
1792 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1793 struct i915_gem_context *ctx,
1794 struct drm_i915_gem_context_param *args)
1795 {
1796 struct i915_address_space *vm;
1797 int err;
1798 u32 id;
1799
1800 if (!i915_gem_context_has_full_ppgtt(ctx))
1801 return -ENODEV;
1802
1803 vm = ctx->vm;
1804 GEM_BUG_ON(!vm);
1805
1806 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1807 if (err)
1808 return err;
1809
1810 i915_vm_open(vm);
1811
1812 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1813 args->value = id;
1814 args->size = 0;
1815
1816 return err;
1817 }
1818
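/*
 * What userspace feeds into the validation below, sketched against the uapi
 * (the masks and EU counts are illustrative and must match the checks for
 * the particular part, e.g. one slice, half of the subslices and the fixed
 * EU count on a GRAPHICS_VER 11 device):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */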
1819 int
1820 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1821 const struct drm_i915_gem_context_param_sseu *user,
1822 struct intel_sseu *context)
1823 {
1824 const struct sseu_dev_info *device = &gt->info.sseu;
1825 struct drm_i915_private *i915 = gt->i915;
1826
1827 /* No zeros in any field. */
1828 if (!user->slice_mask || !user->subslice_mask ||
1829 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1830 return -EINVAL;
1831
1832 /* Max >= min. */
1833 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1834 return -EINVAL;
1835
1836 /*
1837 * Some future proofing on the types since the uAPI is wider than the
1838 * current internal implementation.
1839 */
1840 if (overflows_type(user->slice_mask, context->slice_mask) ||
1841 overflows_type(user->subslice_mask, context->subslice_mask) ||
1842 overflows_type(user->min_eus_per_subslice,
1843 context->min_eus_per_subslice) ||
1844 overflows_type(user->max_eus_per_subslice,
1845 context->max_eus_per_subslice))
1846 return -EINVAL;
1847
1848 /* Check validity against hardware. */
1849 if (user->slice_mask & ~device->slice_mask)
1850 return -EINVAL;
1851
1852 if (user->subslice_mask & ~device->subslice_mask[0])
1853 return -EINVAL;
1854
1855 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1856 return -EINVAL;
1857
1858 context->slice_mask = user->slice_mask;
1859 context->subslice_mask = user->subslice_mask;
1860 context->min_eus_per_subslice = user->min_eus_per_subslice;
1861 context->max_eus_per_subslice = user->max_eus_per_subslice;
1862
1863 /* Part specific restrictions. */
1864 if (GRAPHICS_VER(i915) == 11) {
1865 unsigned int hw_s = hweight8(device->slice_mask);
1866 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1867 unsigned int req_s = hweight8(context->slice_mask);
1868 unsigned int req_ss = hweight8(context->subslice_mask);
1869
1870 /*
1871 * Only full subslice enablement is possible if more than one
1872 * slice is turned on.
1873 */
1874 if (req_s > 1 && req_ss != hw_ss_per_s)
1875 return -EINVAL;
1876
1877 /*
1878 * If more than four (SScount bitfield limit) subslices are
1879 * requested then the number has to be even.
1880 */
1881 if (req_ss > 4 && (req_ss & 1))
1882 return -EINVAL;
1883
1884 /*
1885 * If only one slice is enabled and subslice count is below the
1886 * device full enablement, it must be at most half of all the
1887 * available subslices.
1888 */
1889 if (req_s == 1 && req_ss < hw_ss_per_s &&
1890 req_ss > (hw_ss_per_s / 2))
1891 return -EINVAL;
1892
1893 /* ABI restriction - VME use case only. */
1894
1895 /* All slices or one slice only. */
1896 if (req_s != 1 && req_s != hw_s)
1897 return -EINVAL;
1898
1899 /*
1900 * Half subslices or full enablement only when one slice is
1901 * enabled.
1902 */
1903 if (req_s == 1 &&
1904 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1905 return -EINVAL;
1906
1907 /* No EU configuration changes. */
1908 if ((user->min_eus_per_subslice !=
1909 device->max_eus_per_subslice) ||
1910 (user->max_eus_per_subslice !=
1911 device->max_eus_per_subslice))
1912 return -EINVAL;
1913 }
1914
1915 return 0;
1916 }
1917
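/*
 * I915_CONTEXT_PARAM_SSEU: copy the user's SSEU request, translate it with
 * i915_gem_user_to_context_sseu() and apply it to the selected (render)
 * engine via intel_context_reconfigure_sseu(). Only graphics version 11
 * accepts runtime SSEU reconfiguration here.
 */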
1918 static int set_sseu(struct i915_gem_context *ctx,
1919 struct drm_i915_gem_context_param *args)
1920 {
1921 struct drm_i915_private *i915 = ctx->i915;
1922 struct drm_i915_gem_context_param_sseu user_sseu;
1923 struct intel_context *ce;
1924 struct intel_sseu sseu;
1925 unsigned long lookup;
1926 int ret;
1927
1928 if (args->size < sizeof(user_sseu))
1929 return -EINVAL;
1930
1931 if (GRAPHICS_VER(i915) != 11)
1932 return -ENODEV;
1933
1934 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1935 sizeof(user_sseu)))
1936 return -EFAULT;
1937
1938 if (user_sseu.rsvd)
1939 return -EINVAL;
1940
1941 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1942 return -EINVAL;
1943
1944 lookup = 0;
1945 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1946 lookup |= LOOKUP_USER_INDEX;
1947
1948 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1949 if (IS_ERR(ce))
1950 return PTR_ERR(ce);
1951
1952 	/* Only the render engine supports RPCS configuration. */
1953 if (ce->engine->class != RENDER_CLASS) {
1954 ret = -ENODEV;
1955 goto out_ce;
1956 }
1957
1958 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1959 if (ret)
1960 goto out_ce;
1961
1962 ret = intel_context_reconfigure_sseu(ce, sseu);
1963 if (ret)
1964 goto out_ce;
1965
1966 args->size = sizeof(user_sseu);
1967
1968 out_ce:
1969 intel_context_put(ce);
1970 return ret;
1971 }
1972
1973 static int
1974 set_persistence(struct i915_gem_context *ctx,
1975 const struct drm_i915_gem_context_param *args)
1976 {
1977 if (args->size)
1978 return -EINVAL;
1979
1980 return __context_set_persistence(ctx, args->value);
1981 }
1982
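/*
 * I915_CONTEXT_PARAM_PRIORITY: update the scheduling priority and, for
 * engines that support timeslicing, enable semaphore usage only for
 * contexts at or above normal priority.
 */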
1983 static int set_priority(struct i915_gem_context *ctx,
1984 const struct drm_i915_gem_context_param *args)
1985 {
1986 struct i915_gem_engines_iter it;
1987 struct intel_context *ce;
1988 int err;
1989
1990 err = validate_priority(ctx->i915, args);
1991 if (err)
1992 return err;
1993
1994 ctx->sched.priority = args->value;
1995
1996 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1997 if (!intel_engine_has_timeslices(ce->engine))
1998 continue;
1999
2000 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2001 intel_engine_has_semaphores(ce->engine))
2002 intel_context_set_use_semaphores(ce);
2003 else
2004 intel_context_clear_use_semaphores(ce);
2005 }
2006 i915_gem_context_unlock_engines(ctx);
2007
2008 return 0;
2009 }
2010
2011 static int get_protected(struct i915_gem_context *ctx,
2012 struct drm_i915_gem_context_param *args)
2013 {
2014 args->size = 0;
2015 args->value = i915_gem_context_uses_protected_content(ctx);
2016
2017 return 0;
2018 }
2019
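/*
 * SETPARAM handling for an already finalized context. Parameters that can
 * only be set at create time (or that have been removed from the uAPI) fall
 * through to -EINVAL below.
 */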
2020 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2021 struct i915_gem_context *ctx,
2022 struct drm_i915_gem_context_param *args)
2023 {
2024 int ret = 0;
2025
2026 switch (args->param) {
2027 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2028 if (args->size)
2029 ret = -EINVAL;
2030 else if (args->value)
2031 i915_gem_context_set_no_error_capture(ctx);
2032 else
2033 i915_gem_context_clear_no_error_capture(ctx);
2034 break;
2035
2036 case I915_CONTEXT_PARAM_BANNABLE:
2037 if (args->size)
2038 ret = -EINVAL;
2039 else if (!capable(CAP_SYS_ADMIN) && !args->value)
2040 ret = -EPERM;
2041 else if (args->value)
2042 i915_gem_context_set_bannable(ctx);
2043 else if (i915_gem_context_uses_protected_content(ctx))
2044 ret = -EPERM; /* can't clear this for protected contexts */
2045 else
2046 i915_gem_context_clear_bannable(ctx);
2047 break;
2048
2049 case I915_CONTEXT_PARAM_RECOVERABLE:
2050 if (args->size)
2051 ret = -EINVAL;
2052 else if (!args->value)
2053 i915_gem_context_clear_recoverable(ctx);
2054 else if (i915_gem_context_uses_protected_content(ctx))
2055 ret = -EPERM; /* can't set this for protected contexts */
2056 else
2057 i915_gem_context_set_recoverable(ctx);
2058 break;
2059
2060 case I915_CONTEXT_PARAM_PRIORITY:
2061 ret = set_priority(ctx, args);
2062 break;
2063
2064 case I915_CONTEXT_PARAM_SSEU:
2065 ret = set_sseu(ctx, args);
2066 break;
2067
2068 case I915_CONTEXT_PARAM_PERSISTENCE:
2069 ret = set_persistence(ctx, args);
2070 break;
2071
2072 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2073 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2074 case I915_CONTEXT_PARAM_BAN_PERIOD:
2075 case I915_CONTEXT_PARAM_RINGSIZE:
2076 case I915_CONTEXT_PARAM_VM:
2077 case I915_CONTEXT_PARAM_ENGINES:
2078 default:
2079 ret = -EINVAL;
2080 break;
2081 }
2082
2083 return ret;
2084 }
2085
2086 struct create_ext {
2087 struct i915_gem_proto_context *pc;
2088 struct drm_i915_file_private *fpriv;
2089 };
2090
2091 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2092 {
2093 struct drm_i915_gem_context_create_ext_setparam local;
2094 const struct create_ext *arg = data;
2095
2096 if (copy_from_user(&local, ext, sizeof(local)))
2097 return -EFAULT;
2098
2099 if (local.param.ctx_id)
2100 return -EINVAL;
2101
2102 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2103 }
2104
2105 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2106 {
2107 return -EINVAL;
2108 }
2109
2110 static const i915_user_extension_fn create_extensions[] = {
2111 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2112 [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
2113 };
2114
2115 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2116 {
2117 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2118 }
2119
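/*
 * Lockless lookup in context_xa; kref_get_unless_zero() ensures we never
 * resurrect a context whose last reference is already being dropped.
 */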
2120 static inline struct i915_gem_context *
2121 __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2122 {
2123 struct i915_gem_context *ctx;
2124
2125 rcu_read_lock();
2126 ctx = xa_load(&file_priv->context_xa, id);
2127 if (ctx && !kref_get_unless_zero(&ctx->ref))
2128 ctx = NULL;
2129 rcu_read_unlock();
2130
2131 return ctx;
2132 }
2133
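/*
 * Turn a proto-context into a real context under proto_context_lock:
 * create the context, register it under the same id, then drop the
 * proto-context. Returns an extra reference for the caller.
 */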
2134 static struct i915_gem_context *
2135 finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2136 struct i915_gem_proto_context *pc, u32 id)
2137 {
2138 struct i915_gem_context *ctx;
2139 void *old;
2140
2141 lockdep_assert_held(&file_priv->proto_context_lock);
2142
2143 ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2144 if (IS_ERR(ctx))
2145 return ctx;
2146
2147 gem_context_register(ctx, file_priv, id);
2148
2149 old = xa_erase(&file_priv->proto_context_xa, id);
2150 GEM_BUG_ON(old != pc);
2151 proto_context_close(file_priv->dev_priv, pc);
2152
2153 /* One for the xarray and one for the caller */
2154 return i915_gem_context_get(ctx);
2155 }
2156
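/*
 * Look up a context by id. The fast path is lockless; on a miss we retry
 * under proto_context_lock and, if only a proto-context is registered at
 * this id, finalize it on demand.
 */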
2157 struct i915_gem_context *
2158 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2159 {
2160 struct i915_gem_proto_context *pc;
2161 struct i915_gem_context *ctx;
2162
2163 ctx = __context_lookup(file_priv, id);
2164 if (ctx)
2165 return ctx;
2166
2167 mutex_lock(&file_priv->proto_context_lock);
2168 /* Try one more time under the lock */
2169 ctx = __context_lookup(file_priv, id);
2170 if (!ctx) {
2171 pc = xa_load(&file_priv->proto_context_xa, id);
2172 if (!pc)
2173 ctx = ERR_PTR(-ENOENT);
2174 else
2175 ctx = finalize_create_context_locked(file_priv, pc, id);
2176 }
2177 mutex_unlock(&file_priv->proto_context_lock);
2178
2179 return ctx;
2180 }
2181
2182 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2183 struct drm_file *file)
2184 {
2185 struct drm_i915_private *i915 = to_i915(dev);
2186 struct drm_i915_gem_context_create_ext *args = data;
2187 struct create_ext ext_data;
2188 int ret;
2189 u32 id;
2190
2191 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2192 return -ENODEV;
2193
2194 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2195 return -EINVAL;
2196
2197 ret = intel_gt_terminally_wedged(&i915->gt);
2198 if (ret)
2199 return ret;
2200
2201 ext_data.fpriv = file->driver_priv;
2202 if (client_is_banned(ext_data.fpriv)) {
2203 drm_dbg(&i915->drm,
2204 "client %s[%d] banned from creating ctx\n",
2205 current->comm, task_pid_nr(current));
2206 return -EIO;
2207 }
2208
2209 ext_data.pc = proto_context_create(i915, args->flags);
2210 if (IS_ERR(ext_data.pc))
2211 return PTR_ERR(ext_data.pc);
2212
2213 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2214 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2215 create_extensions,
2216 ARRAY_SIZE(create_extensions),
2217 &ext_data);
2218 if (ret)
2219 goto err_pc;
2220 }
2221
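	/*
	 * On graphics version 13 and later the context is created and
	 * registered immediately; older platforms only register the
	 * proto-context here and defer creation until first use (see
	 * i915_gem_context_lookup()).
	 */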
2222 if (GRAPHICS_VER(i915) > 12) {
2223 struct i915_gem_context *ctx;
2224
2225 /* Get ourselves a context ID */
2226 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2227 xa_limit_32b, GFP_KERNEL);
2228 if (ret)
2229 goto err_pc;
2230
2231 ctx = i915_gem_create_context(i915, ext_data.pc);
2232 if (IS_ERR(ctx)) {
2233 ret = PTR_ERR(ctx);
2234 goto err_pc;
2235 }
2236
2237 proto_context_close(i915, ext_data.pc);
2238 gem_context_register(ctx, ext_data.fpriv, id);
2239 } else {
2240 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2241 if (ret < 0)
2242 goto err_pc;
2243 }
2244
2245 args->ctx_id = id;
2246 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2247
2248 return 0;
2249
2250 err_pc:
2251 proto_context_close(i915, ext_data.pc);
2252 return ret;
2253 }
2254
2255 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2256 struct drm_file *file)
2257 {
2258 struct drm_i915_gem_context_destroy *args = data;
2259 struct drm_i915_file_private *file_priv = file->driver_priv;
2260 struct i915_gem_proto_context *pc;
2261 struct i915_gem_context *ctx;
2262
2263 if (args->pad != 0)
2264 return -EINVAL;
2265
2266 if (!args->ctx_id)
2267 return -ENOENT;
2268
2269 /* We need to hold the proto-context lock here to prevent races
2270 * with finalize_create_context_locked().
2271 */
2272 mutex_lock(&file_priv->proto_context_lock);
2273 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2274 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2275 mutex_unlock(&file_priv->proto_context_lock);
2276
2277 if (!ctx && !pc)
2278 return -ENOENT;
2279 GEM_WARN_ON(ctx && pc);
2280
2281 if (pc)
2282 proto_context_close(file_priv->dev_priv, pc);
2283
2284 if (ctx)
2285 context_close(ctx);
2286
2287 return 0;
2288 }
2289
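/*
 * Read back the current SSEU configuration of the selected engine. The
 * pinned lock serialises against a concurrent set_sseu() so userspace sees
 * a consistent snapshot.
 */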
2290 static int get_sseu(struct i915_gem_context *ctx,
2291 struct drm_i915_gem_context_param *args)
2292 {
2293 struct drm_i915_gem_context_param_sseu user_sseu;
2294 struct intel_context *ce;
2295 unsigned long lookup;
2296 int err;
2297
2298 if (args->size == 0)
2299 goto out;
2300 else if (args->size < sizeof(user_sseu))
2301 return -EINVAL;
2302
2303 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2304 sizeof(user_sseu)))
2305 return -EFAULT;
2306
2307 if (user_sseu.rsvd)
2308 return -EINVAL;
2309
2310 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2311 return -EINVAL;
2312
2313 lookup = 0;
2314 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2315 lookup |= LOOKUP_USER_INDEX;
2316
2317 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2318 if (IS_ERR(ce))
2319 return PTR_ERR(ce);
2320
2321 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2322 if (err) {
2323 intel_context_put(ce);
2324 return err;
2325 }
2326
2327 user_sseu.slice_mask = ce->sseu.slice_mask;
2328 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2329 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2330 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2331
2332 intel_context_unlock_pinned(ce);
2333 intel_context_put(ce);
2334
2335 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2336 sizeof(user_sseu)))
2337 return -EFAULT;
2338
2339 out:
2340 args->size = sizeof(user_sseu);
2341
2342 return 0;
2343 }
2344
2345 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2346 struct drm_file *file)
2347 {
2348 struct drm_i915_file_private *file_priv = file->driver_priv;
2349 struct drm_i915_gem_context_param *args = data;
2350 struct i915_gem_context *ctx;
2351 struct i915_address_space *vm;
2352 int ret = 0;
2353
2354 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2355 if (IS_ERR(ctx))
2356 return PTR_ERR(ctx);
2357
2358 switch (args->param) {
2359 case I915_CONTEXT_PARAM_GTT_SIZE:
2360 args->size = 0;
2361 vm = i915_gem_context_get_eb_vm(ctx);
2362 args->value = vm->total;
2363 i915_vm_put(vm);
2364
2365 break;
2366
2367 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2368 args->size = 0;
2369 args->value = i915_gem_context_no_error_capture(ctx);
2370 break;
2371
2372 case I915_CONTEXT_PARAM_BANNABLE:
2373 args->size = 0;
2374 args->value = i915_gem_context_is_bannable(ctx);
2375 break;
2376
2377 case I915_CONTEXT_PARAM_RECOVERABLE:
2378 args->size = 0;
2379 args->value = i915_gem_context_is_recoverable(ctx);
2380 break;
2381
2382 case I915_CONTEXT_PARAM_PRIORITY:
2383 args->size = 0;
2384 args->value = ctx->sched.priority;
2385 break;
2386
2387 case I915_CONTEXT_PARAM_SSEU:
2388 ret = get_sseu(ctx, args);
2389 break;
2390
2391 case I915_CONTEXT_PARAM_VM:
2392 ret = get_ppgtt(file_priv, ctx, args);
2393 break;
2394
2395 case I915_CONTEXT_PARAM_PERSISTENCE:
2396 args->size = 0;
2397 args->value = i915_gem_context_is_persistent(ctx);
2398 break;
2399
2400 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2401 ret = get_protected(ctx, args);
2402 break;
2403
2404 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2405 case I915_CONTEXT_PARAM_BAN_PERIOD:
2406 case I915_CONTEXT_PARAM_ENGINES:
2407 case I915_CONTEXT_PARAM_RINGSIZE:
2408 default:
2409 ret = -EINVAL;
2410 break;
2411 }
2412
2413 i915_gem_context_put(ctx);
2414 return ret;
2415 }
2416
2417 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2418 struct drm_file *file)
2419 {
2420 struct drm_i915_file_private *file_priv = file->driver_priv;
2421 struct drm_i915_gem_context_param *args = data;
2422 struct i915_gem_proto_context *pc;
2423 struct i915_gem_context *ctx;
2424 int ret = 0;
2425
2426 mutex_lock(&file_priv->proto_context_lock);
2427 ctx = __context_lookup(file_priv, args->ctx_id);
2428 if (!ctx) {
2429 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2430 if (pc) {
2431 /* Contexts should be finalized inside
2432 * GEM_CONTEXT_CREATE starting with graphics
2433 * version 13.
2434 */
2435 WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2436 ret = set_proto_ctx_param(file_priv, pc, args);
2437 } else {
2438 ret = -ENOENT;
2439 }
2440 }
2441 mutex_unlock(&file_priv->proto_context_lock);
2442
2443 if (ctx) {
2444 ret = ctx_setparam(file_priv, ctx, args);
2445 i915_gem_context_put(ctx);
2446 }
2447
2448 return ret;
2449 }
2450
2451 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2452 void *data, struct drm_file *file)
2453 {
2454 struct drm_i915_private *i915 = to_i915(dev);
2455 struct drm_i915_reset_stats *args = data;
2456 struct i915_gem_context *ctx;
2457
2458 if (args->flags || args->pad)
2459 return -EINVAL;
2460
2461 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2462 if (IS_ERR(ctx))
2463 return PTR_ERR(ctx);
2464
2465 /*
2466 * We opt for unserialised reads here. This may result in tearing
2467 * in the extremely unlikely event of a GPU hang on this context
2468 * as we are querying them. If we need that extra layer of protection,
2469 * we should wrap the hangstats with a seqlock.
2470 */
2471
2472 if (capable(CAP_SYS_ADMIN))
2473 args->reset_count = i915_reset_count(&i915->gpu_error);
2474 else
2475 args->reset_count = 0;
2476
2477 args->batch_active = atomic_read(&ctx->guilty_count);
2478 args->batch_pending = atomic_read(&ctx->active_count);
2479
2480 i915_gem_context_put(ctx);
2481 return 0;
2482 }
2483
2484 /* GEM context-engines iterator: for_each_gem_engine() */
2485 struct intel_context *
2486 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2487 {
2488 const struct i915_gem_engines *e = it->engines;
2489 struct intel_context *ctx;
2490
2491 if (unlikely(!e))
2492 return NULL;
2493
2494 do {
2495 if (it->idx >= e->num_engines)
2496 return NULL;
2497
2498 ctx = e->engines[it->idx++];
2499 } while (!ctx);
2500
2501 return ctx;
2502 }
2503
2504 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2505 #include "selftests/mock_context.c"
2506 #include "selftests/i915_gem_context.c"
2507 #endif
2508
2509 void i915_gem_context_module_exit(void)
2510 {
2511 kmem_cache_destroy(slab_luts);
2512 }
2513
2514 int __init i915_gem_context_module_init(void)
2515 {
2516 slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2517 if (!slab_luts)
2518 return -ENOMEM;
2519
2520 return 0;
2521 }
2522