// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

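/*
 * struct vmw_fence_manager - Per-device fence state for vmwgfx.
 *
 * Tracks all fence objects created for a device on @fence_list, the
 * deferred-cleanup work item that runs fence action destructors, and the
 * fence-goal irq bookkeeping used when actions are attached to fences.
 */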
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event lock and
 * send the event.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

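/*
 * vmw_fence_obj_destroy - dma_fence release callback.
 *
 * Unlinks the fence from the fence manager's list and hands it to the
 * fence's destroy callback for freeing.
 */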
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

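/*
 * vmw_fence_enable_signaling - dma_fence enable_signaling callback.
 *
 * Returns false if the device seqno has already passed this fence's seqno,
 * i.e. the fence is effectively signaled, true if irq-driven signaling is
 * still needed.
 */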
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

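/*
 * vmw_fence_wait - dma_fence wait callback.
 *
 * Sleeps with a dma_fence_cb based wakeup until the fence signals, the
 * timeout expires or, for interruptible waits, a signal is pending. The
 * device seqno waiter count is held for the duration of the wait so that
 * the ANY_FENCE irq stays enabled.
 */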
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we can manipulate the list heads of
		 * the actions we moved to the private list, so fman::lock
		 * need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

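/*
 * vmw_fence_manager_init - Allocate and initialize a fence manager for
 * @dev_priv. The manager starts with fifo_down set, so fence creation
 * fails with -EBUSY until vmw_fence_fifo_up() is called.
 */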
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

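/*
 * vmw_fence_manager_takedown - Flush the cleanup work and free the fence
 * manager. All fence objects must already have been destroyed.
 */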
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

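/*
 * vmw_fence_obj_init - Initialize a fence object and add it to the fence
 * manager's list. Returns -EBUSY if the fifo is down and new fences may
 * not be created.
 */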
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fifo_mem_write(fman->dev_priv,
					   SVGA_FIFO_FENCE_GOAL,
					   fence->base.seqno);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
			   fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

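/*
 * __vmw_fences_update - Signal all fence objects whose seqno has passed.
 *
 * Must be called with the fence manager lock held. Runs the seq_passed
 * actions of signaled fences and defers their cleanup to the work item.
 */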
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated: the hardware might
	 * have raced with that update, in which case we may have missed
	 * a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

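/*
 * vmw_fences_update - Locked wrapper around __vmw_fences_update().
 */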
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

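/*
 * vmw_fence_obj_signaled - Check whether a fence object has signaled,
 * re-reading the device seqno if it hasn't been marked signaled yet.
 */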
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

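/*
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * Returns 0 on success, -EBUSY if the wait timed out, or a negative error
 * code such as -ERESTARTSYS if the wait was interrupted. The @lazy
 * argument is currently unused.
 */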
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

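/*
 * vmw_fence_create - Create a kernel-only fence object, i.e. one without
 * a ttm base object or user-space handle.
 */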
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

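/*
 * vmw_user_fence_create - Create a fence object together with a ttm base
 * object and a user-space handle, charging the allocation to the ttm
 * memory accounting of the device.
 */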
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);


	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;


	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}


/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}


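/*
 * vmw_fence_obj_wait_ioctl - DRM_VMW_FENCE_WAIT ioctl.
 *
 * Converts the user-supplied microsecond timeout into an absolute jiffies
 * cookie, waits for the fence object to signal, and optionally drops the
 * user-space reference on success (DRM_VMW_WAIT_OPTION_UNREF).
 */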
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not available on all 32-bit systems, so
	 * approximate the division by 1000000 with shifts:
	 * x/2^20 + x/2^24 - x/2^26 ~= x/10^6.
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

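/*
 * vmw_fence_obj_signaled_ioctl - DRM_VMW_FENCE_SIGNALED ioctl.
 *
 * Reports whether a user-space fence object has signaled, together with
 * the last seqno the device is known to have passed.
 */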
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


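/*
 * vmw_fence_obj_unref_ioctl - DRM_VMW_FENCE_UNREF ioctl.
 *
 * Drops a user-space reference to a fence object.
 */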
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

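/*
 * vmw_event_fence_action_create - Allocate and reserve a drm pending
 * event and queue it for delivery when the fence signals, optionally
 * requesting a timestamp (DRM_VMW_FE_FLAG_REQ_TIME).
 */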
static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

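/*
 * vmw_fence_event_ioctl - DRM_VMW_FENCE_EVENT ioctl.
 *
 * Attaches a DRM_VMW_EVENT_FENCE_SIGNALED event to an existing fence
 * identified by arg->handle, or to a newly created fence, and optionally
 * copies a fence representation back to user-space.
 */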
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}