/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include "ttm_object.h"

MODULE_IMPORT_NS(DMA_BUF);
/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 *
 * @refcount: reference/usage count
 */
struct ttm_object_file {
        struct ttm_object_device *tdev;
        spinlock_t lock;
        struct list_head ref_list;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
        struct kref refcount;
};

/*
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * @mem_glob: Memory accounting object charged for ref objects and
 * exported dma-bufs.
 *
 * @ops: Copy of the driver-provided dma_buf ops, with the release
 * callback interposed for prime object cleanup.
 *
 * @dmabuf_release: The driver's original dma_buf release callback.
 *
 * @dma_buf_size: Accounting size of an exported dma_buf.
 *
 * @idr: Allocator for per-device object handles.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
        spinlock_t object_lock;
        struct drm_open_hash object_hash;
        atomic_t object_count;
        struct ttm_mem_global *mem_glob;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
        size_t dma_buf_size;
        struct idr idr;
};

/*
 * struct ttm_ref_object
 *
 * @rcu_head: RCU head used to free the ref object after a grace period.
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @ref_type: Type of ref object.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @tfile: The ttm_object_file holding this reference.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
        struct rcu_head rcu_head;
        struct drm_hash_item hash;
        struct list_head head;
        struct kref kref;
        enum ttm_ref_type ref_type;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
};
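
/*
 * The ref-object mechanism is what gives a handle per-file semantics:
 * the first ttm_ref_object_add() for a base object creates the ref and
 * takes a base-object reference, later adds merely bump the ref's kref,
 * and ttm_ref_object_base_unref() drops it again. A minimal usage
 * sketch (hypothetical caller code, not part of this file):
 *
 *      u32 handle = base->handle;
 *      bool existed;
 *
 *      // First add creates the ref object; existed == false.
 *      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE,
 *                               &existed, false);
 *      ...
 *      // Drops the kref; the last put tears down the ref object and
 *      // releases its reference on the base object.
 *      ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 */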

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
        kref_get(&tfile->refcount);
        return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
        struct ttm_object_file *tfile =
                container_of(kref, struct ttm_object_file, refcount);

        kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **),
                         void (*ref_obj_release) (struct ttm_base_object *,
                                                  enum ttm_ref_type ref_type))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        idr_preload(GFP_KERNEL);
        spin_lock(&tdev->object_lock);
        ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
        spin_unlock(&tdev->object_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;

        base->handle = ret;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;

        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);
        return ret;
}
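
/*
 * A base object is meant to be embedded in a driver object and freed
 * RCU-deferred, since lookups run under rcu_read_lock() (see the note
 * in ttm_release_base() below). A minimal sketch, assuming a
 * hypothetical driver structure:
 *
 *      struct my_obj {
 *              struct ttm_base_object base;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void my_obj_release(struct ttm_base_object **p_base)
 *      {
 *              struct my_obj *obj =
 *                      container_of(*p_base, struct my_obj, base);
 *
 *              *p_base = NULL;
 *              kfree_rcu(obj, rcu);    // defer the free past RCU readers
 *      }
 *
 *      ret = ttm_base_object_init(tfile, &obj->base, true, my_type,
 *                                 &my_obj_release, NULL);
 *
 * On success the initial kref has been handed over to a USAGE ref held
 * by @tfile; the object is destroyed once that ref and any lookup refs
 * are gone.
 */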

static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
                container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        ttm_object_file_unref(&base->tfile);
        if (base->refcount_release)
                base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        kref_put(&base->refcount, ttm_release_base);
}

/**
 * ttm_base_object_noref_lookup - look up a base object without reference
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * This function looks up a ttm base object and returns a pointer to it
 * without refcounting the pointer. The returned pointer is only valid
 * until ttm_base_object_noref_release() is called, and the object
 * pointed to by the returned pointer may be doomed. Any persistent usage
 * of the object requires a refcount to be taken using kref_get_unless_zero().
 * Iff this function returns successfully it needs to be paired with
 * ttm_base_object_noref_release(), and no sleeping or scheduling functions
 * may be called in between these function calls.
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);
        if (ret) {
                rcu_read_unlock();
                return NULL;
        }

        __release(RCU);
        return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
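
/*
 * A minimal sketch of the required pairing (hypothetical caller code):
 * the lookup deliberately leaves the RCU read-side critical section
 * held, so the caller must not sleep and must end the section with
 * ttm_base_object_noref_release() (declared in ttm_object.h).
 *
 *      struct ttm_base_object *base;
 *
 *      base = ttm_base_object_noref_lookup(tfile, handle);
 *      if (base) {
 *              // non-sleeping use of base only; take
 *              // kref_get_unless_zero(&base->refcount) for
 *              // anything persistent
 *              ttm_base_object_noref_release();
 *      }
 */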

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        rcu_read_unlock();

        return base;
}

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
        struct ttm_base_object *base;

        rcu_read_lock();
        base = idr_find(&tdev->idr, key);

        if (base && !kref_get_unless_zero(&base->refcount))
                base = NULL;
        rcu_read_unlock();

        return base;
}

/**
 * ttm_ref_object_exists - Check whether a caller has a valid ref object on
 * (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
                           struct ttm_base_object *base)
{
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        struct drm_hash_item *hash;
        struct ttm_ref_object *ref;

        rcu_read_lock();
        if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
                goto out_false;

        /*
         * Verify that the ref object is really pointing to our base object.
         * Our base object could actually be dead, and the ref object pointing
         * to another base object with the same handle.
         */
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        if (unlikely(base != ref->obj))
                goto out_false;

        /*
         * Verify that the ref->obj pointer was actually valid!
         */
        rmb();
        if (unlikely(kref_read(&ref->kref) == 0))
                goto out_false;

        rcu_read_unlock();
        return true;

out_false:
        rcu_read_unlock();
        return false;
}
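
/*
 * Typical use is a cheap permission check before acting on a base
 * object obtained by other means. A sketch (hypothetical caller code):
 *
 *      if (!ttm_ref_object_exists(tfile, base))
 *              return -EPERM;  // caller never opened this object
 */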

int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       enum ttm_ref_type ref_type, bool *existed,
                       bool require_existed)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        int ret = -EINVAL;

        if (base->tfile != tfile && !base->shareable)
                return -EPERM;

        if (existed != NULL)
                *existed = true;

        while (ret == -EINVAL) {
                rcu_read_lock();
                ret = drm_ht_find_item_rcu(ht, base->handle, &hash);

                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
                        if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
                        }
                }

                rcu_read_unlock();
                if (require_existed)
                        return -EPERM;

                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           &ctx);
                if (unlikely(ret != 0))
                        return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL)) {
                        ttm_mem_global_free(mem_glob, sizeof(*ref));
                        return -ENOMEM;
                }

                ref->hash.key = base->handle;
                ref->obj = base;
                ref->tfile = tfile;
                ref->ref_type = ref_type;
                kref_init(&ref->kref);

                spin_lock(&tfile->lock);
                ret = drm_ht_insert_item_rcu(ht, &ref->hash);

                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        kref_get(&base->refcount);
                        spin_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }

                spin_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);

                ttm_mem_global_free(mem_glob, sizeof(*ref));
                kfree(ref);
        }

        return ret;
}
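
/*
 * The retry loop above is an optimistic lookup-or-create: if a
 * concurrent release zeroes the ref's kref between the RCU lookup and
 * kref_get_unless_zero(), or a concurrent add wins the hash-insert
 * race (-EINVAL), the loop starts over. With require_existed == true
 * the function only ever piggybacks on an existing ref of the given
 * type. A sketch of that use (hypothetical caller code):
 *
 *      // Succeeds only if @tfile already holds a ref of this type on
 *      // @base; never creates a new ref object.
 *      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
 */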

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
                container_of(kref, struct ttm_ref_object, kref);
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct drm_open_hash *ht;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

        ht = &tfile->ref_hash[ref->ref_type];
        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);

        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);

        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key, enum ttm_ref_type ref_type)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        int ret;

        spin_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
                spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        spin_unlock(&tfile->lock);
        return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        spin_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                ttm_ref_object_release(&ref->kref);
        }

        spin_unlock(&tfile->lock);
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                                             unsigned int hash_order)
{
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
        unsigned int i;
        unsigned int j = 0;
        int ret;

        if (unlikely(tfile == NULL))
                return NULL;

        spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);

        for (i = 0; i < TTM_REF_NUM; ++i) {
                ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
                if (ret) {
                        j = i;
                        goto out_err;
                }
        }

        return tfile;
out_err:
        for (i = 0; i < j; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        kfree(tfile);

        return NULL;
}
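
/*
 * The object file tracks everything a client opened, so it maps
 * naturally onto a drm file's open/close hooks. A sketch, assuming a
 * hypothetical per-open driver structure and a driver-chosen
 * hash_order:
 *
 *      // open:
 *      my_priv->tfile = ttm_object_file_init(tdev, 10);
 *      if (!my_priv->tfile)
 *              return -ENOMEM;
 *
 *      // close: drops every ref the client still holds, which in
 *      // turn releases the base objects those refs were pinning.
 *      ttm_object_file_release(&my_priv->tfile);
 */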

struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
                       unsigned int hash_order,
                       const struct dma_buf_ops *ops)
{
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
        int ret;

        if (unlikely(tdev == NULL))
                return NULL;

        tdev->mem_glob = mem_glob;
        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = drm_ht_create(&tdev->object_hash, hash_order);
        if (ret != 0)
                goto out_no_object_hash;

        idr_init_base(&tdev->idr, 1);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
        tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
                ttm_round_pot(sizeof(struct file));
        return tdev;

out_no_object_hash:
        kfree(tdev);
        return NULL;
}
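
/*
 * Note how the device copies the driver's dma_buf_ops and interposes
 * its own release callback: every dma_buf exported through
 * ttm_prime_handle_to_fd() then funnels through
 * ttm_prime_dmabuf_release(), which chains to the driver's original
 * callback. The ops copy is also how ttm_prime_fd_to_handle()
 * recognizes self-exported dma-bufs. A sketch of the pairing
 * (hypothetical driver code):
 *
 *      tdev = ttm_object_device_init(mem_glob, 12, &my_dma_buf_ops);
 *      ...
 *      ttm_object_device_release(&tdev);
 */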

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
        struct ttm_object_device *tdev = *p_tdev;

        *p_tdev = NULL;

        WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
        idr_destroy(&tdev->idr);
        drm_ht_remove(&tdev->object_hash);

        kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
        return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_prime_object *prime;

        *p_base = NULL;
        prime = container_of(base, struct ttm_prime_object, base);
        BUG_ON(prime->dma_buf != NULL);
        mutex_destroy(&prime->mutex);
        if (prime->refcount_release)
                prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The dma-buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
        struct ttm_prime_object *prime =
                (struct ttm_prime_object *) dma_buf->priv;
        struct ttm_base_object *base = &prime->base;
        struct ttm_object_device *tdev = base->tfile->tdev;

        if (tdev->dmabuf_release)
                tdev->dmabuf_release(dma_buf);
        mutex_lock(&prime->mutex);
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
        ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
        ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
                           int fd, u32 *handle)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        struct ttm_base_object *base;
        int ret;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &tdev->ops) {
                /* Not one of ours; drop the reference taken above. */
                dma_buf_put(dma_buf);
                return -ENOSYS;
        }

        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->handle;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

        dma_buf_put(dma_buf);

        return ret;
}
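
/*
 * A sketch of the ioctl-level use (hypothetical driver code): the
 * returned handle is an ordinary base-object handle, so the client
 * releases it through the same unref path as any other handle.
 *
 *      u32 handle;
 *
 *      ret = ttm_prime_fd_to_handle(tfile, args->fd, &handle);
 *      if (ret == 0)
 *              args->handle = handle;
 */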

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
                           uint32_t handle, uint32_t flags,
                           int *prime_fd)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        int ret;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL ||
                     base->object_type != ttm_prime_type)) {
                ret = -ENOENT;
                goto out_unref;
        }

        prime = container_of(base, struct ttm_prime_object, base);
        if (unlikely(!base->shareable)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = mutex_lock_interruptible(&prime->mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_unref;
        }

        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
                struct ttm_operation_ctx ctx = {
                        .interruptible = true,
                        .no_wait_gpu = false
                };
                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;

                /*
                 * Need to create a new dma_buf, with memory accounting.
                 */
                ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
                                           &ctx);
                if (unlikely(ret != 0)) {
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
                        ttm_mem_global_free(tdev->mem_glob,
                                            tdev->dma_buf_size);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                /*
                 * dma_buf has taken the base object reference
                 */
                base = NULL;
                prime->dma_buf = dma_buf;
        }
        mutex_unlock(&prime->mutex);

        ret = dma_buf_fd(dma_buf, flags);
        if (ret >= 0) {
                *prime_fd = ret;
                ret = 0;
        } else
                dma_buf_put(dma_buf);

out_unref:
        if (base)
                ttm_base_object_unref(&base);
        return ret;
}
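
/*
 * The prime object caches the exported dma_buf so repeated exports of
 * the same handle share one dma_buf; get_dma_buf_unless_doomed() guards
 * against racing with its final fput. A round-trip sketch (hypothetical
 * driver code):
 *
 *      int fd;
 *      u32 handle2;
 *
 *      ret = ttm_prime_handle_to_fd(tfile, handle, DRM_CLOEXEC, &fd);
 *      ...
 *      // Importing our own fd just resolves it back to a handle.
 *      ret = ttm_prime_fd_to_handle(tfile, fd, &handle2);
 */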

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
                          struct ttm_prime_object *prime, bool shareable,
                          enum ttm_object_type type,
                          void (*refcount_release) (struct ttm_base_object **),
                          void (*ref_obj_release) (struct ttm_base_object *,
                                                   enum ttm_ref_type ref_type))
{
        mutex_init(&prime->mutex);
        prime->size = PAGE_ALIGN(size);
        prime->real_type = type;
        prime->dma_buf = NULL;
        prime->refcount_release = refcount_release;
        return ttm_base_object_init(tfile, &prime->base, shareable,
                                    ttm_prime_type,
                                    ttm_prime_refcount_release,
                                    ref_obj_release);
}
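
/*
 * Note that the base object is registered as ttm_prime_type while the
 * driver's own type is kept in prime->real_type. A minimal sketch of a
 * driver object built on top of this (hypothetical names):
 *
 *      struct my_surface {
 *              struct ttm_prime_object prime;
 *              // driver payload ...
 *      };
 *
 *      ret = ttm_prime_object_init(tfile, size, &surf->prime,
 *                                  true, my_surface_type,
 *                                  &my_surface_release, NULL);
 *
 * Lookups then use container_of(base, struct ttm_prime_object, base)
 * and consult prime->real_type to recover the driver type.
 */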