// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
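
/*
 * Example: a driver can wire these helpers up with the
 * DRM_GEM_SHMEM_DRIVER_OPS convenience macro from drm_gem_shmem_helper.h.
 * This is a minimal sketch; the "foo" name is hypothetical and everything
 * else a driver needs (fops, feature flags, ...) is elided:
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 * Objects allocated through the helpers get drm_gem_shmem_funcs assigned
 * automatically unless &drm_driver.gem_create_object installed its own
 * &drm_gem_object_funcs.
 */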

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	shmem = to_drm_gem_shmem_obj(obj);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
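
/*
 * Example usage (a minimal sketch; "foo" is a hypothetical driver): allocate
 * a buffer and drop the local reference once done with it. The returned
 * object starts with one reference held by the caller.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(foo->drm, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	... use shmem->base with the GEM APIs ...
 *
 *	drm_gem_object_put(&shmem->base);
 */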

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
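
/*
 * Example (a sketch, assuming a natively allocated object, since
 * drm_gem_shmem_get_pages() must not be called on imported buffers):
 * get/put calls must be balanced, and shmem->pages is only valid in between.
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *
 *	... access the shmem->pages[] array ...
 *
 *	drm_gem_shmem_put_pages(shmem);
 */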

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
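
/*
 * Example (a minimal sketch; "data" and "len" are hypothetical): CPU access
 * to a buffer through a vmap/vunmap pair. The same code works for dma-buf
 * imported and natively allocated objects, and on success map.vaddr is
 * guaranteed to be system memory (is_iomem mappings are rejected above).
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	memcpy(map.vaddr, data, len);
 *
 *	drm_gem_shmem_vunmap(obj, &map);
 */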

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/* Update madvise status, returns true if not purged, else false. */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
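
/*
 * Example (a sketch of a shrinker scan loop; "foo", its object list and the
 * locking around it are hypothetical and elided): drivers that implement a
 * madvise ioctl on top of drm_gem_shmem_madvise() can link donated objects
 * through &drm_gem_shmem_object.madv_list and reclaim them under memory
 * pressure:
 *
 *	list_for_each_entry_safe(shmem, tmp, &foo->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base)) {
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *			list_del_init(&shmem->madv_list);
 *		}
 *	}
 */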

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
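
/*
 * Example (a sketch; foo_dumb_create() and the 64-byte pitch requirement are
 * hypothetical): hardware with pitch restrictions can fix up @args, here by
 * padding each scanline to the alignment the hardware wants, before handing
 * off to this helper.
 *
 *	static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *
 *		return drm_gem_shmem_dumb_create(file, dev, args);
 *	}
 */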

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: gem object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly; instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * the differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
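
/*
 * Example (a sketch; foo_hw_program_entry() is hypothetical): fetch the
 * dma-mapped table once and walk the DMA address/length pairs, e.g. to
 * program an MMU or DMA engine. The table is cached in shmem->sgt and
 * released when the object is freed.
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_hw_program_entry(foo, sg_dma_address(sg), sg_dma_len(sg));
 */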

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);