// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */
6
7 #include <linux/dma-buf.h>
8 #include <linux/highmem.h>
9
10 #include <drm/drm_prime.h>
11
12 #include "omap_drv.h"
13
14 MODULE_IMPORT_NS(DMA_BUF);
15
/* -----------------------------------------------------------------------------
 * DMABUF Export
 */
19
omap_gem_map_dma_buf(struct dma_buf_attachment * attachment,enum dma_data_direction dir)20 static struct sg_table *omap_gem_map_dma_buf(
21 struct dma_buf_attachment *attachment,
22 enum dma_data_direction dir)
23 {
24 struct drm_gem_object *obj = attachment->dmabuf->priv;
25 struct sg_table *sg;
26 dma_addr_t dma_addr;
27 int ret;
28
29 sg = kzalloc(sizeof(*sg), GFP_KERNEL);
30 if (!sg)
31 return ERR_PTR(-ENOMEM);
32
33 /* camera, etc, need physically contiguous.. but we need a
34 * better way to know this..
35 */
36 ret = omap_gem_pin(obj, &dma_addr);
37 if (ret)
38 goto out;
39
40 ret = sg_alloc_table(sg, 1, GFP_KERNEL);
41 if (ret)
42 goto out;
43
44 sg_init_table(sg->sgl, 1);
45 sg_dma_len(sg->sgl) = obj->size;
46 sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
47 sg_dma_address(sg->sgl) = dma_addr;
48
49 /* this must be after omap_gem_pin() to ensure we have pages attached */
50 omap_gem_dma_sync_buffer(obj, dir);
51
52 return sg;
53 out:
54 kfree(sg);
55 return ERR_PTR(ret);
56 }
57
omap_gem_unmap_dma_buf(struct dma_buf_attachment * attachment,struct sg_table * sg,enum dma_data_direction dir)58 static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
59 struct sg_table *sg, enum dma_data_direction dir)
60 {
61 struct drm_gem_object *obj = attachment->dmabuf->priv;
62 omap_gem_unpin(obj);
63 sg_free_table(sg);
64 kfree(sg);
65 }
66
omap_gem_dmabuf_begin_cpu_access(struct dma_buf * buffer,enum dma_data_direction dir)67 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
68 enum dma_data_direction dir)
69 {
70 struct drm_gem_object *obj = buffer->priv;
71 struct page **pages;
72 if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
73 /* TODO we would need to pin at least part of the buffer to
74 * get de-tiled view. For now just reject it.
75 */
76 return -ENOMEM;
77 }
78 /* make sure we have the pages: */
79 return omap_gem_get_pages(obj, &pages, true);
80 }
81
omap_gem_dmabuf_end_cpu_access(struct dma_buf * buffer,enum dma_data_direction dir)82 static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
83 enum dma_data_direction dir)
84 {
85 struct drm_gem_object *obj = buffer->priv;
86 omap_gem_put_pages(obj);
87 return 0;
88 }
89
omap_gem_dmabuf_mmap(struct dma_buf * buffer,struct vm_area_struct * vma)90 static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
91 struct vm_area_struct *vma)
92 {
93 struct drm_gem_object *obj = buffer->priv;
94 int ret = 0;
95
96 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
97 if (ret < 0)
98 return ret;
99
100 return omap_gem_mmap_obj(obj, vma);
101 }
102
103 static const struct dma_buf_ops omap_dmabuf_ops = {
104 .map_dma_buf = omap_gem_map_dma_buf,
105 .unmap_dma_buf = omap_gem_unmap_dma_buf,
106 .release = drm_gem_dmabuf_release,
107 .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
108 .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
109 .mmap = omap_gem_dmabuf_mmap,
110 };
111
omap_gem_prime_export(struct drm_gem_object * obj,int flags)112 struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
113 {
114 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
115
116 exp_info.ops = &omap_dmabuf_ops;
117 exp_info.size = obj->size;
118 exp_info.flags = flags;
119 exp_info.priv = obj;
120
121 return drm_gem_dmabuf_export(obj->dev, &exp_info);
122 }
123
/* -----------------------------------------------------------------------------
 * DMABUF Import
 */
127
omap_gem_prime_import(struct drm_device * dev,struct dma_buf * dma_buf)128 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
129 struct dma_buf *dma_buf)
130 {
131 struct dma_buf_attachment *attach;
132 struct drm_gem_object *obj;
133 struct sg_table *sgt;
134 int ret;
135
136 if (dma_buf->ops == &omap_dmabuf_ops) {
137 obj = dma_buf->priv;
138 if (obj->dev == dev) {
139 /*
140 * Importing dmabuf exported from out own gem increases
141 * refcount on gem itself instead of f_count of dmabuf.
142 */
143 drm_gem_object_get(obj);
144 return obj;
145 }
146 }
147
148 attach = dma_buf_attach(dma_buf, dev->dev);
149 if (IS_ERR(attach))
150 return ERR_CAST(attach);
151
152 get_dma_buf(dma_buf);
153
154 sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
155 if (IS_ERR(sgt)) {
156 ret = PTR_ERR(sgt);
157 goto fail_detach;
158 }
159
160 obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
161 if (IS_ERR(obj)) {
162 ret = PTR_ERR(obj);
163 goto fail_unmap;
164 }
165
166 obj->import_attach = attach;
167
168 return obj;
169
170 fail_unmap:
171 dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
172 fail_detach:
173 dma_buf_detach(dma_buf, attach);
174 dma_buf_put(dma_buf);
175
176 return ERR_PTR(ret);
177 }
178