/*
 * (c) 2014 Alexander Warg <alexander.warg@kernkonzept.com>
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 */
#include "dma_space.h"
#include "dataspace.h"
#include <l4/re/error_helper>
#include <l4/cxx/hlist>
#include <l4/sys/task>
#include <l4/cxx/unique_ptr>

// TODO:
// 1. Add the cache handling for ARM etc.
// 2. Check and garbage collect Dma_space_task_mappers when their task
//    capability vanishes (maybe also when the last Dma_space associated
//    with the task vanishes).

namespace Moe {
namespace Dma {

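// Mapper used for Phys_space DMA spaces: the DMA address handed out is the
// address returned by Dataspace::dma_map() (the pinned, physical address of
// the dataspace memory). Mappings are kept in _map so unmap() can find them
// again by DMA address.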
class Phys_mapper : public Mapper
{
private:
  typedef Mapping::Map Map;
  Map _map;

public:
  Mapping *map(Dataspace *ds, Q_alloc *alloc, l4_addr_t offset,
               l4_size_t *size, Attributes attrs, Direction dir,
               Dma_addr *dma_addr) override
  {
    L4Re::chksys(ds->dma_map(0, offset, size, attrs, dir, dma_addr));

    cxx::unique_ptr<Dma::Mapping> m(alloc->make_obj<Dma::Mapping>());

    if (!m)
      L4Re::chksys(-L4_ENOMEM);

    m->key = Dma::Region(*dma_addr, *dma_addr + *size - 1);
    if (!_map.insert(m.get()).second)
      L4Re::chksys(-L4_EEXIST);

    m->mapper = this;
    m->attrs = attrs;
    m->dir = dir;
    return m.release();
  }

  int unmap(Dma_addr dma_addr, l4_size_t, Attributes, Direction) override
  {
    auto *m = _map.find_node(dma_addr);
    if (!m)
      return -L4_ENOENT;

    // XXX: think about splitting etc.
    delete m;
    return 0;
  }

  void remove(Dma::Mapping *m) override
  {
    _map.remove(m->key);
    // possibly do the right cache flushing and unpinning
  }
};


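// Mapper that maps dataspace pages into a dedicated kernel DMA task (for
// example one that is bound to an IOMMU). DMA addresses are virtual addresses
// in that task, allocated first-fit from [min, max] by find_free().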
class Task_mapper :
  public Mapper,
  public cxx::H_list_item_t<Task_mapper>
{
private:
  static cxx::H_list_t<Task_mapper> _mappers;
  typedef Mapping::Map Map;

  l4_addr_t min = 1 << 20;
  l4_addr_t max = ~0UL;
  Map _map;

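  // First-fit search for a free DMA address range of 'size' bytes, aligned to
  // 2^align, within [start, end] (clamped to [min, max]). Skips over regions
  // already present in _map; returns L4_INVALID_ADDR if nothing fits.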
  l4_addr_t find_free(l4_addr_t start, l4_addr_t end,
                      unsigned long size, unsigned char align)
  {
    if (size == 0)
      return L4_INVALID_ADDR;

    l4_addr_t a = start;
    if (a < min)
      a = min;

    if (end > max)
      end = max;

    end = l4_trunc_size(end, align);
    if (end <= start)
      return L4_INVALID_ADDR;

    a = l4_round_size(a, align);
    if (a + size - 1 > end)
      return L4_INVALID_ADDR;

    for (;;)
      {
        auto n = _map.find_node(Region(a, a + size - 1));
        if (!n)
          return a;

        a = n->key.end;
        if (a >= end)
          return L4_INVALID_ADDR;

        a = a + 1;
        a = l4_round_size(a, align);
        if (a >= end)
          return L4_INVALID_ADDR;

        if (a + size - 1 > end)
          return L4_INVALID_ADDR;
      }
  }

  L4::Cap<L4::Task> _dma_kern_space;

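  // True if 's' refers to the same kernel task object as _dma_kern_space
  // (compared via cap_equal() on our own task capability).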
  bool is_equal(L4::Cap<L4::Task> s) const
  {
    L4::Cap<L4::Task> myself(L4_BASE_TASK_CAP);
    return myself->cap_equal(s, _dma_kern_space).label();
  }

public:
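  // Drop the region from the DMA address map and unmap it from the DMA task,
  // using the largest naturally aligned flexpages that fit the remainder.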
  void remove(Dma::Mapping *m) override
  {
    _map.remove(m->key);

    l4_addr_t a = m->key.start;
    l4_size_t s = m->key.end - m->key.start + 1;
    unsigned o = L4_PAGESHIFT;
    if (0)
      printf("DMA: unmap %lx-%lx\n", a, a + s - 1);
    while (s > 0)
      {
        while ((1UL << o) > s)
          --o;

        while ((1UL << o) <= s && (a & ((1UL << o) - 1)) == 0)
          ++o;

        --o;
        l4_fpage_t fp = l4_fpage(a, o, L4_FPAGE_RWX);

        if (0)
          printf("DMA: unmap %lx-%lx\n", a, a + (1UL << o) - 1);

        _dma_kern_space->unmap(fp, L4_FP_ALL_SPACES);
        s -= (1UL << o);
        a += (1UL << o);
      }
  }

  explicit Task_mapper(L4::Cap<L4::Task> s)
  : _dma_kern_space(s)
  { _mappers.add(this); }

  ~Task_mapper() noexcept
  {
    if (_dma_kern_space)
      object_pool.cap_alloc()->free(_dma_kern_space);
  }

  static Task_mapper *find_mapper(L4::Cap<L4::Task> task)
  {
    for (auto m: _mappers)
      if (m->is_equal(task))
        return m;
    return 0;
  }

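  // Allocate a free DMA address range for the (page-aligned) request, record
  // it in _map, and map the dataspace pages into the DMA task piece by piece.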
  Mapping *map(Dataspace *ds, Q_alloc *alloc, l4_addr_t offset,
               l4_size_t *_size, Attributes attrs, Direction dir,
               Dma_space::Dma_addr *dma_addr) override
  {
    if (0)
      printf("DMA %p: map: offs=%lx sz=%zx ...\n", this, offset, *_size);

    // Only full pages can be mapped, so work with a rounded offset internally.
    l4_addr_t aligned_offset = l4_trunc_page(offset);

    unsigned long max_sz = ds->round_size();
    if (offset >= max_sz)
      L4Re::chksys(-L4_ERANGE);

    max_sz -= offset;

    if (*_size > max_sz)
      *_size = max_sz;

    l4_size_t size = *_size + (offset - aligned_offset);
    l4_addr_t a = find_free(min, max, size, L4_SUPERPAGESHIFT); //ds->page_shift());
    if (a == L4_INVALID_ADDR)
      L4Re::chksys(-L4_ENOMEM);

    cxx::unique_ptr<Dma::Mapping> node(alloc->make_obj<Dma::Mapping>());

    if (!node)
      L4Re::chksys(-L4_ENOMEM);

    node->key = Region(a, a + size - 1);
    if (!_map.insert(node.get()).second)
      {
        // This should not really happen if find_free() above found a free
        // region.
        L4Re::chksys(-L4_EEXIST);
      }

    node->mapper = this;
    node->attrs = attrs;
    node->dir = dir;

    // Return the address of the requested offset. This works with unmap
    // below because unmap accepts any address in the region for unmapping.
    *dma_addr = a + (offset - aligned_offset);
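    // Map flexpage by flexpage: ask the dataspace for a mapping of the
    // current position, forward the resulting flexpage into the DMA task, and
    // advance by the flexpage size until the whole range is covered.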
    for (;;)
      {
        L4::Ipc::Snd_fpage fpage;
        L4Re::chksys(ds->map(aligned_offset, a, L4Re::Dataspace::F::RW,
                             a, a + size - 1, fpage));

        L4::Cap<L4::Task> myself(L4_BASE_TASK_CAP);

        l4_fpage_t f;
        f.raw = fpage.data();
        L4Re::chksys(_dma_kern_space->map(myself, f, a));

        unsigned long s = 1UL << fpage.order();
        if (size <= s)
          break;

        aligned_offset += s;
        a += s;
        size -= s;
      }

    return node.release();
  }

  int unmap(Dma_addr dma_addr, l4_size_t, Attributes, Direction) override
  {
    auto *m = _map.find_node(dma_addr);
    if (!m)
      return -L4_ENOENT;

    // XXX: think about node splitting, merging
    delete m;
    return 0;
  }
};

cxx::H_list_t<Task_mapper> Task_mapper::_mappers(true);

} // namespace Dma

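// Look up the Moe dataspace behind a received capability. The capability must
// have been received by-ID, must carry write rights, and must refer to one of
// our own Dataspace objects.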
static Dataspace *_get_ds(L4::Ipc::Snd_fpage src_cap)
{
  if (!src_cap.id_received())
    L4Re::chksys(-L4_EINVAL);

  if (!(src_cap.data() & L4_CAP_FPAGE_W))
    L4Re::chksys(-L4_EPERM);

  Dataspace *src
    = dynamic_cast<Dataspace*>(object_pool.find(src_cap.data()));

  if (!src)
    L4Re::chksys(-L4_EINVAL);

  return src;
}

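// Map a region of the source dataspace for DMA: resolve the dataspace,
// delegate to the currently associated mapper, and remember the mapping so it
// can be cleaned up when the DMA space goes away.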
long
Dma_space::op_map(L4Re::Dma_space::Rights,
                  L4::Ipc::Snd_fpage src_ds, l4_addr_t offset,
                  l4_size_t &size, Attributes attrs, Direction dir,
                  Dma_space::Dma_addr &dma_addr)
{
  if (!_mapper)
    return -L4_EINVAL;

  auto *m = _mapper->map(_get_ds(src_ds), this->qalloc(), offset, &size,
                         attrs, dir, &dma_addr);
  _mappings.add(m);
  return 0;
}

long
Dma_space::op_unmap(L4Re::Dma_space::Rights,
                    Dma_addr dma_addr, l4_size_t size,
                    Attributes attrs, Direction dir)
{
  if (!_mapper)
    return -L4_EINVAL;

  return _mapper->unmap(dma_addr, size, attrs, dir);
}

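// Associate this DMA space either with the physical address space
// (Phys_mapper) or with the received DMA task. Task mappers are shared: if
// another Dma_space already uses the same kernel task, its mapper is reused;
// otherwise a new Task_mapper is created for the received capability.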
long
Dma_space::op_associate(L4Re::Dma_space::Rights,
                        L4::Ipc::Snd_fpage dma_task,
                        Space_attribs attr)
{
  _attr = attr;
  if (_mapper)
    {
      delete_all_mappings();
      _mapper = 0;
    }

  if (attr & L4Re::Dma_space::Phys_space)
    {
      _mapper = cxx::Ref_ptr<Dma::Mapper>(qalloc()->make_obj<Dma::Phys_mapper>());
      return 0;
    }
  else
    {
      L4::Cap<L4::Task> rcv_cap(Rcv_cap << L4_CAP_SHIFT);
      if (!dma_task.cap_received())
        return -L4_EINVAL;

      Dma::Mapper *mapper = Dma::Task_mapper::find_mapper(rcv_cap);
      if (!mapper)
        {
          if (0)
            printf("new DMA task assigned, allocate new mapper\n");

          L4::Cap<L4::Task> nc = object_pool.cap_alloc()->alloc<L4::Task>();
          if (!nc.is_valid())
            return -L4_ENOMEM;

          nc.move(rcv_cap);
          mapper = new Dma::Task_mapper(nc);
        }

      _mapper = cxx::Ref_ptr<Dma::Mapper>(mapper);
      return 0;
    }
}

long
Dma_space::op_disassociate(L4Re::Dma_space::Rights)
{
  if (!_mapper)
    return -L4_ENOENT;

  delete_all_mappings();
  _mapper = 0;
  return 0;
}

void
Dma_space::delete_all_mappings()
{
  while (!_mappings.empty())
    delete _mappings.pop_front();
}
} // namespace Moe