1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4 * Copyright 2016-2021 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10 #include "../include/hw_ip/mmu/mmu_general.h"
11
12 #include <linux/uaccess.h>
13 #include <linux/slab.h>
14 #include <linux/pci-p2pdma.h>
15
16 MODULE_IMPORT_NS(DMA_BUF);
17
18 #define HL_MMU_DEBUG 0
19
20 /* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
21 #define DRAM_POOL_PAGE_SIZE SZ_8M
22
23 /*
24 * The va ranges in context object contain a list with the available chunks of
25 * device virtual memory.
26 * There is one range for host allocations and one for DRAM allocations.
27 *
 * On initialization each range contains one chunk covering all of its
 * available virtual range, which is half of the total device virtual range.
30 *
31 * On each mapping of physical pages, a suitable virtual range chunk (with a
32 * minimum size) is selected from the list. If the chunk size equals the
33 * requested size, the chunk is returned. Otherwise, the chunk is split into
34 * two chunks - one to return as result and a remainder to stay in the list.
35 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. If its edges match the edges of the adjacent chunks
 * (meaning a contiguous chunk can be created), the chunks are merged.
40 *
 * On finish, the list is checked to contain only one chunk covering the whole
 * relevant virtual range (which is half of the total device virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
44 */
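
/*
 * Illustrative example (addresses are made up): if the DRAM range initially
 * holds a single chunk [0x100000000 - 0x1ffffffff] and a 16MB mapping is
 * requested, the chunk is split into the returned area
 * [0x100000000 - 0x100ffffff] and the remainder [0x101000000 - 0x1ffffffff].
 * Unmapping that area later merges the two chunks back into one.
 */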
45
46 /*
47 * alloc_device_memory() - allocate device memory.
48 * @ctx: pointer to the context structure.
49 * @args: host parameters containing the requested size.
50 * @ret_handle: result handle.
51 *
52 * This function does the following:
53 * - Allocate the requested size rounded up to 'dram_page_size' pages.
54 * - Return unique handle for later map/unmap/free.
55 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
58 {
59 struct hl_device *hdev = ctx->hdev;
60 struct hl_vm *vm = &hdev->vm;
61 struct hl_vm_phys_pg_pack *phys_pg_pack;
62 u64 paddr = 0, total_size, num_pgs, i;
63 u32 num_curr_pgs, page_size;
64 int handle, rc;
65 bool contiguous;
66
67 num_curr_pgs = 0;
68 page_size = hdev->asic_prop.dram_page_size;
69 num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
70 total_size = num_pgs * page_size;
71
72 if (!total_size) {
73 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
74 return -EINVAL;
75 }
76
77 contiguous = args->flags & HL_MEM_CONTIGUOUS;
78
79 if (contiguous) {
80 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
81 if (!paddr) {
82 dev_err(hdev->dev,
83 "failed to allocate %llu contiguous pages with total size of %llu\n",
84 num_pgs, total_size);
85 return -ENOMEM;
86 }
87 }
88
89 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
90 if (!phys_pg_pack) {
91 rc = -ENOMEM;
92 goto pages_pack_err;
93 }
94
95 phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
96 phys_pg_pack->asid = ctx->asid;
97 phys_pg_pack->npages = num_pgs;
98 phys_pg_pack->page_size = page_size;
99 phys_pg_pack->total_size = total_size;
100 phys_pg_pack->flags = args->flags;
101 phys_pg_pack->contiguous = contiguous;
102
103 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
104 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
105 rc = -ENOMEM;
106 goto pages_arr_err;
107 }
108
109 if (phys_pg_pack->contiguous) {
110 for (i = 0 ; i < num_pgs ; i++)
111 phys_pg_pack->pages[i] = paddr + i * page_size;
112 } else {
113 for (i = 0 ; i < num_pgs ; i++) {
114 phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
115 vm->dram_pg_pool,
116 page_size);
117 if (!phys_pg_pack->pages[i]) {
118 dev_err(hdev->dev,
119 "Failed to allocate device memory (out of memory)\n");
120 rc = -ENOMEM;
121 goto page_err;
122 }
123
124 num_curr_pgs++;
125 }
126 }
127
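	/* Allocate a unique handle starting from 1 (0 is never a valid handle).
	 * GFP_ATOMIC is used because the idr spinlock is held across the call.
	 */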
128 spin_lock(&vm->idr_lock);
129 handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
130 GFP_ATOMIC);
131 spin_unlock(&vm->idr_lock);
132
133 if (handle < 0) {
134 dev_err(hdev->dev, "Failed to get handle for page\n");
135 rc = -EFAULT;
136 goto idr_err;
137 }
138
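	/* Take one reference on the DRAM pool per allocated page, so the pool
	 * (and the handles idr) is destroyed only after the last page is freed.
	 */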
139 for (i = 0 ; i < num_pgs ; i++)
140 kref_get(&vm->dram_pg_pool_refcount);
141
142 phys_pg_pack->handle = handle;
143
144 atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
145 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
146
147 *ret_handle = handle;
148
149 return 0;
150
151 idr_err:
152 page_err:
153 if (!phys_pg_pack->contiguous)
154 for (i = 0 ; i < num_curr_pgs ; i++)
155 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
156 page_size);
157
158 kvfree(phys_pg_pack->pages);
159 pages_arr_err:
160 kfree(phys_pg_pack);
161 pages_pack_err:
162 if (contiguous)
163 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
164
165 return rc;
166 }
167
168 /**
169 * dma_map_host_va() - DMA mapping of the given host virtual address.
170 * @hdev: habanalabs device structure.
171 * @addr: the host virtual address of the memory area.
172 * @size: the size of the memory area.
173 * @p_userptr: pointer to result userptr structure.
174 *
175 * This function does the following:
176 * - Allocate userptr structure.
177 * - Pin the given host memory using the userptr structure.
178 * - Perform DMA mapping to have the DMA addresses of the pages.
179 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
182 {
183 struct hl_userptr *userptr;
184 int rc;
185
186 userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
187 if (!userptr) {
188 rc = -ENOMEM;
189 goto userptr_err;
190 }
191
192 rc = hl_pin_host_memory(hdev, addr, size, userptr);
193 if (rc) {
194 dev_err(hdev->dev, "Failed to pin host memory\n");
195 goto pin_err;
196 }
197
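	/* DMA-map the pinned pages; these DMA addresses are what will later be
	 * programmed into the device MMU for host memory mappings.
	 */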
198 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
199 userptr->sgt->nents, DMA_BIDIRECTIONAL);
200 if (rc) {
201 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
202 goto dma_map_err;
203 }
204
205 userptr->dma_mapped = true;
206 userptr->dir = DMA_BIDIRECTIONAL;
207 userptr->vm_type = VM_TYPE_USERPTR;
208
209 *p_userptr = userptr;
210
211 return 0;
212
213 dma_map_err:
214 hl_unpin_host_memory(hdev, userptr);
215 pin_err:
216 kfree(userptr);
217 userptr_err:
218
219 return rc;
220 }
221
222 /**
223 * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
224 * @hdev: habanalabs device structure.
225 * @userptr: userptr to free.
226 *
227 * This function does the following:
228 * - Unpins the physical pages.
229 * - Frees the userptr structure.
230 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
233 {
234 hl_unpin_host_memory(hdev, userptr);
235 kfree(userptr);
236 }
237
238 /**
239 * dram_pg_pool_do_release() - free DRAM pages pool
240 * @ref: pointer to reference object.
241 *
242 * This function does the following:
243 * - Frees the idr structure of physical pages handles.
244 * - Frees the generic pool of DRAM physical pages.
245 */
static void dram_pg_pool_do_release(struct kref *ref)
247 {
248 struct hl_vm *vm = container_of(ref, struct hl_vm,
249 dram_pg_pool_refcount);
250
251 /*
252 * free the idr here as only here we know for sure that there are no
253 * allocated physical pages and hence there are no handles in use
254 */
255 idr_destroy(&vm->phys_pg_pack_handles);
256 gen_pool_destroy(vm->dram_pg_pool);
257 }
258
259 /**
260 * free_phys_pg_pack() - free physical page pack.
261 * @hdev: habanalabs device structure.
262 * @phys_pg_pack: physical page pack to free.
263 *
264 * This function does the following:
265 * - For DRAM memory only
266 * - iterate over the pack, scrub and free each physical block structure by
267 * returning it to the general pool.
268 * In case of error during scrubbing, initiate hard reset.
269 * Once hard reset is triggered, scrubbing is bypassed while freeing the
270 * memory continues.
271 * - Free the hl_vm_phys_pg_pack structure.
272 */
static int free_phys_pg_pack(struct hl_device *hdev,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
275 {
276 struct hl_vm *vm = &hdev->vm;
277 u64 i;
278 int rc = 0;
279
280 if (phys_pg_pack->created_from_userptr)
281 goto end;
282
283 if (phys_pg_pack->contiguous) {
284 if (hdev->memory_scrub && !hdev->disabled) {
285 rc = hdev->asic_funcs->scrub_device_mem(hdev,
286 phys_pg_pack->pages[0],
287 phys_pg_pack->total_size);
288 if (rc)
289 dev_err(hdev->dev,
290 "Failed to scrub contiguous device memory\n");
291 }
292
293 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
294 phys_pg_pack->total_size);
295
296 for (i = 0; i < phys_pg_pack->npages ; i++)
297 kref_put(&vm->dram_pg_pool_refcount,
298 dram_pg_pool_do_release);
299 } else {
300 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
301 if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
302 rc = hdev->asic_funcs->scrub_device_mem(
303 hdev,
304 phys_pg_pack->pages[i],
305 phys_pg_pack->page_size);
306 if (rc)
307 dev_err(hdev->dev,
308 "Failed to scrub device memory\n");
309 }
310 gen_pool_free(vm->dram_pg_pool,
311 phys_pg_pack->pages[i],
312 phys_pg_pack->page_size);
313 kref_put(&vm->dram_pg_pool_refcount,
314 dram_pg_pool_do_release);
315 }
316 }
317
318 if (rc && !hdev->disabled)
319 hl_device_reset(hdev, HL_RESET_HARD);
320
321 end:
322 kvfree(phys_pg_pack->pages);
323 kfree(phys_pg_pack);
324
325 return rc;
326 }
327
328 /**
329 * free_device_memory() - free device memory.
330 * @ctx: pointer to the context structure.
331 * @args: host parameters containing the requested size.
332 *
333 * This function does the following:
334 * - Free the device memory related to the given handle.
335 */
static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
337 {
338 struct hl_device *hdev = ctx->hdev;
339 struct hl_vm *vm = &hdev->vm;
340 struct hl_vm_phys_pg_pack *phys_pg_pack;
341 u32 handle = args->free.handle;
342
343 spin_lock(&vm->idr_lock);
344 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
345 if (phys_pg_pack) {
346 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
347 dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
348 handle);
349 spin_unlock(&vm->idr_lock);
350 return -EINVAL;
351 }
352
353 if (phys_pg_pack->exporting_cnt) {
354 dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
355 spin_unlock(&vm->idr_lock);
356 return -EINVAL;
357 }
358
359 /*
360 * must remove from idr before the freeing of the physical
361 * pages as the refcount of the pool is also the trigger of the
362 * idr destroy
363 */
364 idr_remove(&vm->phys_pg_pack_handles, handle);
365 spin_unlock(&vm->idr_lock);
366
367 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
368 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
369
370 return free_phys_pg_pack(hdev, phys_pg_pack);
371 } else {
372 spin_unlock(&vm->idr_lock);
373 dev_err(hdev->dev,
374 "free device memory failed, no match for handle %u\n",
375 handle);
376 return -EINVAL;
377 }
378
379 return 0;
380 }
381
382 /**
383 * clear_va_list_locked() - free virtual addresses list.
384 * @hdev: habanalabs device structure.
385 * @va_list: list of virtual addresses to free.
386 *
387 * This function does the following:
388 * - Iterate over the list and free each virtual addresses block.
389 *
390 * This function should be called only when va_list lock is taken.
391 */
static void clear_va_list_locked(struct hl_device *hdev,
				struct list_head *va_list)
394 {
395 struct hl_vm_va_block *va_block, *tmp;
396
397 list_for_each_entry_safe(va_block, tmp, va_list, node) {
398 list_del(&va_block->node);
399 kfree(va_block);
400 }
401 }
402
403 /**
404 * print_va_list_locked() - print virtual addresses list.
405 * @hdev: habanalabs device structure.
406 * @va_list: list of virtual addresses to print.
407 *
408 * This function does the following:
409 * - Iterate over the list and print each virtual addresses block.
410 *
411 * This function should be called only when va_list lock is taken.
412 */
static void print_va_list_locked(struct hl_device *hdev,
				struct list_head *va_list)
415 {
416 #if HL_MMU_DEBUG
417 struct hl_vm_va_block *va_block;
418
419 dev_dbg(hdev->dev, "print va list:\n");
420
421 list_for_each_entry(va_block, va_list, node)
422 dev_dbg(hdev->dev,
423 "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
424 va_block->start, va_block->end, va_block->size);
425 #endif
426 }
427
428 /**
429 * merge_va_blocks_locked() - merge a virtual block if possible.
430 * @hdev: pointer to the habanalabs device structure.
431 * @va_list: pointer to the virtual addresses block list.
432 * @va_block: virtual block to merge with adjacent blocks.
433 *
434 * This function does the following:
435 * - Merge the given blocks with the adjacent blocks if their virtual ranges
436 * create a contiguous virtual range.
437 *
 * This function should be called only when the va_list lock is taken.
439 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
442 {
443 struct hl_vm_va_block *prev, *next;
444
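	/* Try to merge with the previous block if it ends right before this
	 * block starts, and then with the next block if it starts right after
	 * this block ends.
	 */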
445 prev = list_prev_entry(va_block, node);
446 if (&prev->node != va_list && prev->end + 1 == va_block->start) {
447 prev->end = va_block->end;
448 prev->size = prev->end - prev->start;
449 list_del(&va_block->node);
450 kfree(va_block);
451 va_block = prev;
452 }
453
454 next = list_next_entry(va_block, node);
455 if (&next->node != va_list && va_block->end + 1 == next->start) {
456 next->start = va_block->start;
457 next->size = next->end - next->start;
458 list_del(&va_block->node);
459 kfree(va_block);
460 }
461 }
462
463 /**
464 * add_va_block_locked() - add a virtual block to the virtual addresses list.
465 * @hdev: pointer to the habanalabs device structure.
466 * @va_list: pointer to the virtual addresses block list.
467 * @start: start virtual address.
468 * @end: end virtual address.
469 *
470 * This function does the following:
471 * - Add the given block to the virtual blocks list and merge with other blocks
472 * if a contiguous virtual block can be created.
473 *
 * This function should be called only when the va_list lock is taken.
475 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
478 {
479 struct hl_vm_va_block *va_block, *res = NULL;
480 u64 size = end - start;
481
482 print_va_list_locked(hdev, va_list);
483
484 list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove once the code is mature */
486 if (hl_mem_area_crosses_range(start, size, va_block->start,
487 va_block->end)) {
488 dev_err(hdev->dev,
489 "block crossing ranges at start 0x%llx, end 0x%llx\n",
490 va_block->start, va_block->end);
491 return -EINVAL;
492 }
493
494 if (va_block->end < start)
495 res = va_block;
496 }
497
498 va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
499 if (!va_block)
500 return -ENOMEM;
501
502 va_block->start = start;
503 va_block->end = end;
504 va_block->size = size;
505
506 if (!res)
507 list_add(&va_block->node, va_list);
508 else
509 list_add(&va_block->node, &res->node);
510
511 merge_va_blocks_locked(hdev, va_list, va_block);
512
513 print_va_list_locked(hdev, va_list);
514
515 return 0;
516 }
517
518 /**
519 * add_va_block() - wrapper for add_va_block_locked.
520 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range structure.
522 * @start: start virtual address.
523 * @end: end virtual address.
524 *
525 * This function does the following:
526 * - Takes the list lock and calls add_va_block_locked.
527 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
530 {
531 int rc;
532
533 mutex_lock(&va_range->lock);
534 rc = add_va_block_locked(hdev, &va_range->list, start, end);
535 mutex_unlock(&va_range->lock);
536
537 return rc;
538 }
539
540 /**
 * is_hint_crossing_range() - check if the hint address crosses the specified
 * reserved range.
543 */
static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
		u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
546 bool range_cross;
547
548 if (range_type == HL_VA_RANGE_TYPE_DRAM)
549 range_cross =
550 hl_mem_area_crosses_range(start_addr, size,
551 prop->hints_dram_reserved_va_range.start_addr,
552 prop->hints_dram_reserved_va_range.end_addr);
553 else if (range_type == HL_VA_RANGE_TYPE_HOST)
554 range_cross =
555 hl_mem_area_crosses_range(start_addr, size,
556 prop->hints_host_reserved_va_range.start_addr,
557 prop->hints_host_reserved_va_range.end_addr);
558 else
559 range_cross =
560 hl_mem_area_crosses_range(start_addr, size,
561 prop->hints_host_hpage_reserved_va_range.start_addr,
562 prop->hints_host_hpage_reserved_va_range.end_addr);
563
564 return range_cross;
565 }
566
567 /**
568 * get_va_block() - get a virtual block for the given size and alignment.
569 *
570 * @hdev: pointer to the habanalabs device structure.
571 * @va_range: pointer to the virtual addresses range.
572 * @size: requested block size.
573 * @hint_addr: hint for requested address by the user.
574 * @va_block_align: required alignment of the virtual block start address.
575 * @range_type: va range type (host, dram)
576 * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
577 *
578 * This function does the following:
579 * - Iterate on the virtual block list to find a suitable virtual block for the
580 * given size, hint address and alignment.
581 * - Reserve the requested block and update the list.
582 * - Return the start address of the virtual block.
583 */
static u64 get_va_block(struct hl_device *hdev,
				struct hl_va_range *va_range,
				u64 size, u64 hint_addr, u32 va_block_align,
				enum hl_va_range_type range_type,
				u32 flags)
589 {
590 struct hl_vm_va_block *va_block, *new_va_block = NULL;
591 struct asic_fixed_properties *prop = &hdev->asic_prop;
592 u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
593 align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
594 dram_hint_mask = prop->dram_hints_align_mask;
595 bool add_prev = false;
596 bool is_align_pow_2 = is_power_of_2(va_range->page_size);
597 bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
598 bool force_hint = flags & HL_MEM_FORCE_HINT;
599
600 if (is_align_pow_2)
601 align_mask = ~((u64)va_block_align - 1);
602 else
603 /*
604 * with non-power-of-2 range we work only with page granularity
605 * and the start address is page aligned,
606 * so no need for alignment checking.
607 */
608 size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
609 va_range->page_size;
610
611 tmp_hint_addr = hint_addr & ~dram_hint_mask;
612
613 /* Check if we need to ignore hint address */
614 if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
615 (!is_align_pow_2 && is_hint_dram_addr &&
616 do_div(tmp_hint_addr, va_range->page_size))) {
617
618 if (force_hint) {
619 /* Hint must be respected, so here we just fail */
620 dev_err(hdev->dev,
621 "Hint address 0x%llx is not page aligned - cannot be respected\n",
622 hint_addr);
623 return 0;
624 }
625
626 dev_dbg(hdev->dev,
627 "Hint address 0x%llx will be ignored because it is not aligned\n",
628 hint_addr);
629 hint_addr = 0;
630 }
631
632 mutex_lock(&va_range->lock);
633
634 print_va_list_locked(hdev, &va_range->list);
635
636 list_for_each_entry(va_block, &va_range->list, node) {
637 /* Calc the first possible aligned addr */
638 valid_start = va_block->start;
639
640 if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
641 valid_start &= align_mask;
642 valid_start += va_block_align;
643 if (valid_start > va_block->end)
644 continue;
645 }
646
647 valid_size = va_block->end - valid_start;
648 if (valid_size < size)
649 continue;
650
651 /*
		 * In case the hint address is 0 and the hints_range_reservation
		 * property is enabled, avoid allocating va blocks from the
		 * range reserved for hint addresses
655 */
656 if (prop->hints_range_reservation && !hint_addr)
657 if (is_hint_crossing_range(range_type, valid_start,
658 size, prop))
659 continue;
660
661 /* Pick the minimal length block which has the required size */
662 if (!new_va_block || (valid_size < reserved_valid_size)) {
663 new_va_block = va_block;
664 reserved_valid_start = valid_start;
665 reserved_valid_size = valid_size;
666 }
667
668 if (hint_addr && hint_addr >= valid_start &&
669 (hint_addr + size) <= va_block->end) {
670 new_va_block = va_block;
671 reserved_valid_start = hint_addr;
672 reserved_valid_size = valid_size;
673 break;
674 }
675 }
676
677 if (!new_va_block) {
678 dev_err(hdev->dev, "no available va block for size %llu\n",
679 size);
680 goto out;
681 }
682
683 if (force_hint && reserved_valid_start != hint_addr) {
684 /* Hint address must be respected. If we are here - this means
685 * we could not respect it.
686 */
687 dev_err(hdev->dev,
688 "Hint address 0x%llx could not be respected\n",
689 hint_addr);
690 reserved_valid_start = 0;
691 goto out;
692 }
693
694 /*
695 * Check if there is some leftover range due to reserving the new
696 * va block, then return it to the main virtual addresses list.
697 */
698 if (reserved_valid_start > new_va_block->start) {
699 prev_start = new_va_block->start;
700 prev_end = reserved_valid_start - 1;
701
702 new_va_block->start = reserved_valid_start;
703 new_va_block->size = reserved_valid_size;
704
705 add_prev = true;
706 }
707
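	/* If the reservation does not consume the whole block, shrink the block
	 * to the remaining tail; otherwise remove it from the list entirely.
	 */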
708 if (new_va_block->size > size) {
709 new_va_block->start += size;
710 new_va_block->size = new_va_block->end - new_va_block->start;
711 } else {
712 list_del(&new_va_block->node);
713 kfree(new_va_block);
714 }
715
716 if (add_prev)
717 add_va_block_locked(hdev, &va_range->list, prev_start,
718 prev_end);
719
720 print_va_list_locked(hdev, &va_range->list);
721 out:
722 mutex_unlock(&va_range->lock);
723
724 return reserved_valid_start;
725 }
726
727 /*
728 * hl_reserve_va_block() - reserve a virtual block of a given size.
729 * @hdev: pointer to the habanalabs device structure.
730 * @ctx: current context
731 * @type: virtual addresses range type.
732 * @size: requested block size.
733 * @alignment: required alignment in bytes of the virtual block start address,
734 * 0 means no alignment.
735 *
736 * This function does the following:
737 * - Iterate on the virtual block list to find a suitable virtual block for the
738 * given size and alignment.
739 * - Reserve the requested block and update the list.
740 * - Return the start address of the virtual block.
741 */
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		enum hl_va_range_type type, u32 size, u32 alignment)
744 {
745 return get_va_block(hdev, ctx->va_range[type], size, 0,
746 max(alignment, ctx->va_range[type]->page_size),
747 type, 0);
748 }
749
750 /**
 * hl_get_va_range_type() - get va_range type for the given address and size.
 * @ctx: pointer to the context structure.
 * @address: the start address of the area we want to validate.
 * @size: the size in bytes of the area we want to validate.
 * @type: returned va_range type.
 *
 * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
757 */
static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
			enum hl_va_range_type *type)
760 {
761 int i;
762
763 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
764 if (hl_mem_area_inside_range(address, size,
765 ctx->va_range[i]->start_addr,
766 ctx->va_range[i]->end_addr)) {
767 *type = i;
768 return 0;
769 }
770 }
771
772 return -EINVAL;
773 }
774
775 /**
776 * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
777 * @hdev: pointer to the habanalabs device structure
778 * @ctx: pointer to the context structure.
 * @start_addr: start virtual address.
 * @size: size of the virtual block to unreserve.
781 *
782 * This function does the following:
783 * - Takes the list lock and calls add_va_block_locked.
784 */
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		u64 start_addr, u64 size)
787 {
788 enum hl_va_range_type type;
789 int rc;
790
791 rc = hl_get_va_range_type(ctx, start_addr, size, &type);
792 if (rc) {
793 dev_err(hdev->dev,
794 "cannot find va_range for va %#llx size %llu",
795 start_addr, size);
796 return rc;
797 }
798
799 rc = add_va_block(hdev, ctx->va_range[type], start_addr,
800 start_addr + size - 1);
801 if (rc)
802 dev_warn(hdev->dev,
803 "add va block failed for vaddr: 0x%llx\n", start_addr);
804
805 return rc;
806 }
807
808 /**
809 * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
810 * memory
811 * @ctx: pointer to the context structure.
812 * @userptr: userptr to initialize from.
813 * @pphys_pg_pack: result pointer.
814 * @force_regular_page: tell the function to ignore huge page optimization,
815 * even if possible. Needed for cases where the device VA
816 * is allocated before we know the composition of the
817 * physical pages
818 *
819 * This function does the following:
820 * - Pin the physical pages related to the given virtual block.
821 * - Create a physical page pack from the physical pages related to the given
822 * virtual block.
823 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
				struct hl_userptr *userptr,
				struct hl_vm_phys_pg_pack **pphys_pg_pack,
				bool force_regular_page)
828 {
829 u32 npages, page_size = PAGE_SIZE,
830 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
831 u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
832 struct hl_vm_phys_pg_pack *phys_pg_pack;
833 bool first = true, is_huge_page_opt;
834 u64 page_mask, total_npages;
835 struct scatterlist *sg;
836 dma_addr_t dma_addr;
837 int rc, i, j;
838
839 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
840 if (!phys_pg_pack)
841 return -ENOMEM;
842
843 phys_pg_pack->vm_type = userptr->vm_type;
844 phys_pg_pack->created_from_userptr = true;
845 phys_pg_pack->asid = ctx->asid;
846 atomic_set(&phys_pg_pack->mapping_cnt, 1);
847
848 is_huge_page_opt = (force_regular_page ? false : true);
849
	/* We can use huge page mapping only if all dma_addrs are aligned to
	 * 2MB and each of their sizes is at least 2MB.
852 * We limit the 2MB optimization to this condition,
853 * since later on we acquire the related VA range as one
854 * consecutive block.
855 */
856 total_npages = 0;
857 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
858 npages = hl_get_sg_info(sg, &dma_addr);
859
860 total_npages += npages;
861
862 if ((npages % pgs_in_huge_page) ||
863 (dma_addr & (huge_page_size - 1)))
864 is_huge_page_opt = false;
865 }
866
867 if (is_huge_page_opt) {
868 page_size = huge_page_size;
869 do_div(total_npages, pgs_in_huge_page);
870 }
871
872 page_mask = ~(((u64) page_size) - 1);
873
874 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
875 GFP_KERNEL);
876 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
877 rc = -ENOMEM;
878 goto page_pack_arr_mem_err;
879 }
880
881 phys_pg_pack->npages = total_npages;
882 phys_pg_pack->page_size = page_size;
883 phys_pg_pack->total_size = total_npages * page_size;
884
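	/* Flatten the scatter-gather list into an array of equally sized
	 * physical pages (regular or huge), keeping the in-page offset of the
	 * first page.
	 */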
885 j = 0;
886 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
887 npages = hl_get_sg_info(sg, &dma_addr);
888
889 /* align down to physical page size and save the offset */
890 if (first) {
891 first = false;
892 phys_pg_pack->offset = dma_addr & (page_size - 1);
893 dma_addr &= page_mask;
894 }
895
896 while (npages) {
897 phys_pg_pack->pages[j++] = dma_addr;
898 dma_addr += page_size;
899
900 if (is_huge_page_opt)
901 npages -= pgs_in_huge_page;
902 else
903 npages--;
904 }
905 }
906
907 *pphys_pg_pack = phys_pg_pack;
908
909 return 0;
910
911 page_pack_arr_mem_err:
912 kfree(phys_pg_pack);
913
914 return rc;
915 }
916
917 /**
 * map_phys_pg_pack() - maps the physical page pack.
919 * @ctx: pointer to the context structure.
920 * @vaddr: start address of the virtual area to map from.
921 * @phys_pg_pack: the pack of physical pages to map to.
922 *
923 * This function does the following:
 * - Maps each chunk of virtual memory to a matching physical chunk.
 * - Returns 0 on success, error code otherwise. On failure, pages that were
 *   already mapped are unmapped before returning.
927 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
930 {
931 struct hl_device *hdev = ctx->hdev;
932 u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
933 u32 page_size = phys_pg_pack->page_size;
934 int rc = 0;
935 bool is_host_addr;
936
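	/* Map page by page; the last argument asks the MMU code to flush only
	 * on the final page to avoid redundant flushes.
	 */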
937 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
938 paddr = phys_pg_pack->pages[i];
939
940 rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
941 (i + 1) == phys_pg_pack->npages);
942 if (rc) {
943 dev_err(hdev->dev,
944 "map failed for handle %u, npages: %llu, mapped: %llu",
945 phys_pg_pack->handle, phys_pg_pack->npages,
946 mapped_pg_cnt);
947 goto err;
948 }
949
950 mapped_pg_cnt++;
951 next_vaddr += page_size;
952 }
953
954 return 0;
955
956 err:
957 is_host_addr = !hl_is_dram_va(hdev, vaddr);
958
959 next_vaddr = vaddr;
960 for (i = 0 ; i < mapped_pg_cnt ; i++) {
961 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
962 (i + 1) == mapped_pg_cnt))
963 dev_warn_ratelimited(hdev->dev,
964 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
965 phys_pg_pack->handle, next_vaddr,
966 phys_pg_pack->pages[i], page_size);
967
968 next_vaddr += page_size;
969
970 /*
971 * unmapping on Palladium can be really long, so avoid a CPU
972 * soft lockup bug by sleeping a little between unmapping pages
973 *
974 * In addition, on host num of pages could be huge,
975 * because page size could be 4KB, so when unmapping host
976 * pages sleep every 32K pages to avoid soft lockup
977 */
978 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
979 usleep_range(50, 200);
980 }
981
982 return rc;
983 }
984
985 /**
986 * unmap_phys_pg_pack() - unmaps the physical page pack.
987 * @ctx: pointer to the context structure.
988 * @vaddr: start address of the virtual area to unmap.
989 * @phys_pg_pack: the pack of physical pages to unmap.
990 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
993 {
994 struct hl_device *hdev = ctx->hdev;
995 u64 next_vaddr, i;
996 bool is_host_addr;
997 u32 page_size;
998
999 is_host_addr = !hl_is_dram_va(hdev, vaddr);
1000 page_size = phys_pg_pack->page_size;
1001 next_vaddr = vaddr;
1002
1003 for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
1004 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
1005 (i + 1) == phys_pg_pack->npages))
1006 dev_warn_ratelimited(hdev->dev,
1007 "unmap failed for vaddr: 0x%llx\n", next_vaddr);
1008
1009 /*
1010 * unmapping on Palladium can be really long, so avoid a CPU
1011 * soft lockup bug by sleeping a little between unmapping pages
1012 *
1013 * In addition, on host num of pages could be huge,
1014 * because page size could be 4KB, so when unmapping host
1015 * pages sleep every 32K pages to avoid soft lockup
1016 */
1017 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
1018 usleep_range(50, 200);
1019 }
1020 }
1021
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
					u64 *paddr)
1024 {
1025 struct hl_device *hdev = ctx->hdev;
1026 struct hl_vm *vm = &hdev->vm;
1027 struct hl_vm_phys_pg_pack *phys_pg_pack;
1028 u32 handle;
1029
1030 handle = lower_32_bits(args->map_device.handle);
1031 spin_lock(&vm->idr_lock);
1032 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1033 if (!phys_pg_pack) {
1034 spin_unlock(&vm->idr_lock);
1035 dev_err(hdev->dev, "no match for handle %u\n", handle);
1036 return -EINVAL;
1037 }
1038
1039 *paddr = phys_pg_pack->pages[0];
1040
1041 spin_unlock(&vm->idr_lock);
1042
1043 return 0;
1044 }
1045
1046 /**
1047 * map_device_va() - map the given memory.
1048 * @ctx: pointer to the context structure.
1049 * @args: host parameters with handle/host virtual address.
1050 * @device_addr: pointer to result device virtual address.
1051 *
1052 * This function does the following:
1053 * - If given a physical device memory handle, map to a device virtual block
1054 * and return the start address of this block.
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block.
1058 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
		u64 *device_addr)
1061 {
1062 struct hl_device *hdev = ctx->hdev;
1063 struct hl_vm *vm = &hdev->vm;
1064 struct hl_vm_phys_pg_pack *phys_pg_pack;
1065 struct hl_userptr *userptr = NULL;
1066 struct hl_vm_hash_node *hnode;
1067 struct hl_va_range *va_range;
1068 enum vm_type *vm_type;
1069 u64 ret_vaddr, hint_addr;
1070 u32 handle = 0, va_block_align;
1071 int rc;
1072 bool is_userptr = args->flags & HL_MEM_USERPTR;
1073 enum hl_va_range_type va_range_type = 0;
1074
1075 /* Assume failure */
1076 *device_addr = 0;
1077
1078 if (is_userptr) {
1079 u64 addr = args->map_host.host_virt_addr,
1080 size = args->map_host.mem_size;
1081 u32 page_size = hdev->asic_prop.pmmu.page_size,
1082 huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
1083
1084 rc = dma_map_host_va(hdev, addr, size, &userptr);
1085 if (rc) {
1086 dev_err(hdev->dev, "failed to get userptr from va\n");
1087 return rc;
1088 }
1089
1090 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1091 &phys_pg_pack, false);
1092 if (rc) {
1093 dev_err(hdev->dev,
1094 "unable to init page pack for vaddr 0x%llx\n",
1095 addr);
1096 goto init_page_pack_err;
1097 }
1098
1099 vm_type = (enum vm_type *) userptr;
1100 hint_addr = args->map_host.hint_addr;
1101 handle = phys_pg_pack->handle;
1102
1103 /* get required alignment */
1104 if (phys_pg_pack->page_size == page_size) {
1105 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1106 va_range_type = HL_VA_RANGE_TYPE_HOST;
1107 /*
1108 * huge page alignment may be needed in case of regular
1109 * page mapping, depending on the host VA alignment
1110 */
1111 if (addr & (huge_page_size - 1))
1112 va_block_align = page_size;
1113 else
1114 va_block_align = huge_page_size;
1115 } else {
1116 /*
1117 * huge page alignment is needed in case of huge page
1118 * mapping
1119 */
1120 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1121 va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
1122 va_block_align = huge_page_size;
1123 }
1124 } else {
1125 handle = lower_32_bits(args->map_device.handle);
1126
1127 spin_lock(&vm->idr_lock);
1128 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1129 if (!phys_pg_pack) {
1130 spin_unlock(&vm->idr_lock);
1131 dev_err(hdev->dev,
1132 "no match for handle %u\n", handle);
1133 return -EINVAL;
1134 }
1135
1136 /* increment now to avoid freeing device memory while mapping */
1137 atomic_inc(&phys_pg_pack->mapping_cnt);
1138
1139 spin_unlock(&vm->idr_lock);
1140
1141 vm_type = (enum vm_type *) phys_pg_pack;
1142
1143 hint_addr = args->map_device.hint_addr;
1144
1145 /* DRAM VA alignment is the same as the MMU page size */
1146 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1147 va_range_type = HL_VA_RANGE_TYPE_DRAM;
1148 va_block_align = hdev->asic_prop.dmmu.page_size;
1149 }
1150
1151 /*
1152 * relevant for mapping device physical memory only, as host memory is
1153 * implicitly shared
1154 */
1155 if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
1156 phys_pg_pack->asid != ctx->asid) {
1157 dev_err(hdev->dev,
1158 "Failed to map memory, handle %u is not shared\n",
1159 handle);
1160 rc = -EPERM;
1161 goto shared_err;
1162 }
1163
1164 hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
1165 if (!hnode) {
1166 rc = -ENOMEM;
1167 goto hnode_err;
1168 }
1169
1170 if (hint_addr && phys_pg_pack->offset) {
1171 if (args->flags & HL_MEM_FORCE_HINT) {
1172 /* Fail if hint must be respected but it can't be */
1173 dev_err(hdev->dev,
1174 "Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
1175 hint_addr, phys_pg_pack->offset);
1176 rc = -EINVAL;
1177 goto va_block_err;
1178 }
1179 dev_dbg(hdev->dev,
1180 "Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
1181 hint_addr, phys_pg_pack->offset);
1182 }
1183
1184 ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
1185 hint_addr, va_block_align,
1186 va_range_type, args->flags);
1187 if (!ret_vaddr) {
1188 dev_err(hdev->dev, "no available va block for handle %u\n",
1189 handle);
1190 rc = -ENOMEM;
1191 goto va_block_err;
1192 }
1193
1194 mutex_lock(&ctx->mmu_lock);
1195
1196 rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
1197 if (rc) {
1198 mutex_unlock(&ctx->mmu_lock);
1199 dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
1200 handle);
1201 goto map_err;
1202 }
1203
1204 rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false,
1205 *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size);
1206
1207 mutex_unlock(&ctx->mmu_lock);
1208
1209 if (rc) {
1210 dev_err(hdev->dev,
1211 "mapping handle %u failed due to MMU cache invalidation\n",
1212 handle);
1213 goto map_err;
1214 }
1215
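	/* Add the offset of the first physical page so the returned device VA
	 * points at the exact start of the user buffer (the offset is 0 for
	 * device memory allocations).
	 */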
1216 ret_vaddr += phys_pg_pack->offset;
1217
1218 hnode->ptr = vm_type;
1219 hnode->vaddr = ret_vaddr;
1220
1221 mutex_lock(&ctx->mem_hash_lock);
1222 hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1223 mutex_unlock(&ctx->mem_hash_lock);
1224
1225 *device_addr = ret_vaddr;
1226
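	/* For host memory, the temporary page pack built from the userptr is no
	 * longer needed once the mapping exists; the userptr itself is kept via
	 * the mem hash node and the pack is rebuilt on unmap.
	 */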
1227 if (is_userptr)
1228 rc = free_phys_pg_pack(hdev, phys_pg_pack);
1229
1230 return rc;
1231
1232 map_err:
1233 if (add_va_block(hdev, va_range, ret_vaddr,
1234 ret_vaddr + phys_pg_pack->total_size - 1))
1235 dev_warn(hdev->dev,
1236 "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1237 handle, ret_vaddr);
1238
1239 va_block_err:
1240 kfree(hnode);
1241 hnode_err:
1242 shared_err:
1243 atomic_dec(&phys_pg_pack->mapping_cnt);
1244 if (is_userptr)
1245 free_phys_pg_pack(hdev, phys_pg_pack);
1246 init_page_pack_err:
1247 if (is_userptr)
1248 dma_unmap_host_va(hdev, userptr);
1249
1250 return rc;
1251 }
1252
1253 /**
1254 * unmap_device_va() - unmap the given device virtual address.
1255 * @ctx: pointer to the context structure.
1256 * @args: host parameters with device virtual address to unmap.
1257 * @ctx_free: true if in context free flow, false otherwise.
1258 *
1259 * This function does the following:
1260 * - unmap the physical pages related to the given virtual address.
1261 * - return the device virtual block to the virtual block list.
1262 */
static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
				bool ctx_free)
1265 {
1266 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1267 u64 vaddr = args->unmap.device_virt_addr;
1268 struct hl_vm_hash_node *hnode = NULL;
1269 struct asic_fixed_properties *prop;
1270 struct hl_device *hdev = ctx->hdev;
1271 struct hl_userptr *userptr = NULL;
1272 struct hl_va_range *va_range;
1273 enum vm_type *vm_type;
1274 bool is_userptr;
1275 int rc = 0;
1276
1277 prop = &hdev->asic_prop;
1278
1279 /* protect from double entrance */
1280 mutex_lock(&ctx->mem_hash_lock);
1281 hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
1282 if (vaddr == hnode->vaddr)
1283 break;
1284
1285 if (!hnode) {
1286 mutex_unlock(&ctx->mem_hash_lock);
1287 dev_err(hdev->dev,
1288 "unmap failed, no mem hnode for vaddr 0x%llx\n",
1289 vaddr);
1290 return -EINVAL;
1291 }
1292
1293 hash_del(&hnode->node);
1294 mutex_unlock(&ctx->mem_hash_lock);
1295
1296 vm_type = hnode->ptr;
1297
1298 if (*vm_type == VM_TYPE_USERPTR) {
1299 is_userptr = true;
1300 userptr = hnode->ptr;
1301
1302 rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
1303 false);
1304 if (rc) {
1305 dev_err(hdev->dev,
1306 "unable to init page pack for vaddr 0x%llx\n",
1307 vaddr);
1308 goto vm_type_err;
1309 }
1310
1311 if (phys_pg_pack->page_size ==
1312 hdev->asic_prop.pmmu.page_size)
1313 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1314 else
1315 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1316 } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1317 is_userptr = false;
1318 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1319 phys_pg_pack = hnode->ptr;
1320 } else {
1321 dev_warn(hdev->dev,
1322 "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1323 vaddr);
1324 rc = -EFAULT;
1325 goto vm_type_err;
1326 }
1327
1328 if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1329 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1330 rc = -EINVAL;
1331 goto mapping_cnt_err;
1332 }
1333
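	/* Round the address down to the start of the mapping: for
	 * non-power-of-2 DRAM page sizes this is done relative to the DRAM
	 * base, otherwise the in-page offset is simply masked off.
	 */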
1334 if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
1335 vaddr = prop->dram_base_address +
1336 DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
1337 phys_pg_pack->page_size) *
1338 phys_pg_pack->page_size;
1339 else
1340 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1341
1342 mutex_lock(&ctx->mmu_lock);
1343
1344 unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1345
1346 /*
1347 * During context free this function is called in a loop to clean all
1348 * the context mappings. Hence the cache invalidation can be called once
1349 * at the loop end rather than for each iteration
1350 */
1351 if (!ctx_free)
1352 rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true,
1353 *vm_type, ctx->asid, vaddr,
1354 phys_pg_pack->total_size);
1355
1356 mutex_unlock(&ctx->mmu_lock);
1357
1358 /*
1359 * If the context is closing we don't need to check for the MMU cache
1360 * invalidation return code and update the VA free list as in this flow
1361 * we invalidate the MMU cache outside of this unmap function and the VA
1362 * free list will be freed anyway.
1363 */
1364 if (!ctx_free) {
1365 int tmp_rc;
1366
1367 if (rc)
1368 dev_err(hdev->dev,
1369 "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
1370 vaddr);
1371
1372 tmp_rc = add_va_block(hdev, va_range, vaddr,
1373 vaddr + phys_pg_pack->total_size - 1);
1374 if (tmp_rc) {
1375 dev_warn(hdev->dev,
1376 "add va block failed for vaddr: 0x%llx\n",
1377 vaddr);
1378 if (!rc)
1379 rc = tmp_rc;
1380 }
1381 }
1382
1383 atomic_dec(&phys_pg_pack->mapping_cnt);
1384 kfree(hnode);
1385
1386 if (is_userptr) {
1387 free_phys_pg_pack(hdev, phys_pg_pack);
1388 dma_unmap_host_va(hdev, userptr);
1389 }
1390
1391 return rc;
1392
1393 mapping_cnt_err:
1394 if (is_userptr)
1395 free_phys_pg_pack(hdev, phys_pg_pack);
1396 vm_type_err:
1397 mutex_lock(&ctx->mem_hash_lock);
1398 hash_add(ctx->mem_hash, &hnode->node, vaddr);
1399 mutex_unlock(&ctx->mem_hash_lock);
1400
1401 return rc;
1402 }
1403
static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
			u32 *size)
1406 {
1407 u32 block_id = 0;
1408 int rc;
1409
1410 rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
1411
1412 *handle = block_id | HL_MMAP_TYPE_BLOCK;
1413 *handle <<= PAGE_SHIFT;
1414
1415 return rc;
1416 }
1417
static void hw_block_vm_close(struct vm_area_struct *vma)
1419 {
1420 struct hl_vm_hw_block_list_node *lnode =
1421 (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
1422 struct hl_ctx *ctx = lnode->ctx;
1423
1424 mutex_lock(&ctx->hw_block_list_lock);
1425 list_del(&lnode->node);
1426 mutex_unlock(&ctx->hw_block_list_lock);
1427 hl_ctx_put(ctx);
1428 kfree(lnode);
1429 vma->vm_private_data = NULL;
1430 }
1431
1432 static const struct vm_operations_struct hw_block_vm_ops = {
1433 .close = hw_block_vm_close
1434 };
1435
1436 /**
1437 * hl_hw_block_mmap() - mmap a hw block to user.
1438 * @hpriv: pointer to the private data of the fd
1439 * @vma: pointer to vm_area_struct of the process
1440 *
1441 * Driver increments context reference for every HW block mapped in order
1442 * to prevent user from closing FD without unmapping first
1443 */
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
1445 {
1446 struct hl_vm_hw_block_list_node *lnode;
1447 struct hl_device *hdev = hpriv->hdev;
1448 struct hl_ctx *ctx = hpriv->ctx;
1449 u32 block_id, block_size;
1450 int rc;
1451
1452 /* We use the page offset to hold the block id and thus we need to clear
1453 * it before doing the mmap itself
1454 */
1455 block_id = vma->vm_pgoff;
1456 vma->vm_pgoff = 0;
1457
1458 /* Driver only allows mapping of a complete HW block */
1459 block_size = vma->vm_end - vma->vm_start;
1460
1461 if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
1462 dev_err(hdev->dev,
1463 "user pointer is invalid - 0x%lx\n",
1464 vma->vm_start);
1465
1466 return -EINVAL;
1467 }
1468
1469 lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
1470 if (!lnode)
1471 return -ENOMEM;
1472
1473 vma->vm_ops = &hw_block_vm_ops;
1474 vma->vm_private_data = lnode;
1475
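	/* Hold a context reference for the lifetime of the mapping; it is
	 * dropped in hw_block_vm_close() when the VMA goes away.
	 */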
1476 hl_ctx_get(hdev, ctx);
1477
1478 rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
1479 if (rc) {
1480 hl_ctx_put(ctx);
1481 kfree(lnode);
1482 return rc;
1483 }
1484
1485 lnode->ctx = ctx;
1486 lnode->vaddr = vma->vm_start;
1487 lnode->size = block_size;
1488 lnode->id = block_id;
1489
1490 mutex_lock(&ctx->hw_block_list_lock);
1491 list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
1492 mutex_unlock(&ctx->hw_block_list_lock);
1493
1494 vma->vm_pgoff = block_id;
1495
1496 return 0;
1497 }
1498
static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
			struct device *dev, enum dma_data_direction dir)
1501 {
1502 dma_addr_t addr;
1503 int rc;
1504
1505 addr = dma_map_resource(dev, bar_address, chunk_size, dir,
1506 DMA_ATTR_SKIP_CPU_SYNC);
1507 rc = dma_mapping_error(dev, addr);
1508 if (rc)
1509 return rc;
1510
1511 sg_set_page(sg, NULL, chunk_size, 0);
1512 sg_dma_address(sg) = addr;
1513 sg_dma_len(sg) = chunk_size;
1514
1515 return 0;
1516 }
1517
static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
						u64 page_size, struct device *dev,
						enum dma_data_direction dir)
1521 {
1522 u64 chunk_size, bar_address, dma_max_seg_size;
1523 struct asic_fixed_properties *prop;
1524 int rc, i, j, nents, cur_page;
1525 struct scatterlist *sg;
1526 struct sg_table *sgt;
1527
1528 prop = &hdev->asic_prop;
1529
1530 dma_max_seg_size = dma_get_max_seg_size(dev);
1531
1532 /* We would like to align the max segment size to PAGE_SIZE, so the
1533 * SGL will contain aligned addresses that can be easily mapped to
1534 * an MMU
1535 */
1536 dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
1537 if (dma_max_seg_size < PAGE_SIZE) {
1538 dev_err_ratelimited(hdev->dev,
1539 "dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
1540 dma_max_seg_size);
1541 return ERR_PTR(-EINVAL);
1542 }
1543
1544 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
1545 if (!sgt)
1546 return ERR_PTR(-ENOMEM);
1547
1548 /* If the size of each page is larger than the dma max segment size,
1549 * then we can't combine pages and the number of entries in the SGL
1550 * will just be the
1551 * <number of pages> * <chunks of max segment size in each page>
1552 */
1553 if (page_size > dma_max_seg_size)
1554 nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
1555 else
1556 /* Get number of non-contiguous chunks */
1557 for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
1558 if (pages[i - 1] + page_size != pages[i] ||
1559 chunk_size + page_size > dma_max_seg_size) {
1560 nents++;
1561 chunk_size = page_size;
1562 continue;
1563 }
1564
1565 chunk_size += page_size;
1566 }
1567
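	/* Allocate the sg table with the computed number of entries; they are
	 * filled below with PCI BAR (device) addresses rather than CPU pages.
	 */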
1568 rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
1569 if (rc)
1570 goto error_free;
1571
1572 cur_page = 0;
1573
1574 if (page_size > dma_max_seg_size) {
1575 u64 size_left, cur_device_address = 0;
1576
1577 size_left = page_size;
1578
1579 /* Need to split each page into the number of chunks of
1580 * dma_max_seg_size
1581 */
1582 for_each_sgtable_dma_sg(sgt, sg, i) {
1583 if (size_left == page_size)
1584 cur_device_address =
1585 pages[cur_page] - prop->dram_base_address;
1586 else
1587 cur_device_address += dma_max_seg_size;
1588
1589 chunk_size = min(size_left, dma_max_seg_size);
1590
1591 bar_address = hdev->dram_pci_bar_start + cur_device_address;
1592
1593 rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1594 if (rc)
1595 goto error_unmap;
1596
1597 if (size_left > dma_max_seg_size) {
1598 size_left -= dma_max_seg_size;
1599 } else {
1600 cur_page++;
1601 size_left = page_size;
1602 }
1603 }
1604 } else {
1605 /* Merge pages and put them into the scatterlist */
1606 for_each_sgtable_dma_sg(sgt, sg, i) {
1607 chunk_size = page_size;
1608 for (j = cur_page + 1 ; j < npages ; j++) {
1609 if (pages[j - 1] + page_size != pages[j] ||
1610 chunk_size + page_size > dma_max_seg_size)
1611 break;
1612
1613 chunk_size += page_size;
1614 }
1615
1616 bar_address = hdev->dram_pci_bar_start +
1617 (pages[cur_page] - prop->dram_base_address);
1618
1619 rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1620 if (rc)
1621 goto error_unmap;
1622
1623 cur_page = j;
1624 }
1625 }
1626
	/* There is no CPU page list behind this sg table. Set orig_nents to 0
	 * so that other users have a chance to detect this and use only nents
	 * (the length of the DMA list) when iterating over the sgl.
	 */
1632 sgt->orig_nents = 0;
1633
1634 return sgt;
1635
1636 error_unmap:
1637 for_each_sgtable_dma_sg(sgt, sg, i) {
1638 if (!sg_dma_len(sg))
1639 continue;
1640
1641 dma_unmap_resource(dev, sg_dma_address(sg),
1642 sg_dma_len(sg), dir,
1643 DMA_ATTR_SKIP_CPU_SYNC);
1644 }
1645
1646 sg_free_table(sgt);
1647
1648 error_free:
1649 kfree(sgt);
1650 return ERR_PTR(rc);
1651 }
1652
static int hl_dmabuf_attach(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
1655 {
1656 struct hl_dmabuf_priv *hl_dmabuf;
1657 struct hl_device *hdev;
1658 int rc;
1659
1660 hl_dmabuf = dmabuf->priv;
1661 hdev = hl_dmabuf->ctx->hdev;
1662
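	/* Check whether the importing device can reach our PCI BAR over
	 * peer-to-peer; if not, clear peer2peer so a later map attempt fails
	 * cleanly instead of programming unreachable addresses.
	 */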
1663 rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true);
1664
1665 if (rc < 0)
1666 attachment->peer2peer = false;
1667 return 0;
1668 }
1669
static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
					enum dma_data_direction dir)
1672 {
1673 struct dma_buf *dma_buf = attachment->dmabuf;
1674 struct hl_vm_phys_pg_pack *phys_pg_pack;
1675 struct hl_dmabuf_priv *hl_dmabuf;
1676 struct hl_device *hdev;
1677 struct sg_table *sgt;
1678
1679 hl_dmabuf = dma_buf->priv;
1680 hdev = hl_dmabuf->ctx->hdev;
1681 phys_pg_pack = hl_dmabuf->phys_pg_pack;
1682
1683 if (!attachment->peer2peer) {
1684 dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
1685 return ERR_PTR(-EPERM);
1686 }
1687
1688 if (phys_pg_pack)
1689 sgt = alloc_sgt_from_device_pages(hdev,
1690 phys_pg_pack->pages,
1691 phys_pg_pack->npages,
1692 phys_pg_pack->page_size,
1693 attachment->dev,
1694 dir);
1695 else
1696 sgt = alloc_sgt_from_device_pages(hdev,
1697 &hl_dmabuf->device_address,
1698 1,
1699 hl_dmabuf->dmabuf->size,
1700 attachment->dev,
1701 dir);
1702
1703 if (IS_ERR(sgt))
1704 dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
1705
1706 return sgt;
1707 }
1708
static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
				struct sg_table *sgt,
				enum dma_data_direction dir)
1712 {
1713 struct scatterlist *sg;
1714 int i;
1715
1716 /* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
1717 * only in the 'device' domain (after all, it maps a PCI bar address which points to the
1718 * device memory).
1719 *
1720 * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
1721 * a sync of the memory to the CPU's cache, as it never resided inside that cache.
1722 */
1723 for_each_sgtable_dma_sg(sgt, sg, i)
1724 dma_unmap_resource(attachment->dev, sg_dma_address(sg),
1725 sg_dma_len(sg), dir,
1726 DMA_ATTR_SKIP_CPU_SYNC);
1727
1728 /* Need to restore orig_nents because sg_free_table use that field */
1729 sgt->orig_nents = sgt->nents;
1730 sg_free_table(sgt);
1731 kfree(sgt);
1732 }
1733
static void hl_release_dmabuf(struct dma_buf *dmabuf)
1735 {
1736 struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
1737 struct hl_ctx *ctx = hl_dmabuf->ctx;
1738 struct hl_device *hdev = ctx->hdev;
1739 struct hl_vm *vm = &hdev->vm;
1740
1741 if (hl_dmabuf->phys_pg_pack) {
1742 spin_lock(&vm->idr_lock);
1743 hl_dmabuf->phys_pg_pack->exporting_cnt--;
1744 spin_unlock(&vm->idr_lock);
1745 }
1746
1747 hl_ctx_put(hl_dmabuf->ctx);
1748
1749 kfree(hl_dmabuf);
1750 }
1751
1752 static const struct dma_buf_ops habanalabs_dmabuf_ops = {
1753 .attach = hl_dmabuf_attach,
1754 .map_dma_buf = hl_map_dmabuf,
1755 .unmap_dma_buf = hl_unmap_dmabuf,
1756 .release = hl_release_dmabuf,
1757 };
1758
static int export_dmabuf_common(struct hl_ctx *ctx,
				struct hl_dmabuf_priv *hl_dmabuf,
				u64 total_size, int flags, int *dmabuf_fd)
1762 {
1763 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1764 struct hl_device *hdev = ctx->hdev;
1765 int rc, fd;
1766
1767 exp_info.ops = &habanalabs_dmabuf_ops;
1768 exp_info.size = total_size;
1769 exp_info.flags = flags;
1770 exp_info.priv = hl_dmabuf;
1771
1772 hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
1773 if (IS_ERR(hl_dmabuf->dmabuf)) {
1774 dev_err(hdev->dev, "failed to export dma-buf\n");
1775 return PTR_ERR(hl_dmabuf->dmabuf);
1776 }
1777
1778 fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
1779 if (fd < 0) {
1780 dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
1781 rc = fd;
1782 goto err_dma_buf_put;
1783 }
1784
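	/* Keep the context alive for as long as the dma-buf exists; the
	 * reference is dropped in hl_release_dmabuf().
	 */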
1785 hl_dmabuf->ctx = ctx;
1786 hl_ctx_get(hdev, hl_dmabuf->ctx);
1787
1788 *dmabuf_fd = fd;
1789
1790 return 0;
1791
1792 err_dma_buf_put:
1793 dma_buf_put(hl_dmabuf->dmabuf);
1794 return rc;
1795 }
1796
1797 /**
1798 * export_dmabuf_from_addr() - export a dma-buf object for the given memory
1799 * address and size.
1800 * @ctx: pointer to the context structure.
1801 * @device_addr: device memory physical address.
1802 * @size: size of device memory.
1803 * @flags: DMA-BUF file/FD flags.
1804 * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
1805 *
1806 * Create and export a dma-buf object for an existing memory allocation inside
1807 * the device memory, and return a FD which is associated with the dma-buf
1808 * object.
1809 *
1810 * Return: 0 on success, non-zero for failure.
1811 */
1812 static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
1813 u64 size, int flags, int *dmabuf_fd)
1814 {
1815 struct hl_dmabuf_priv *hl_dmabuf;
1816 struct hl_device *hdev = ctx->hdev;
1817 struct asic_fixed_properties *prop;
1818 u64 bar_address;
1819 int rc;
1820
1821 prop = &hdev->asic_prop;
1822
1823 if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
1824 dev_dbg(hdev->dev,
1825 "exported device memory address 0x%llx should be aligned to 0x%lx\n",
1826 device_addr, PAGE_SIZE);
1827 return -EINVAL;
1828 }
1829
1830 if (size < PAGE_SIZE) {
1831 dev_dbg(hdev->dev,
1832 "exported device memory size %llu should be equal to or greater than %lu\n",
1833 size, PAGE_SIZE);
1834 return -EINVAL;
1835 }
1836
1837 if (device_addr < prop->dram_user_base_address ||
1838 device_addr + size > prop->dram_end_address ||
1839 device_addr + size < device_addr) {
1840 dev_dbg(hdev->dev,
1841 "DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
1842 device_addr, size);
1843 return -EINVAL;
1844 }
1845
1846 bar_address = hdev->dram_pci_bar_start +
1847 (device_addr - prop->dram_base_address);
1848
1849 if (bar_address + size >
1850 hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
1851 bar_address + size < bar_address) {
1852 dev_dbg(hdev->dev,
1853 "DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
1854 device_addr, size);
1855 return -EINVAL;
1856 }
1857
1858 hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
1859 if (!hl_dmabuf)
1860 return -ENOMEM;
1861
1862 hl_dmabuf->device_address = device_addr;
1863
1864 rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
1865 if (rc)
1866 goto err_free_dmabuf_wrapper;
1867
1868 return 0;
1869
1870 err_free_dmabuf_wrapper:
1871 kfree(hl_dmabuf);
1872 return rc;
1873 }
1874
1875 /**
1876 * export_dmabuf_from_handle() - export a dma-buf object for the given memory
1877 * handle.
1878 * @ctx: pointer to the context structure.
1879 * @handle: device memory allocation handle.
1880 * @flags: DMA-BUF file/FD flags.
1881 * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
1882 *
1883 * Create and export a dma-buf object for an existing memory allocation inside
1884 * the device memory, and return a FD which is associated with the dma-buf
1885 * object.
1886 *
1887 * Return: 0 on success, non-zero for failure.
1888 */
1889 static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
1890 int *dmabuf_fd)
1891 {
1892 struct hl_vm_phys_pg_pack *phys_pg_pack;
1893 struct hl_dmabuf_priv *hl_dmabuf;
1894 struct hl_device *hdev = ctx->hdev;
1895 struct asic_fixed_properties *prop;
1896 struct hl_vm *vm = &hdev->vm;
1897 u64 bar_address;
1898 int rc, i;
1899
1900 prop = &hdev->asic_prop;
1901
1902 if (upper_32_bits(handle)) {
1903 dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
1904 return -EINVAL;
1905 }
1906
1907 spin_lock(&vm->idr_lock);
1908
1909 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
1910 if (!phys_pg_pack) {
1911 spin_unlock(&vm->idr_lock);
1912 dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
1913 return -EINVAL;
1914 }
1915
1916 /* increment now to avoid freeing device memory while exporting */
1917 phys_pg_pack->exporting_cnt++;
1918
1919 spin_unlock(&vm->idr_lock);
1920
1921 if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
1922 dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
1923 rc = -EINVAL;
1924 goto err_dec_exporting_cnt;
1925 }
1926
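	/* Verify that every physical page of the allocation is reachable
	 * through the DRAM PCI BAR, otherwise the importer cannot access it.
	 */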
1927 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
1928
1929 bar_address = hdev->dram_pci_bar_start +
1930 (phys_pg_pack->pages[i] -
1931 prop->dram_base_address);
1932
1933 if (bar_address + phys_pg_pack->page_size >
1934 hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
1935 bar_address + phys_pg_pack->page_size < bar_address) {
1936
1937 dev_dbg(hdev->dev,
1938 "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
1939 phys_pg_pack->pages[i],
1940 phys_pg_pack->page_size);
1941
1942 rc = -EINVAL;
1943 goto err_dec_exporting_cnt;
1944 }
1945 }
1946
1947 hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
1948 if (!hl_dmabuf) {
1949 rc = -ENOMEM;
1950 goto err_dec_exporting_cnt;
1951 }
1952
1953 hl_dmabuf->phys_pg_pack = phys_pg_pack;
1954
1955 rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
1956 flags, dmabuf_fd);
1957 if (rc)
1958 goto err_free_dmabuf_wrapper;
1959
1960 return 0;
1961
1962 err_free_dmabuf_wrapper:
1963 kfree(hl_dmabuf);
1964
1965 err_dec_exporting_cnt:
1966 spin_lock(&vm->idr_lock);
1967 phys_pg_pack->exporting_cnt--;
1968 spin_unlock(&vm->idr_lock);
1969
1970 return rc;
1971 }
1972
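/* Memory IOCTL handler used when the device MMU is disabled: allocations are
 * forced to be contiguous, no virtual mappings are created, and the "map"
 * operation simply returns the host address itself or the physical address
 * behind the given handle.
 */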
1973 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
1974 {
1975 struct hl_device *hdev = hpriv->hdev;
1976 struct hl_ctx *ctx = hpriv->ctx;
1977 u64 block_handle, device_addr = 0;
1978 u32 handle = 0, block_size;
1979 int rc, dmabuf_fd = -EBADF;
1980
1981 switch (args->in.op) {
1982 case HL_MEM_OP_ALLOC:
1983 if (args->in.alloc.mem_size == 0) {
1984 dev_err(hdev->dev,
1985 "alloc size must be larger than 0\n");
1986 rc = -EINVAL;
1987 goto out;
1988 }
1989
1990 /* Force contiguous as there are no real MMU
1991 * translations to overcome physical memory gaps
1992 */
1993 args->in.flags |= HL_MEM_CONTIGUOUS;
1994 rc = alloc_device_memory(ctx, &args->in, &handle);
1995
1996 memset(args, 0, sizeof(*args));
1997 args->out.handle = (__u64) handle;
1998 break;
1999
2000 case HL_MEM_OP_FREE:
2001 rc = free_device_memory(ctx, &args->in);
2002 break;
2003
2004 case HL_MEM_OP_MAP:
2005 if (args->in.flags & HL_MEM_USERPTR) {
2006 device_addr = args->in.map_host.host_virt_addr;
2007 rc = 0;
2008 } else {
2009 rc = get_paddr_from_handle(ctx, &args->in,
2010 &device_addr);
2011 }
2012
2013 memset(args, 0, sizeof(*args));
2014 args->out.device_virt_addr = device_addr;
2015 break;
2016
2017 case HL_MEM_OP_UNMAP:
2018 rc = 0;
2019 break;
2020
2021 case HL_MEM_OP_MAP_BLOCK:
2022 rc = map_block(hdev, args->in.map_block.block_addr,
2023 &block_handle, &block_size);
2024 args->out.block_handle = block_handle;
2025 args->out.block_size = block_size;
2026 break;
2027
2028 case HL_MEM_OP_EXPORT_DMABUF_FD:
2029 rc = export_dmabuf_from_addr(ctx,
2030 args->in.export_dmabuf_fd.handle,
2031 args->in.export_dmabuf_fd.mem_size,
2032 args->in.flags,
2033 &dmabuf_fd);
2034 memset(args, 0, sizeof(*args));
2035 args->out.fd = dmabuf_fd;
2036 break;
2037
2038 default:
2039 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2040 rc = -ENOTTY;
2041 break;
2042 }
2043
2044 out:
2045 return rc;
2046 }
2047
2048 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
2049 {
2050 enum hl_device_status status;
2051 union hl_mem_args *args = data;
2052 struct hl_device *hdev = hpriv->hdev;
2053 struct hl_ctx *ctx = hpriv->ctx;
2054 u64 block_handle, device_addr = 0;
2055 u32 handle = 0, block_size;
2056 int rc, dmabuf_fd = -EBADF;
2057
2058 if (!hl_device_operational(hdev, &status)) {
2059 dev_warn_ratelimited(hdev->dev,
2060 "Device is %s. Can't execute MEMORY IOCTL\n",
2061 hdev->status[status]);
2062 return -EBUSY;
2063 }
2064
2065 if (!hdev->mmu_enable)
2066 return mem_ioctl_no_mmu(hpriv, args);
2067
2068 switch (args->in.op) {
2069 case HL_MEM_OP_ALLOC:
2070 if (args->in.alloc.mem_size == 0) {
2071 dev_err(hdev->dev,
2072 "alloc size must be larger than 0\n");
2073 rc = -EINVAL;
2074 goto out;
2075 }
2076
2077 /* If DRAM does not support virtual memory the driver won't
2078 * handle the allocation/freeing of that memory. However, for
2079 * system administration/monitoring purposes, the driver will
2080 * keep track of the amount of DRAM memory that is allocated
2081 * and freed by the user. Because this code totally relies on
2082 * the user's input, the driver can't ensure the validity
2083 * of this accounting.
2084 */
2085 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2086 atomic64_add(args->in.alloc.mem_size,
2087 &ctx->dram_phys_mem);
2088 atomic64_add(args->in.alloc.mem_size,
2089 &hdev->dram_used_mem);
2090
2091 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2092 rc = 0;
2093
2094 memset(args, 0, sizeof(*args));
2095 args->out.handle = 0;
2096 goto out;
2097 }
2098
2099 rc = alloc_device_memory(ctx, &args->in, &handle);
2100
2101 memset(args, 0, sizeof(*args));
2102 args->out.handle = (__u64) handle;
2103 break;
2104
2105 case HL_MEM_OP_FREE:
2106 /* If DRAM does not support virtual memory the driver won't
2107 * handle the allocation/freeing of that memory. However, for
2108 * system administration/monitoring purposes, the driver will
2109 * keep track of the amount of DRAM memory that is allocated
2110 * and freed by the user. Because this code totally relies on
2111 * the user's input, the driver can't ensure the validity
2112 * of this accounting.
2113 */
2114 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2115 atomic64_sub(args->in.alloc.mem_size,
2116 &ctx->dram_phys_mem);
2117 atomic64_sub(args->in.alloc.mem_size,
2118 &hdev->dram_used_mem);
2119
2120 dev_dbg(hdev->dev, "DRAM free is not supported\n");
2121 rc = 0;
2122
2123 goto out;
2124 }
2125
2126 rc = free_device_memory(ctx, &args->in);
2127 break;
2128
2129 case HL_MEM_OP_MAP:
2130 rc = map_device_va(ctx, &args->in, &device_addr);
2131
2132 memset(args, 0, sizeof(*args));
2133 args->out.device_virt_addr = device_addr;
2134 break;
2135
2136 case HL_MEM_OP_UNMAP:
2137 rc = unmap_device_va(ctx, &args->in, false);
2138 break;
2139
2140 case HL_MEM_OP_MAP_BLOCK:
2141 rc = map_block(hdev, args->in.map_block.block_addr,
2142 &block_handle, &block_size);
2143 args->out.block_handle = block_handle;
2144 args->out.block_size = block_size;
2145 break;
2146
2147 case HL_MEM_OP_EXPORT_DMABUF_FD:
2148 if (hdev->asic_prop.dram_supports_virtual_memory)
2149 rc = export_dmabuf_from_handle(ctx,
2150 args->in.export_dmabuf_fd.handle,
2151 args->in.flags,
2152 &dmabuf_fd);
2153 else
2154 rc = export_dmabuf_from_addr(ctx,
2155 args->in.export_dmabuf_fd.handle,
2156 args->in.export_dmabuf_fd.mem_size,
2157 args->in.flags,
2158 &dmabuf_fd);
2159 memset(args, 0, sizeof(*args));
2160 args->out.fd = dmabuf_fd;
2161 break;
2162
2163 default:
2164 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2165 rc = -ENOTTY;
2166 break;
2167 }
2168
2169 out:
2170 return rc;
2171 }
2172
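/* Pin the user pages that back the given address range and build an SG table
 * describing them. On failure, any pages that were already pinned are
 * released.
 */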
2173 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
2174 u32 npages, u64 start, u32 offset,
2175 struct hl_userptr *userptr)
2176 {
2177 int rc;
2178
2179 if (!access_ok((void __user *) (uintptr_t) addr, size)) {
2180 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
2181 return -EFAULT;
2182 }
2183
2184 userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
2185 GFP_KERNEL);
2186 if (!userptr->pages)
2187 return -ENOMEM;
2188
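	/* Long-term pin of the user pages with write access, as the device may
	 * DMA into them and the pin can outlive this system call.
	 */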
2189 rc = pin_user_pages_fast(start, npages,
2190 FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
2191 userptr->pages);
2192
2193 if (rc != npages) {
2194 dev_err(hdev->dev,
2195 "Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
2196 rc, addr, size, npages);
2197 if (rc < 0)
2198 goto destroy_pages;
2199 npages = rc;
2200 rc = -EFAULT;
2201 goto put_pages;
2202 }
2203 userptr->npages = npages;
2204
2205 rc = sg_alloc_table_from_pages(userptr->sgt,
2206 userptr->pages,
2207 npages, offset, size, GFP_KERNEL);
2208 if (rc < 0) {
2209 dev_err(hdev->dev, "failed to create SG table from pages\n");
2210 goto put_pages;
2211 }
2212
2213 return 0;
2214
2215 put_pages:
2216 unpin_user_pages(userptr->pages, npages);
2217 destroy_pages:
2218 kvfree(userptr->pages);
2219 return rc;
2220 }
2221
2222 /**
2223 * hl_pin_host_memory() - pins a chunk of host memory.
2224 * @hdev: pointer to the habanalabs device structure.
2225 * @addr: the host virtual address of the memory area.
2226 * @size: the size of the memory area.
2227 * @userptr: pointer to hl_userptr structure.
2228 *
2229 * This function does the following:
2230 * - Pins the physical pages.
2231 * - Creates an SG list from those pages.
2232 */
2233 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2234 struct hl_userptr *userptr)
2235 {
2236 u64 start, end;
2237 u32 npages, offset;
2238 int rc;
2239
2240 if (!size) {
2241 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
2242 return -EINVAL;
2243 }
2244
2245 /*
2246 * If the combination of the address and size requested for this memory
2247 * region causes an integer overflow, return error.
2248 */
2249 if (((addr + size) < addr) ||
2250 PAGE_ALIGN(addr + size) < (addr + size)) {
2251 dev_err(hdev->dev,
2252 "user pointer 0x%llx + %llu causes integer overflow\n",
2253 addr, size);
2254 return -EINVAL;
2255 }
2256
2257 userptr->pid = current->pid;
2258 userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
2259 if (!userptr->sgt)
2260 return -ENOMEM;
2261
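	/* Split the user address range into a page-aligned start address, an
	 * offset inside the first page and the number of pages to pin.
	 */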
2262 start = addr & PAGE_MASK;
2263 offset = addr & ~PAGE_MASK;
2264 end = PAGE_ALIGN(addr + size);
2265 npages = (end - start) >> PAGE_SHIFT;
2266
2267 userptr->size = size;
2268 userptr->addr = addr;
2269 userptr->dma_mapped = false;
2270 INIT_LIST_HEAD(&userptr->job_node);
2271
2272 rc = get_user_memory(hdev, addr, size, npages, start, offset,
2273 userptr);
2274 if (rc) {
2275 dev_err(hdev->dev,
2276 "failed to get user memory for address 0x%llx\n",
2277 addr);
2278 goto free_sgt;
2279 }
2280
2281 hl_debugfs_add_userptr(hdev, userptr);
2282
2283 return 0;
2284
2285 free_sgt:
2286 kfree(userptr->sgt);
2287 return rc;
2288 }
2289
2290 /*
2291 * hl_unpin_host_memory - unpins a chunk of host memory.
2292 * @hdev: pointer to the habanalabs device structure
2293 * @userptr: pointer to hl_userptr structure
2294 *
2295 * This function does the following:
2296 * - Unpins the physical pages related to the host memory
2297 * - Frees the SG list
2298 */
2299 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
2300 {
2301 hl_debugfs_remove_userptr(hdev, userptr);
2302
2303 if (userptr->dma_mapped)
2304 hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
2305 userptr->sgt->nents,
2306 userptr->dir);
2307
2308 unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
2309 kvfree(userptr->pages);
2310
2311 list_del(&userptr->job_node);
2312
2313 sg_free_table(userptr->sgt);
2314 kfree(userptr->sgt);
2315 }
2316
2317 /**
2318 * hl_userptr_delete_list() - clear userptr list.
2319 * @hdev: pointer to the habanalabs device structure.
2320 * @userptr_list: pointer to the list to clear.
2321 *
2322 * This function does the following:
2323 * - Iterates over the list and unpins the host memory and frees the userptr
2324 * structure.
2325 */
2326 void hl_userptr_delete_list(struct hl_device *hdev,
2327 struct list_head *userptr_list)
2328 {
2329 struct hl_userptr *userptr, *tmp;
2330
2331 list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
2332 hl_unpin_host_memory(hdev, userptr);
2333 kfree(userptr);
2334 }
2335
2336 INIT_LIST_HEAD(userptr_list);
2337 }
2338
2339 /**
2340 * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
2341 * @hdev: pointer to the habanalabs device structure.
2342 * @addr: the host virtual address to look for.
2343 * @size: the size of the memory area to look for.
2344 * @userptr_list: pointer to the list to search in.
2345 * @userptr: returns a pointer to the matching userptr if found.
2346 *
2347 * This function iterates over the list and checks whether the given address and size match a pinned userptr; returns true if so, otherwise false.
2348 */
2349 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
2350 u32 size, struct list_head *userptr_list,
2351 struct hl_userptr **userptr)
2352 {
2353 list_for_each_entry((*userptr), userptr_list, job_node) {
2354 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
2355 return true;
2356 }
2357
2358 return false;
2359 }
2360
2361 /**
2362 * va_range_init() - initialize virtual addresses range.
2363 * @hdev: pointer to the habanalabs device structure.
2364 * @va_range: pointer to the range to initialize.
2365 * @start: range start address.
2366 * @end: range end address.
 * @page_size: page size of the virtual address range.
2367 *
2368 * This function does the following:
2369 * - Initializes the virtual addresses list of the given range with the given
2370 * addresses.
2371 */
2372 static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
2373 u64 start, u64 end, u32 page_size)
2374 {
2375 int rc;
2376
2377 INIT_LIST_HEAD(&va_range->list);
2378
2379 /*
2380 * PAGE_SIZE alignment
2381 * it is the caller's responsibility to align the addresses if the
2382 * page size is not a power of 2
2383 */
2384
2385 if (is_power_of_2(page_size)) {
2386 if (start & (PAGE_SIZE - 1)) {
2387 start &= PAGE_MASK;
2388 start += PAGE_SIZE;
2389 }
2390
2391 if (end & (PAGE_SIZE - 1))
2392 end &= PAGE_MASK;
2393 }
2394
2395 if (start >= end) {
2396 dev_err(hdev->dev, "too small vm range for va list\n");
2397 return -EFAULT;
2398 }
2399
2400 rc = add_va_block(hdev, va_range, start, end);
2401
2402 if (rc) {
2403 dev_err(hdev->dev, "Failed to init host va list\n");
2404 return rc;
2405 }
2406
2407 va_range->start_addr = start;
2408 va_range->end_addr = end;
2409 va_range->page_size = page_size;
2410
2411 return 0;
2412 }
2413
2414 /**
2415 * va_range_fini() - clear a virtual addresses range.
2416 * @hdev: pointer to the habanalabs device structure.
2417 * @va_range: pointer to the virtual addresses range.
2418 *
2419 * This function does the following:
2420 * - Frees the virtual addresses block list and its lock.
2421 */
2422 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
2423 {
2424 mutex_lock(&va_range->lock);
2425 clear_va_list_locked(hdev, &va_range->list);
2426 mutex_unlock(&va_range->lock);
2427
2428 mutex_destroy(&va_range->lock);
2429 kfree(va_range);
2430 }
2431
2432 /**
2433 * vm_ctx_init_with_ranges() - initialize virtual memory for context.
2434 * @ctx: pointer to the habanalabs context structure.
2435 * @host_range_start: host virtual addresses range start.
2436 * @host_range_end: host virtual addresses range end.
 * @host_page_size: host page size.
2437 * @host_huge_range_start: host virtual addresses range start for memory
2438 * allocated with huge pages.
2439 * @host_huge_range_end: host virtual addresses range end for memory allocated
2440 * with huge pages.
 * @host_huge_page_size: host huge page size.
2441 * @dram_range_start: dram virtual addresses range start.
2442 * @dram_range_end: dram virtual addresses range end.
 * @dram_page_size: dram page size.
2443 *
2444 * This function initializes the following:
2445 * - MMU for context.
2446 * - Virtual address to area descriptor hashtable.
2447 * - Virtual block list of available virtual memory.
2448 */
2449 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
2450 u64 host_range_start,
2451 u64 host_range_end,
2452 u32 host_page_size,
2453 u64 host_huge_range_start,
2454 u64 host_huge_range_end,
2455 u32 host_huge_page_size,
2456 u64 dram_range_start,
2457 u64 dram_range_end,
2458 u32 dram_page_size)
2459 {
2460 struct hl_device *hdev = ctx->hdev;
2461 int i, rc;
2462
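	/* Allocate a descriptor for each VA range type (host, host-huge and DRAM) */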
2463 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
2464 ctx->va_range[i] =
2465 kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
2466 if (!ctx->va_range[i]) {
2467 rc = -ENOMEM;
2468 goto free_va_range;
2469 }
2470 }
2471
2472 rc = hl_mmu_ctx_init(ctx);
2473 if (rc) {
2474 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
2475 goto free_va_range;
2476 }
2477
2478 mutex_init(&ctx->mem_hash_lock);
2479 hash_init(ctx->mem_hash);
2480
2481 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2482
2483 rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
2484 host_range_start, host_range_end, host_page_size);
2485 if (rc) {
2486 dev_err(hdev->dev, "failed to init host vm range\n");
2487 goto mmu_ctx_fini;
2488 }
2489
2490 if (hdev->pmmu_huge_range) {
2491 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2492
2493 rc = va_range_init(hdev,
2494 ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
2495 host_huge_range_start, host_huge_range_end,
2496 host_huge_page_size);
2497 if (rc) {
2498 dev_err(hdev->dev,
2499 "failed to init host huge vm range\n");
2500 goto clear_host_va_range;
2501 }
2502 } else {
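		/* No dedicated huge-page host range: free the spare descriptor
		 * and let the huge-page entry alias the regular host range.
		 */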
2503 kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2504 ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
2505 ctx->va_range[HL_VA_RANGE_TYPE_HOST];
2506 }
2507
2508 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2509
2510 rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
2511 dram_range_start, dram_range_end, dram_page_size);
2512 if (rc) {
2513 dev_err(hdev->dev, "failed to init dram vm range\n");
2514 goto clear_host_huge_va_range;
2515 }
2516
2517 hl_debugfs_add_ctx_mem_hash(hdev, ctx);
2518
2519 return 0;
2520
2521 clear_host_huge_va_range:
2522 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2523
2524 if (hdev->pmmu_huge_range) {
2525 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2526 clear_va_list_locked(hdev,
2527 &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
2528 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2529 }
2530 clear_host_va_range:
2531 if (hdev->pmmu_huge_range)
2532 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2533 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2534 clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
2535 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2536 mmu_ctx_fini:
2537 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2538 mutex_destroy(&ctx->mem_hash_lock);
2539 hl_mmu_ctx_fini(ctx);
2540 free_va_range:
2541 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
2542 kfree(ctx->va_range[i]);
2543
2544 return rc;
2545 }
2546
2547 int hl_vm_ctx_init(struct hl_ctx *ctx)
2548 {
2549 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
2550 u64 host_range_start, host_range_end, host_huge_range_start,
2551 host_huge_range_end, dram_range_start, dram_range_end;
2552 u32 host_page_size, host_huge_page_size, dram_page_size;
2553
2554 atomic64_set(&ctx->dram_phys_mem, 0);
2555
2556 /*
2557 * - If MMU is enabled, init the ranges as usual.
2558 * - If MMU is disabled, in case of host mapping, the returned address
2559 * is the given one.
2560 * In case of DRAM mapping, the returned address is the physical
2561 * address of the memory related to the given handle.
2562 */
2563 if (!ctx->hdev->mmu_enable)
2564 return 0;
2565
2566 dram_range_start = prop->dmmu.start_addr;
2567 dram_range_end = prop->dmmu.end_addr;
2568 dram_page_size = prop->dram_page_size ?
2569 prop->dram_page_size : prop->dmmu.page_size;
2570 host_range_start = prop->pmmu.start_addr;
2571 host_range_end = prop->pmmu.end_addr;
2572 host_page_size = prop->pmmu.page_size;
2573 host_huge_range_start = prop->pmmu_huge.start_addr;
2574 host_huge_range_end = prop->pmmu_huge.end_addr;
2575 host_huge_page_size = prop->pmmu_huge.page_size;
2576
2577 return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
2578 host_page_size, host_huge_range_start,
2579 host_huge_range_end, host_huge_page_size,
2580 dram_range_start, dram_range_end, dram_page_size);
2581 }
2582
2583 /**
2584 * hl_vm_ctx_fini() - virtual memory teardown of context.
2585 * @ctx: pointer to the habanalabs context structure.
2586 *
2587 * This function performs teardown of the following:
2588 * - Virtual block list of available virtual memory.
2589 * - Virtual address to area descriptor hashtable.
2590 * - MMU for context.
2591 *
2592 * In addition this function does the following:
2593 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
2594 * hashtable should be empty as no valid mappings should exist at this
2595 * point.
2596 * - Frees any existing physical page list from the idr which relates to the
2597 * current context asid.
2598 * - This function checks the virtual block list for correctness. At this point
2599 * the list should contain one element which describes the whole virtual
2600 * memory range of the context. Otherwise, a warning is printed.
2601 */
2602 void hl_vm_ctx_fini(struct hl_ctx *ctx)
2603 {
2604 struct hl_device *hdev = ctx->hdev;
2605 struct hl_vm *vm = &hdev->vm;
2606 struct hl_vm_phys_pg_pack *phys_pg_list;
2607 struct hl_vm_hash_node *hnode;
2608 struct hlist_node *tmp_node;
2609 struct hl_mem_in args;
2610 int i;
2611
2612 if (!hdev->mmu_enable)
2613 return;
2614
2615 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
2616
2617 /*
2618 * Clearly something went wrong on hard reset, so there is no point in
2619 * printing another error about its side effects
2620 */
2621 if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
2622 dev_dbg(hdev->dev,
2623 "user released device without removing its memory mappings\n");
2624
2625 hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
2626 dev_dbg(hdev->dev,
2627 "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
2628 hnode->vaddr, ctx->asid);
2629 args.unmap.device_virt_addr = hnode->vaddr;
2630 unmap_device_va(ctx, &args, true);
2631 }
2632
2633 mutex_lock(&ctx->mmu_lock);
2634
2635 /* invalidate the cache once after the unmapping loop */
2636 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
2637 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
2638
2639 mutex_unlock(&ctx->mmu_lock);
2640
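	/* Free any physical page packs that still belong to this context's
	 * ASID and give their size back to the DRAM usage accounting.
	 */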
2641 spin_lock(&vm->idr_lock);
2642 idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
2643 if (phys_pg_list->asid == ctx->asid) {
2644 dev_dbg(hdev->dev,
2645 "page list 0x%px of asid %d is still alive\n",
2646 phys_pg_list, ctx->asid);
2647 atomic64_sub(phys_pg_list->total_size,
2648 &hdev->dram_used_mem);
2649 free_phys_pg_pack(hdev, phys_pg_list);
2650 idr_remove(&vm->phys_pg_pack_handles, i);
2651 }
2652 spin_unlock(&vm->idr_lock);
2653
2654 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
2655 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
2656
2657 if (hdev->pmmu_huge_range)
2658 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2659
2660 mutex_destroy(&ctx->mem_hash_lock);
2661 hl_mmu_ctx_fini(ctx);
2662
2663 /* In this case we need to clear the global accounting of DRAM usage
2664 * because the user notifies us only on allocations. Once the user is gone,
2665 * all DRAM is available again
2666 */
2667 if (ctx->asid != HL_KERNEL_ASID_ID &&
2668 !hdev->asic_prop.dram_supports_virtual_memory)
2669 atomic64_set(&hdev->dram_used_mem, 0);
2670 }
2671
2672 /**
2673 * hl_vm_init() - initialize virtual memory module.
2674 * @hdev: pointer to the habanalabs device structure.
2675 *
2676 * This function initializes the following:
2677 * - MMU module.
2678 * - DRAM physical pages pool, with a granularity of the DRAM page size (or of DRAM_POOL_PAGE_SIZE for non-power-of-2 DRAM page sizes).
2679 * - Idr for device memory allocation handles.
2680 */
2681 int hl_vm_init(struct hl_device *hdev)
2682 {
2683 struct asic_fixed_properties *prop = &hdev->asic_prop;
2684 struct hl_vm *vm = &hdev->vm;
2685 int rc;
2686
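	/* Use the DRAM page size as the pool allocation granularity when it is
	 * a power of 2, otherwise fall back to DRAM_POOL_PAGE_SIZE.
	 */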
2687 if (is_power_of_2(prop->dram_page_size))
2688 vm->dram_pg_pool =
2689 gen_pool_create(__ffs(prop->dram_page_size), -1);
2690 else
2691 vm->dram_pg_pool =
2692 gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
2693
2694 if (!vm->dram_pg_pool) {
2695 dev_err(hdev->dev, "Failed to create dram page pool\n");
2696 return -ENOMEM;
2697 }
2698
2699 kref_init(&vm->dram_pg_pool_refcount);
2700
2701 rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
2702 prop->dram_end_address - prop->dram_user_base_address,
2703 -1);
2704
2705 if (rc) {
2706 dev_err(hdev->dev,
2707 "Failed to add memory to dram page pool %d\n", rc);
2708 goto pool_add_err;
2709 }
2710
2711 spin_lock_init(&vm->idr_lock);
2712 idr_init(&vm->phys_pg_pack_handles);
2713
2714 atomic64_set(&hdev->dram_used_mem, 0);
2715
2716 vm->init_done = true;
2717
2718 return 0;
2719
2720 pool_add_err:
2721 gen_pool_destroy(vm->dram_pg_pool);
2722
2723 return rc;
2724 }
2725
2726 /**
2727 * hl_vm_fini() - virtual memory module teardown.
2728 * @hdev: pointer to the habanalabs device structure.
2729 *
2730 * This function performs teardown of the following:
2731 * - Idr for device memory allocation handles.
2732 * - DRAM physical pages pool, with a granularity of the DRAM page size (or of DRAM_POOL_PAGE_SIZE for non-power-of-2 DRAM page sizes).
2733 * - MMU module.
2734 */
2735 void hl_vm_fini(struct hl_device *hdev)
2736 {
2737 struct hl_vm *vm = &hdev->vm;
2738
2739 if (!vm->init_done)
2740 return;
2741
2742 /*
2743 * At this point all the contexts should be freed and hence no DRAM
2744 * memory should be in use, so the DRAM pool can be freed here.
2745 */
2746 if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
2747 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
2748 __func__);
2749
2750 vm->init_done = false;
2751 }
2752
2753 /**
2754 * hl_hw_block_mem_init() - HW block memory initialization.
2755 * @ctx: pointer to the habanalabs context structure.
2756 *
2757 * This function initializes the HW block virtual mapped addresses list and
2758 * its lock.
2759 */
2760 void hl_hw_block_mem_init(struct hl_ctx *ctx)
2761 {
2762 mutex_init(&ctx->hw_block_list_lock);
2763 INIT_LIST_HEAD(&ctx->hw_block_mem_list);
2764 }
2765
2766 /**
2767 * hl_hw_block_mem_fini() - HW block memory teardown.
2768 * @ctx: pointer to the habanalabs context structure.
2769 *
2770 * This function clears the HW block virtual mapped addresses list and destroys
2771 * its lock.
2772 */
2773 void hl_hw_block_mem_fini(struct hl_ctx *ctx)
2774 {
2775 struct hl_vm_hw_block_list_node *lnode, *tmp;
2776
2777 if (!list_empty(&ctx->hw_block_mem_list))
2778 dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
2779
2780 list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
2781 list_del(&lnode->node);
2782 kfree(lnode);
2783 }
2784
2785 mutex_destroy(&ctx->hw_block_list_lock);
2786 }
2787