1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4 * Copyright 2016-2022 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8 #include <uapi/drm/habanalabs_accel.h>
9 #include "habanalabs.h"
10 #include "../include/hw_ip/mmu/mmu_general.h"
11
12 #include <linux/uaccess.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pci-p2pdma.h>
16
17 MODULE_IMPORT_NS(DMA_BUF);
18
19 #define HL_MMU_DEBUG 0
20
21 /* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
22 #define DRAM_POOL_PAGE_SIZE SZ_8M
23
24 #define MEM_HANDLE_INVALID ULONG_MAX
25
26 static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
27 struct hl_mem_in *args, u64 *handle);
28
29 static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
30 {
31 struct asic_fixed_properties *prop = &hdev->asic_prop;
32 u64 psize;
33
34 /*
35 * For ASICs that support a user-set allocation page size, we honor the
36 * user's choice only if it is not 0 (0 means use the default page size).
37 */
38 if (prop->supports_user_set_page_size && args->alloc.page_size) {
39 psize = args->alloc.page_size;
40
41 if (!is_power_of_2(psize)) {
42 dev_err(hdev->dev, "user page size (%#llx) is not power of 2\n", psize);
43 return -EINVAL;
44 }
45 } else {
46 psize = prop->device_mem_alloc_default_page_size;
47 }
48
49 *page_size = psize;
50
51 return 0;
52 }
53
54 /*
55 * The va ranges in context object contain a list with the available chunks of
56 * device virtual memory.
57 * There is one range for host allocations and one for DRAM allocations.
58 *
59 * On initialization each range contains one chunk of all of its available
60 * virtual range which is a half of the total device virtual range.
61 *
62 * On each mapping of physical pages, a suitable virtual range chunk (with a
63 * minimum size) is selected from the list. If the chunk size equals the
64 * requested size, the chunk is returned. Otherwise, the chunk is split into
65 * two chunks - one to return as result and a remainder to stay in the list.
66 *
67 * On each unmapping of a virtual address, the relevant virtual chunk is
68 * returned to the list. The chunk is added to the list, and if its edges
69 * match the edges of the adjacent chunks (i.e. a contiguous chunk can be
70 * created), the chunks are merged.
71 *
72 * On finish, the list is checked to contain only one chunk of the entire
73 * relevant virtual range (which is half of the device's total virtual range).
74 * If not (meaning not all mappings were unmapped), a warning is printed.
75 */
76
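/*
 * Illustrative example (numbers are made up): a range holding one free
 * chunk [0x1000, 0x8fff]. Mapping 0x2000 bytes splits it into the
 * returned chunk [0x1000, 0x2fff] and a remainder [0x3000, 0x8fff] that
 * stays in the list; unmapping later merges the two back into a single
 * [0x1000, 0x8fff] chunk.
 */
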
77 /*
78 * alloc_device_memory() - allocate device memory.
79 * @ctx: pointer to the context structure.
80 * @args: host parameters containing the requested size.
81 * @ret_handle: result handle.
82 *
83 * This function does the following:
84 * - Allocate the requested size rounded up to 'dram_page_size' pages.
85 * - Return unique handle for later map/unmap/free.
86 */
87 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
88 u32 *ret_handle)
89 {
90 struct hl_device *hdev = ctx->hdev;
91 struct hl_vm *vm = &hdev->vm;
92 struct hl_vm_phys_pg_pack *phys_pg_pack;
93 u64 paddr = 0, total_size, num_pgs, i;
94 u32 num_curr_pgs, page_size;
95 bool contiguous;
96 int handle, rc;
97
98 num_curr_pgs = 0;
99
100 rc = set_alloc_page_size(hdev, args, &page_size);
101 if (rc)
102 return rc;
103
104 num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
105 total_size = num_pgs * page_size;
106
107 if (!total_size) {
108 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
109 return -EINVAL;
110 }
111
112 contiguous = args->flags & HL_MEM_CONTIGUOUS;
113
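/*
 * For a contiguous allocation, grab one physically contiguous chunk from
 * the DRAM pool up front; the per-page loop below only records the
 * per-page addresses within it.
 */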
114 if (contiguous) {
115 if (is_power_of_2(page_size))
116 paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
117 total_size, NULL, page_size);
118 else
119 paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
120 if (!paddr) {
121 dev_err(hdev->dev,
122 "Cannot allocate %llu contiguous pages with total size of %llu\n",
123 num_pgs, total_size);
124 return -ENOMEM;
125 }
126 }
127
128 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
129 if (!phys_pg_pack) {
130 rc = -ENOMEM;
131 goto pages_pack_err;
132 }
133
134 phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
135 phys_pg_pack->asid = ctx->asid;
136 phys_pg_pack->npages = num_pgs;
137 phys_pg_pack->page_size = page_size;
138 phys_pg_pack->total_size = total_size;
139 phys_pg_pack->flags = args->flags;
140 phys_pg_pack->contiguous = contiguous;
141
142 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
143 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
144 rc = -ENOMEM;
145 goto pages_arr_err;
146 }
147
148 if (phys_pg_pack->contiguous) {
149 for (i = 0 ; i < num_pgs ; i++)
150 phys_pg_pack->pages[i] = paddr + i * page_size;
151 } else {
152 for (i = 0 ; i < num_pgs ; i++) {
153 if (is_power_of_2(page_size))
154 phys_pg_pack->pages[i] =
155 (uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
156 page_size, NULL,
157 page_size);
158 else
159 phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
160 page_size);
161
162 if (!phys_pg_pack->pages[i]) {
163 dev_err(hdev->dev,
164 "Cannot allocate device memory (out of memory)\n");
165 rc = -ENOMEM;
166 goto page_err;
167 }
168
169 num_curr_pgs++;
170 }
171 }
172
173 spin_lock(&vm->idr_lock);
174 handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
175 GFP_ATOMIC);
176 spin_unlock(&vm->idr_lock);
177
178 if (handle < 0) {
179 dev_err(hdev->dev, "Failed to get handle for page\n");
180 rc = -EFAULT;
181 goto idr_err;
182 }
183
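/* take one DRAM pool reference per page so the pool (and its handles idr) stays alive until every page is freed */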
184 for (i = 0 ; i < num_pgs ; i++)
185 kref_get(&vm->dram_pg_pool_refcount);
186
187 phys_pg_pack->handle = handle;
188
189 atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
190 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
191
192 *ret_handle = handle;
193
194 return 0;
195
196 idr_err:
197 page_err:
198 if (!phys_pg_pack->contiguous)
199 for (i = 0 ; i < num_curr_pgs ; i++)
200 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
201 page_size);
202
203 kvfree(phys_pg_pack->pages);
204 pages_arr_err:
205 kfree(phys_pg_pack);
206 pages_pack_err:
207 if (contiguous)
208 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
209
210 return rc;
211 }
212
213 /**
214 * dma_map_host_va() - DMA mapping of the given host virtual address.
215 * @hdev: habanalabs device structure.
216 * @addr: the host virtual address of the memory area.
217 * @size: the size of the memory area.
218 * @p_userptr: pointer to result userptr structure.
219 *
220 * This function does the following:
221 * - Allocate userptr structure.
222 * - Pin the given host memory using the userptr structure.
223 * - Perform DMA mapping to have the DMA addresses of the pages.
224 */
225 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
226 struct hl_userptr **p_userptr)
227 {
228 struct hl_userptr *userptr;
229 int rc;
230
231 userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
232 if (!userptr) {
233 rc = -ENOMEM;
234 goto userptr_err;
235 }
236
237 rc = hl_pin_host_memory(hdev, addr, size, userptr);
238 if (rc) {
239 dev_err(hdev->dev, "Failed to pin host memory\n");
240 goto pin_err;
241 }
242
243 userptr->dma_mapped = true;
244 userptr->dir = DMA_BIDIRECTIONAL;
245 userptr->vm_type = VM_TYPE_USERPTR;
246
247 *p_userptr = userptr;
248
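/* DMA-map the pinned pages; on failure they are unpinned in the error path below */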
249 rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
250 if (rc) {
251 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
252 goto dma_map_err;
253 }
254
255 return 0;
256
257 dma_map_err:
258 hl_unpin_host_memory(hdev, userptr);
259 pin_err:
260 kfree(userptr);
261 userptr_err:
262
263 return rc;
264 }
265
266 /**
267 * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
268 * @hdev: habanalabs device structure.
269 * @userptr: userptr to free.
270 *
271 * This function does the following:
272 * - Unpins the physical pages.
273 * - Frees the userptr structure.
274 */
275 static void dma_unmap_host_va(struct hl_device *hdev,
276 struct hl_userptr *userptr)
277 {
278 hl_unpin_host_memory(hdev, userptr);
279 kfree(userptr);
280 }
281
282 /**
283 * dram_pg_pool_do_release() - free DRAM pages pool
284 * @ref: pointer to reference object.
285 *
286 * This function does the following:
287 * - Frees the idr structure of physical pages handles.
288 * - Frees the generic pool of DRAM physical pages.
289 */
290 static void dram_pg_pool_do_release(struct kref *ref)
291 {
292 struct hl_vm *vm = container_of(ref, struct hl_vm,
293 dram_pg_pool_refcount);
294
295 /*
296 * free the idr here, as only here do we know for sure that there are no
297 * allocated physical pages and hence no handles in use
298 */
299 idr_destroy(&vm->phys_pg_pack_handles);
300 gen_pool_destroy(vm->dram_pg_pool);
301 }
302
303 /**
304 * free_phys_pg_pack() - free physical page pack.
305 * @hdev: habanalabs device structure.
306 * @phys_pg_pack: physical page pack to free.
307 *
308 * This function does the following:
309 * - For DRAM memory only
310 * - iterate over the pack, free each physical block structure by
311 * returning it to the general pool.
312 * - Free the hl_vm_phys_pg_pack structure.
313 */
314 static void free_phys_pg_pack(struct hl_device *hdev,
315 struct hl_vm_phys_pg_pack *phys_pg_pack)
316 {
317 struct hl_vm *vm = &hdev->vm;
318 u64 i;
319
320 if (phys_pg_pack->created_from_userptr)
321 goto end;
322
323 if (phys_pg_pack->contiguous) {
324 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
325 phys_pg_pack->total_size);
326
327 for (i = 0; i < phys_pg_pack->npages ; i++)
328 kref_put(&vm->dram_pg_pool_refcount,
329 dram_pg_pool_do_release);
330 } else {
331 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
332 gen_pool_free(vm->dram_pg_pool,
333 phys_pg_pack->pages[i],
334 phys_pg_pack->page_size);
335 kref_put(&vm->dram_pg_pool_refcount,
336 dram_pg_pool_do_release);
337 }
338 }
339
340 end:
341 kvfree(phys_pg_pack->pages);
342 kfree(phys_pg_pack);
343
344 return;
345 }
346
347 /**
348 * free_device_memory() - free device memory.
349 * @ctx: pointer to the context structure.
350 * @args: host parameters containing the requested size.
351 *
352 * This function does the following:
353 * - Free the device memory related to the given handle.
354 */
355 static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
356 {
357 struct hl_device *hdev = ctx->hdev;
358 struct hl_vm *vm = &hdev->vm;
359 struct hl_vm_phys_pg_pack *phys_pg_pack;
360 u32 handle = args->free.handle;
361
362 spin_lock(&vm->idr_lock);
363 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
364 if (!phys_pg_pack) {
365 spin_unlock(&vm->idr_lock);
366 dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
367 return -EINVAL;
368 }
369
370 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
371 spin_unlock(&vm->idr_lock);
372 dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
373 return -EINVAL;
374 }
375
376 /* must remove from the idr before freeing the physical pages, as the pool's refcount
377 * is also what triggers the idr destroy
378 */
379 idr_remove(&vm->phys_pg_pack_handles, handle);
380 spin_unlock(&vm->idr_lock);
381
382 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
383 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
384
385 free_phys_pg_pack(hdev, phys_pg_pack);
386
387 return 0;
388 }
389
390 /**
391 * clear_va_list_locked() - free virtual addresses list.
392 * @hdev: habanalabs device structure.
393 * @va_list: list of virtual addresses to free.
394 *
395 * This function does the following:
396 * - Iterate over the list and free each virtual addresses block.
397 *
398 * This function should be called only when va_list lock is taken.
399 */
400 static void clear_va_list_locked(struct hl_device *hdev,
401 struct list_head *va_list)
402 {
403 struct hl_vm_va_block *va_block, *tmp;
404
405 list_for_each_entry_safe(va_block, tmp, va_list, node) {
406 list_del(&va_block->node);
407 kfree(va_block);
408 }
409 }
410
411 /**
412 * print_va_list_locked() - print virtual addresses list.
413 * @hdev: habanalabs device structure.
414 * @va_list: list of virtual addresses to print.
415 *
416 * This function does the following:
417 * - Iterate over the list and print each virtual addresses block.
418 *
419 * This function should be called only when va_list lock is taken.
420 */
421 static void print_va_list_locked(struct hl_device *hdev,
422 struct list_head *va_list)
423 {
424 #if HL_MMU_DEBUG
425 struct hl_vm_va_block *va_block;
426
427 dev_dbg(hdev->dev, "print va list:\n");
428
429 list_for_each_entry(va_block, va_list, node)
430 dev_dbg(hdev->dev,
431 "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
432 va_block->start, va_block->end, va_block->size);
433 #endif
434 }
435
436 /**
437 * merge_va_blocks_locked() - merge a virtual block if possible.
438 * @hdev: pointer to the habanalabs device structure.
439 * @va_list: pointer to the virtual addresses block list.
440 * @va_block: virtual block to merge with adjacent blocks.
441 *
442 * This function does the following:
443 * - Merge the given block with the adjacent blocks if their virtual ranges
444 * create a contiguous virtual range.
445 *
446 * This function should be called only when va_list lock is taken.
447 */
448 static void merge_va_blocks_locked(struct hl_device *hdev,
449 struct list_head *va_list, struct hl_vm_va_block *va_block)
450 {
451 struct hl_vm_va_block *prev, *next;
452
453 prev = list_prev_entry(va_block, node);
454 if (&prev->node != va_list && prev->end + 1 == va_block->start) {
455 prev->end = va_block->end;
456 prev->size = prev->end - prev->start + 1;
457 list_del(&va_block->node);
458 kfree(va_block);
459 va_block = prev;
460 }
461
462 next = list_next_entry(va_block, node);
463 if (&next->node != va_list && va_block->end + 1 == next->start) {
464 next->start = va_block->start;
465 next->size = next->end - next->start + 1;
466 list_del(&va_block->node);
467 kfree(va_block);
468 }
469 }
470
471 /**
472 * add_va_block_locked() - add a virtual block to the virtual addresses list.
473 * @hdev: pointer to the habanalabs device structure.
474 * @va_list: pointer to the virtual addresses block list.
475 * @start: start virtual address.
476 * @end: end virtual address.
477 *
478 * This function does the following:
479 * - Add the given block to the virtual blocks list and merge with other blocks
480 * if a contiguous virtual block can be created.
481 *
482 * This function should be called only when va_list lock is taken.
483 */
484 static int add_va_block_locked(struct hl_device *hdev,
485 struct list_head *va_list, u64 start, u64 end)
486 {
487 struct hl_vm_va_block *va_block, *res = NULL;
488 u64 size = end - start + 1;
489
490 print_va_list_locked(hdev, va_list);
491
492 list_for_each_entry(va_block, va_list, node) {
493 /* TODO: remove upon matureness */
494 if (hl_mem_area_crosses_range(start, size, va_block->start,
495 va_block->end)) {
496 dev_err(hdev->dev,
497 "block crossing ranges at start 0x%llx, end 0x%llx\n",
498 va_block->start, va_block->end);
499 return -EINVAL;
500 }
501
502 if (va_block->end < start)
503 res = va_block;
504 }
505
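/* 'res' now points to the last block that ends before 'start', i.e. the insertion point that keeps the list sorted by address */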
506 va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
507 if (!va_block)
508 return -ENOMEM;
509
510 va_block->start = start;
511 va_block->end = end;
512 va_block->size = size;
513
514 if (!res)
515 list_add(&va_block->node, va_list);
516 else
517 list_add(&va_block->node, &res->node);
518
519 merge_va_blocks_locked(hdev, va_list, va_block);
520
521 print_va_list_locked(hdev, va_list);
522
523 return 0;
524 }
525
526 /**
527 * add_va_block() - wrapper for add_va_block_locked.
528 * @hdev: pointer to the habanalabs device structure.
529 * @va_range: pointer to the virtual addresses range object.
530 * @start: start virtual address.
531 * @end: end virtual address.
532 *
533 * This function does the following:
534 * - Takes the list lock and calls add_va_block_locked.
535 */
536 static inline int add_va_block(struct hl_device *hdev,
537 struct hl_va_range *va_range, u64 start, u64 end)
538 {
539 int rc;
540
541 mutex_lock(&va_range->lock);
542 rc = add_va_block_locked(hdev, &va_range->list, start, end);
543 mutex_unlock(&va_range->lock);
544
545 return rc;
546 }
547
548 /**
549 * is_hint_crossing_range() - check if the hint address crosses a reserved range.
550 * @range_type: virtual space range type.
551 * @start_addr: start virtual address.
552 * @size: block size.
553 * @prop: asic properties structure to retrieve reserved ranges from.
554 */
555 static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
556 u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
557 bool range_cross;
558
559 if (range_type == HL_VA_RANGE_TYPE_DRAM)
560 range_cross =
561 hl_mem_area_crosses_range(start_addr, size,
562 prop->hints_dram_reserved_va_range.start_addr,
563 prop->hints_dram_reserved_va_range.end_addr);
564 else if (range_type == HL_VA_RANGE_TYPE_HOST)
565 range_cross =
566 hl_mem_area_crosses_range(start_addr, size,
567 prop->hints_host_reserved_va_range.start_addr,
568 prop->hints_host_reserved_va_range.end_addr);
569 else
570 range_cross =
571 hl_mem_area_crosses_range(start_addr, size,
572 prop->hints_host_hpage_reserved_va_range.start_addr,
573 prop->hints_host_hpage_reserved_va_range.end_addr);
574
575 return range_cross;
576 }
577
578 /**
579 * get_va_block() - get a virtual block for the given size and alignment.
580 *
581 * @hdev: pointer to the habanalabs device structure.
582 * @va_range: pointer to the virtual addresses range.
583 * @size: requested block size.
584 * @hint_addr: hint for requested address by the user.
585 * @va_block_align: required alignment of the virtual block start address.
586 * @range_type: va range type (host, dram)
587 * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
588 *
589 * This function does the following:
590 * - Iterate on the virtual block list to find a suitable virtual block for the
591 * given size, hint address and alignment.
592 * - Reserve the requested block and update the list.
593 * - Return the start address of the virtual block.
594 */
595 static u64 get_va_block(struct hl_device *hdev,
596 struct hl_va_range *va_range,
597 u64 size, u64 hint_addr, u32 va_block_align,
598 enum hl_va_range_type range_type,
599 u32 flags)
600 {
601 struct hl_vm_va_block *va_block, *new_va_block = NULL;
602 struct asic_fixed_properties *prop = &hdev->asic_prop;
603 u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
604 align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
605 dram_hint_mask = prop->dram_hints_align_mask;
606 bool add_prev = false;
607 bool is_align_pow_2 = is_power_of_2(va_range->page_size);
608 bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
609 bool force_hint = flags & HL_MEM_FORCE_HINT;
610
611 if (is_align_pow_2)
612 align_mask = ~((u64)va_block_align - 1);
613 else
614 /*
615 * with non-power-of-2 range we work only with page granularity
616 * and the start address is page aligned,
617 * so no need for alignment checking.
618 */
619 size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
620 va_range->page_size;
621
622 tmp_hint_addr = hint_addr & ~dram_hint_mask;
623
624 /* Check if we need to ignore hint address */
625 if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
626 (!is_align_pow_2 && is_hint_dram_addr &&
627 do_div(tmp_hint_addr, va_range->page_size))) {
628
629 if (force_hint) {
630 /* Hint must be respected, so here we just fail */
631 dev_err(hdev->dev,
632 "Hint address 0x%llx is not page aligned - cannot be respected\n",
633 hint_addr);
634 return 0;
635 }
636
637 dev_dbg(hdev->dev,
638 "Hint address 0x%llx will be ignored because it is not aligned\n",
639 hint_addr);
640 hint_addr = 0;
641 }
642
643 mutex_lock(&va_range->lock);
644
645 print_va_list_locked(hdev, &va_range->list);
646
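/*
 * Scan for the smallest block that satisfies the size and alignment
 * requirements, but prefer a block that contains the hint address.
 */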
647 list_for_each_entry(va_block, &va_range->list, node) {
648 /* Calc the first possible aligned addr */
649 valid_start = va_block->start;
650
651 if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
652 valid_start &= align_mask;
653 valid_start += va_block_align;
654 if (valid_start > va_block->end)
655 continue;
656 }
657
658 valid_size = va_block->end - valid_start + 1;
659 if (valid_size < size)
660 continue;
661
662 /*
663 * In case the hint address is 0 and the hints_range_reservation
664 * property is enabled, avoid allocating va blocks from the
665 * range reserved for hint addresses
666 */
667 if (prop->hints_range_reservation && !hint_addr)
668 if (is_hint_crossing_range(range_type, valid_start,
669 size, prop))
670 continue;
671
672 /* Pick the minimal length block which has the required size */
673 if (!new_va_block || (valid_size < reserved_valid_size)) {
674 new_va_block = va_block;
675 reserved_valid_start = valid_start;
676 reserved_valid_size = valid_size;
677 }
678
679 if (hint_addr && hint_addr >= valid_start &&
680 (hint_addr + size) <= va_block->end) {
681 new_va_block = va_block;
682 reserved_valid_start = hint_addr;
683 reserved_valid_size = valid_size;
684 break;
685 }
686 }
687
688 if (!new_va_block) {
689 dev_err(hdev->dev, "no available va block for size %llu\n",
690 size);
691 goto out;
692 }
693
694 if (force_hint && reserved_valid_start != hint_addr) {
695 /* Hint address must be respected. If we are here - this means
696 * we could not respect it.
697 */
698 dev_err(hdev->dev,
699 "Hint address 0x%llx could not be respected\n",
700 hint_addr);
701 reserved_valid_start = 0;
702 goto out;
703 }
704
705 /*
706 * Check if there is some leftover range due to reserving the new
707 * va block, then return it to the main virtual addresses list.
708 */
709 if (reserved_valid_start > new_va_block->start) {
710 prev_start = new_va_block->start;
711 prev_end = reserved_valid_start - 1;
712
713 new_va_block->start = reserved_valid_start;
714 new_va_block->size = reserved_valid_size;
715
716 add_prev = true;
717 }
718
719 if (new_va_block->size > size) {
720 new_va_block->start += size;
721 new_va_block->size = new_va_block->end - new_va_block->start + 1;
722 } else {
723 list_del(&new_va_block->node);
724 kfree(new_va_block);
725 }
726
727 if (add_prev)
728 add_va_block_locked(hdev, &va_range->list, prev_start,
729 prev_end);
730
731 print_va_list_locked(hdev, &va_range->list);
732 out:
733 mutex_unlock(&va_range->lock);
734
735 return reserved_valid_start;
736 }
737
738 /*
739 * hl_reserve_va_block() - reserve a virtual block of a given size.
740 * @hdev: pointer to the habanalabs device structure.
741 * @ctx: current context
742 * @type: virtual addresses range type.
743 * @size: requested block size.
744 * @alignment: required alignment in bytes of the virtual block start address,
745 * 0 means no alignment.
746 *
747 * This function does the following:
748 * - Iterate on the virtual block list to find a suitable virtual block for the
749 * given size and alignment.
750 * - Reserve the requested block and update the list.
751 * - Return the start address of the virtual block.
752 */
753 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
754 enum hl_va_range_type type, u64 size, u32 alignment)
755 {
756 return get_va_block(hdev, ctx->va_range[type], size, 0,
757 max(alignment, ctx->va_range[type]->page_size),
758 type, 0);
759 }
760
761 /**
762 * hl_get_va_range_type() - get va_range type for the given address and size.
763 * @ctx: context to fetch va_range from.
764 * @address: the start address of the area we want to validate.
765 * @size: the size in bytes of the area we want to validate.
766 * @type: returned va_range type.
767 *
768 * Return: true if the area is inside a valid range, false otherwise.
769 */
770 static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
771 enum hl_va_range_type *type)
772 {
773 int i;
774
775 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
776 if (hl_mem_area_inside_range(address, size,
777 ctx->va_range[i]->start_addr,
778 ctx->va_range[i]->end_addr)) {
779 *type = i;
780 return 0;
781 }
782 }
783
784 return -EINVAL;
785 }
786
787 /**
788 * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
789 * @hdev: pointer to the habanalabs device structure
790 * @ctx: pointer to the context structure.
791 * @start_addr: start virtual address.
792 * @size: number of bytes to unreserve.
793 *
794 * This function does the following:
795 * - Takes the list lock and calls add_va_block_locked.
796 */
797 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
798 u64 start_addr, u64 size)
799 {
800 enum hl_va_range_type type;
801 int rc;
802
803 rc = hl_get_va_range_type(ctx, start_addr, size, &type);
804 if (rc) {
805 dev_err(hdev->dev,
806 "cannot find va_range for va %#llx size %llu",
807 start_addr, size);
808 return rc;
809 }
810
811 rc = add_va_block(hdev, ctx->va_range[type], start_addr,
812 start_addr + size - 1);
813 if (rc)
814 dev_warn(hdev->dev,
815 "add va block failed for vaddr: 0x%llx\n", start_addr);
816
817 return rc;
818 }
819
820 /**
821 * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
822 * memory
823 * @ctx: pointer to the context structure.
824 * @userptr: userptr to initialize from.
825 * @pphys_pg_pack: result pointer.
826 * @force_regular_page: tell the function to ignore huge page optimization,
827 * even if possible. Needed for cases where the device VA
828 * is allocated before we know the composition of the
829 * physical pages
830 *
831 * This function does the following:
832 * - Pin the physical pages related to the given virtual block.
833 * - Create a physical page pack from the physical pages related to the given
834 * virtual block.
835 */
836 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
837 struct hl_userptr *userptr,
838 struct hl_vm_phys_pg_pack **pphys_pg_pack,
839 bool force_regular_page)
840 {
841 u32 npages, page_size = PAGE_SIZE,
842 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
843 u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
844 struct hl_vm_phys_pg_pack *phys_pg_pack;
845 bool first = true, is_huge_page_opt;
846 u64 page_mask, total_npages;
847 struct scatterlist *sg;
848 dma_addr_t dma_addr;
849 int rc, i, j;
850
851 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
852 if (!phys_pg_pack)
853 return -ENOMEM;
854
855 phys_pg_pack->vm_type = userptr->vm_type;
856 phys_pg_pack->created_from_userptr = true;
857 phys_pg_pack->asid = ctx->asid;
858 atomic_set(&phys_pg_pack->mapping_cnt, 1);
859
860 is_huge_page_opt = (force_regular_page ? false : true);
861
862 /* We can use huge page mapping only if all dma_addrs are
863 * aligned to 2MB and their sizes are at least 2MB.
864 * We limit the 2MB optimization to this condition,
865 * since later on we acquire the related VA range as one
866 * consecutive block.
867 */
868 total_npages = 0;
869 for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
870 npages = hl_get_sg_info(sg, &dma_addr);
871
872 total_npages += npages;
873
874 if ((npages % pgs_in_huge_page) ||
875 (dma_addr & (huge_page_size - 1)))
876 is_huge_page_opt = false;
877 }
878
879 if (is_huge_page_opt) {
880 page_size = huge_page_size;
881 do_div(total_npages, pgs_in_huge_page);
882 }
883
884 page_mask = ~(((u64) page_size) - 1);
885
886 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
887 GFP_KERNEL);
888 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
889 rc = -ENOMEM;
890 goto page_pack_arr_mem_err;
891 }
892
893 phys_pg_pack->npages = total_npages;
894 phys_pg_pack->page_size = page_size;
895 phys_pg_pack->total_size = total_npages * page_size;
896
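/* second pass: expand each SG entry into page_size-granular physical addresses */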
897 j = 0;
898 for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
899 npages = hl_get_sg_info(sg, &dma_addr);
900
901 /* align down to physical page size and save the offset */
902 if (first) {
903 first = false;
904 phys_pg_pack->offset = dma_addr & (page_size - 1);
905 dma_addr &= page_mask;
906 }
907
908 while (npages) {
909 phys_pg_pack->pages[j++] = dma_addr;
910 dma_addr += page_size;
911
912 if (is_huge_page_opt)
913 npages -= pgs_in_huge_page;
914 else
915 npages--;
916 }
917 }
918
919 *pphys_pg_pack = phys_pg_pack;
920
921 return 0;
922
923 page_pack_arr_mem_err:
924 kfree(phys_pg_pack);
925
926 return rc;
927 }
928
929 /**
930 * map_phys_pg_pack() - maps the physical page pack.
931 * @ctx: pointer to the context structure.
932 * @vaddr: start address of the virtual area to map from.
933 * @phys_pg_pack: the pack of physical pages to map to.
934 *
935 * This function does the following:
936 * - Maps each chunk of virtual memory to matching physical chunk.
937 * - Stores number of successful mappings in the given argument.
938 * - Returns 0 on success, error code otherwise.
939 */
940 static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
941 struct hl_vm_phys_pg_pack *phys_pg_pack)
942 {
943 struct hl_device *hdev = ctx->hdev;
944 u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
945 u32 page_size = phys_pg_pack->page_size;
946 int rc = 0;
947 bool is_host_addr;
948
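/* map page by page; flush the MMU page-table writes only on the last page */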
949 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
950 paddr = phys_pg_pack->pages[i];
951
952 rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
953 (i + 1) == phys_pg_pack->npages);
954 if (rc) {
955 dev_err(hdev->dev,
956 "map failed for handle %u, npages: %llu, mapped: %llu",
957 phys_pg_pack->handle, phys_pg_pack->npages,
958 mapped_pg_cnt);
959 goto err;
960 }
961
962 mapped_pg_cnt++;
963 next_vaddr += page_size;
964 }
965
966 return 0;
967
968 err:
969 is_host_addr = !hl_is_dram_va(hdev, vaddr);
970
971 next_vaddr = vaddr;
972 for (i = 0 ; i < mapped_pg_cnt ; i++) {
973 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
974 (i + 1) == mapped_pg_cnt))
975 dev_warn_ratelimited(hdev->dev,
976 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
977 phys_pg_pack->handle, next_vaddr,
978 phys_pg_pack->pages[i], page_size);
979
980 next_vaddr += page_size;
981
982 /*
983 * unmapping on Palladium can be really long, so avoid a CPU
984 * soft lockup bug by sleeping a little between unmapping pages
985 *
986 * In addition, the number of host pages could be huge
987 * (page size could be 4KB), so when unmapping host
988 * pages, sleep every 32K pages to avoid a soft lockup
989 */
990 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
991 usleep_range(50, 200);
992 }
993
994 return rc;
995 }
996
997 /**
998 * unmap_phys_pg_pack() - unmaps the physical page pack.
999 * @ctx: pointer to the context structure.
1000 * @vaddr: start address of the virtual area to unmap.
1001 * @phys_pg_pack: the pack of physical pages to unmap.
1002 */
1003 static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
1004 struct hl_vm_phys_pg_pack *phys_pg_pack)
1005 {
1006 struct hl_device *hdev = ctx->hdev;
1007 u64 next_vaddr, i;
1008 bool is_host_addr;
1009 u32 page_size;
1010
1011 is_host_addr = !hl_is_dram_va(hdev, vaddr);
1012 page_size = phys_pg_pack->page_size;
1013 next_vaddr = vaddr;
1014
1015 for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
1016 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
1017 (i + 1) == phys_pg_pack->npages))
1018 dev_warn_ratelimited(hdev->dev,
1019 "unmap failed for vaddr: 0x%llx\n", next_vaddr);
1020
1021 /*
1022 * unmapping on Palladium can be really long, so avoid a CPU
1023 * soft lockup bug by sleeping a little between unmapping pages
1024 *
1025 * In addition, the number of host pages could be huge
1026 * (page size could be 4KB), so when unmapping host
1027 * pages, sleep every 32K pages to avoid a soft lockup
1028 */
1029 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
1030 usleep_range(50, 200);
1031 }
1032 }
1033
1034 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
1035 u64 *paddr)
1036 {
1037 struct hl_device *hdev = ctx->hdev;
1038 struct hl_vm *vm = &hdev->vm;
1039 struct hl_vm_phys_pg_pack *phys_pg_pack;
1040 u32 handle;
1041
1042 handle = lower_32_bits(args->map_device.handle);
1043 spin_lock(&vm->idr_lock);
1044 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1045 if (!phys_pg_pack) {
1046 spin_unlock(&vm->idr_lock);
1047 dev_err(hdev->dev, "no match for handle %u\n", handle);
1048 return -EINVAL;
1049 }
1050
1051 *paddr = phys_pg_pack->pages[0];
1052
1053 spin_unlock(&vm->idr_lock);
1054
1055 return 0;
1056 }
1057
1058 /**
1059 * map_device_va() - map the given memory.
1060 * @ctx: pointer to the context structure.
1061 * @args: host parameters with handle/host virtual address.
1062 * @device_addr: pointer to result device virtual address.
1063 *
1064 * This function does the following:
1065 * - If given a physical device memory handle, map to a device virtual block
1066 * and return the start address of this block.
1067 * - If given a host virtual address and size, find the related physical pages,
1068 * map a device virtual block to these pages and return the start address of
1069 * this block.
1070 */
1071 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
1072 {
1073 struct hl_vm_phys_pg_pack *phys_pg_pack;
1074 enum hl_va_range_type va_range_type = 0;
1075 struct hl_device *hdev = ctx->hdev;
1076 struct hl_userptr *userptr = NULL;
1077 u32 handle = 0, va_block_align;
1078 struct hl_vm_hash_node *hnode;
1079 struct hl_vm *vm = &hdev->vm;
1080 struct hl_va_range *va_range;
1081 bool is_userptr, do_prefetch;
1082 u64 ret_vaddr, hint_addr;
1083 enum vm_type *vm_type;
1084 int rc;
1085
1086 /* set map flags */
1087 is_userptr = args->flags & HL_MEM_USERPTR;
1088 do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
1089
1090 /* Assume failure */
1091 *device_addr = 0;
1092
1093 if (is_userptr) {
1094 u64 addr = args->map_host.host_virt_addr,
1095 size = args->map_host.mem_size;
1096 u32 page_size = hdev->asic_prop.pmmu.page_size,
1097 huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
1098
1099 rc = dma_map_host_va(hdev, addr, size, &userptr);
1100 if (rc) {
1101 dev_err(hdev->dev, "failed to get userptr from va\n");
1102 return rc;
1103 }
1104
1105 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1106 &phys_pg_pack, false);
1107 if (rc) {
1108 dev_err(hdev->dev,
1109 "unable to init page pack for vaddr 0x%llx\n",
1110 addr);
1111 goto init_page_pack_err;
1112 }
1113
1114 vm_type = (enum vm_type *) userptr;
1115 hint_addr = args->map_host.hint_addr;
1116 handle = phys_pg_pack->handle;
1117
1118 /* get required alignment */
1119 if (phys_pg_pack->page_size == page_size) {
1120 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1121 va_range_type = HL_VA_RANGE_TYPE_HOST;
1122 /*
1123 * huge page alignment may be needed in case of regular
1124 * page mapping, depending on the host VA alignment
1125 */
1126 if (addr & (huge_page_size - 1))
1127 va_block_align = page_size;
1128 else
1129 va_block_align = huge_page_size;
1130 } else {
1131 /*
1132 * huge page alignment is needed in case of huge page
1133 * mapping
1134 */
1135 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1136 va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
1137 va_block_align = huge_page_size;
1138 }
1139 } else {
1140 handle = lower_32_bits(args->map_device.handle);
1141
1142 spin_lock(&vm->idr_lock);
1143 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1144 if (!phys_pg_pack) {
1145 spin_unlock(&vm->idr_lock);
1146 dev_err(hdev->dev,
1147 "no match for handle %u\n", handle);
1148 return -EINVAL;
1149 }
1150
1151 /* increment now to avoid freeing device memory while mapping */
1152 atomic_inc(&phys_pg_pack->mapping_cnt);
1153
1154 spin_unlock(&vm->idr_lock);
1155
1156 vm_type = (enum vm_type *) phys_pg_pack;
1157
1158 hint_addr = args->map_device.hint_addr;
1159
1160 /* DRAM VA alignment is the same as the MMU page size */
1161 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1162 va_range_type = HL_VA_RANGE_TYPE_DRAM;
1163 va_block_align = hdev->asic_prop.dmmu.page_size;
1164 }
1165
1166 /*
1167 * relevant for mapping device physical memory only, as host memory is
1168 * implicitly shared
1169 */
1170 if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
1171 phys_pg_pack->asid != ctx->asid) {
1172 dev_err(hdev->dev,
1173 "Failed to map memory, handle %u is not shared\n",
1174 handle);
1175 rc = -EPERM;
1176 goto shared_err;
1177 }
1178
1179 hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
1180 if (!hnode) {
1181 rc = -ENOMEM;
1182 goto hnode_err;
1183 }
1184
1185 if (hint_addr && phys_pg_pack->offset) {
1186 if (args->flags & HL_MEM_FORCE_HINT) {
1187 /* Fail if hint must be respected but it can't be */
1188 dev_err(hdev->dev,
1189 "Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
1190 hint_addr, phys_pg_pack->offset);
1191 rc = -EINVAL;
1192 goto va_block_err;
1193 }
1194 dev_dbg(hdev->dev,
1195 "Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
1196 hint_addr, phys_pg_pack->offset);
1197 }
1198
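/* reserve a device VA block large enough for the entire pack, honoring the hint address if it passed the checks above */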
1199 ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
1200 hint_addr, va_block_align,
1201 va_range_type, args->flags);
1202 if (!ret_vaddr) {
1203 dev_err(hdev->dev, "no available va block for handle %u\n",
1204 handle);
1205 rc = -ENOMEM;
1206 goto va_block_err;
1207 }
1208
1209 mutex_lock(&hdev->mmu_lock);
1210
1211 rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
1212 if (rc) {
1213 dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
1214 mutex_unlock(&hdev->mmu_lock);
1215 goto map_err;
1216 }
1217
1218 rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
1219 ctx->asid, ret_vaddr, phys_pg_pack->total_size);
1220 mutex_unlock(&hdev->mmu_lock);
1221 if (rc)
1222 goto map_err;
1223
1224 /*
1225 * prefetch is done upon the user's request. it is performed in a WQ and so can
1226 * be outside the MMU lock. the operation itself is already protected by the mmu lock
1227 */
1228 if (do_prefetch) {
1229 rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
1230 phys_pg_pack->total_size);
1231 if (rc)
1232 goto map_err;
1233 }
1234
1235 ret_vaddr += phys_pg_pack->offset;
1236
1237 hnode->ptr = vm_type;
1238 hnode->vaddr = ret_vaddr;
1239 hnode->handle = is_userptr ? MEM_HANDLE_INVALID : handle;
1240
1241 mutex_lock(&ctx->mem_hash_lock);
1242 hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1243 mutex_unlock(&ctx->mem_hash_lock);
1244
1245 *device_addr = ret_vaddr;
1246
1247 if (is_userptr)
1248 free_phys_pg_pack(hdev, phys_pg_pack);
1249
1250 return rc;
1251
1252 map_err:
1253 if (add_va_block(hdev, va_range, ret_vaddr,
1254 ret_vaddr + phys_pg_pack->total_size - 1))
1255 dev_warn(hdev->dev,
1256 "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1257 handle, ret_vaddr);
1258
1259 va_block_err:
1260 kfree(hnode);
1261 hnode_err:
1262 shared_err:
1263 atomic_dec(&phys_pg_pack->mapping_cnt);
1264 if (is_userptr)
1265 free_phys_pg_pack(hdev, phys_pg_pack);
1266 init_page_pack_err:
1267 if (is_userptr)
1268 dma_unmap_host_va(hdev, userptr);
1269
1270 return rc;
1271 }
1272
1273 /**
1274 * unmap_device_va() - unmap the given device virtual address.
1275 * @ctx: pointer to the context structure.
1276 * @args: host parameters with device virtual address to unmap.
1277 * @ctx_free: true if in context free flow, false otherwise.
1278 *
1279 * This function does the following:
1280 * - unmap the physical pages related to the given virtual address.
1281 * - return the device virtual block to the virtual block list.
1282 */
1283 static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
1284 bool ctx_free)
1285 {
1286 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1287 u64 vaddr = args->unmap.device_virt_addr;
1288 struct hl_vm_hash_node *hnode = NULL;
1289 struct asic_fixed_properties *prop;
1290 struct hl_device *hdev = ctx->hdev;
1291 struct hl_userptr *userptr = NULL;
1292 struct hl_va_range *va_range;
1293 enum vm_type *vm_type;
1294 bool is_userptr;
1295 int rc = 0;
1296
1297 prop = &hdev->asic_prop;
1298
1299 /* protect from double entrance */
1300 mutex_lock(&ctx->mem_hash_lock);
1301 hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
1302 if (vaddr == hnode->vaddr)
1303 break;
1304
1305 if (!hnode) {
1306 mutex_unlock(&ctx->mem_hash_lock);
1307 dev_err(hdev->dev,
1308 "unmap failed, no mem hnode for vaddr 0x%llx\n",
1309 vaddr);
1310 return -EINVAL;
1311 }
1312
1313 if (hnode->export_cnt) {
1314 mutex_unlock(&ctx->mem_hash_lock);
1315 dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
1316 return -EINVAL;
1317 }
1318
1319 hash_del(&hnode->node);
1320 mutex_unlock(&ctx->mem_hash_lock);
1321
1322 vm_type = hnode->ptr;
1323
1324 if (*vm_type == VM_TYPE_USERPTR) {
1325 is_userptr = true;
1326 userptr = hnode->ptr;
1327
1328 rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
1329 false);
1330 if (rc) {
1331 dev_err(hdev->dev,
1332 "unable to init page pack for vaddr 0x%llx\n",
1333 vaddr);
1334 goto vm_type_err;
1335 }
1336
1337 if (phys_pg_pack->page_size ==
1338 hdev->asic_prop.pmmu.page_size)
1339 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1340 else
1341 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1342 } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1343 is_userptr = false;
1344 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1345 phys_pg_pack = hnode->ptr;
1346 } else {
1347 dev_warn(hdev->dev,
1348 "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1349 vaddr);
1350 rc = -EFAULT;
1351 goto vm_type_err;
1352 }
1353
1354 if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1355 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1356 rc = -EINVAL;
1357 goto mapping_cnt_err;
1358 }
1359
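/*
 * Round the VA down to the start of the mapping: power-of-2 page sizes
 * use a mask, non-power-of-2 DRAM page sizes need an explicit divide.
 */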
1360 if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
1361 vaddr = prop->dram_base_address +
1362 DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
1363 phys_pg_pack->page_size) *
1364 phys_pg_pack->page_size;
1365 else
1366 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1367
1368 mutex_lock(&hdev->mmu_lock);
1369
1370 unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1371
1372 /*
1373 * During context free this function is called in a loop to clean all
1374 * the context mappings. Hence the cache invalidation can be called once
1375 * at the loop end rather than for each iteration
1376 */
1377 if (!ctx_free)
1378 rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
1379 phys_pg_pack->total_size);
1380
1381 mutex_unlock(&hdev->mmu_lock);
1382
1383 /*
1384 * If the context is closing we don't need to check for the MMU cache
1385 * invalidation return code and update the VA free list as in this flow
1386 * we invalidate the MMU cache outside of this unmap function and the VA
1387 * free list will be freed anyway.
1388 */
1389 if (!ctx_free) {
1390 int tmp_rc;
1391
1392 tmp_rc = add_va_block(hdev, va_range, vaddr,
1393 vaddr + phys_pg_pack->total_size - 1);
1394 if (tmp_rc) {
1395 dev_warn(hdev->dev,
1396 "add va block failed for vaddr: 0x%llx\n",
1397 vaddr);
1398 if (!rc)
1399 rc = tmp_rc;
1400 }
1401 }
1402
1403 atomic_dec(&phys_pg_pack->mapping_cnt);
1404 kfree(hnode);
1405
1406 if (is_userptr) {
1407 free_phys_pg_pack(hdev, phys_pg_pack);
1408 dma_unmap_host_va(hdev, userptr);
1409 }
1410
1411 return rc;
1412
1413 mapping_cnt_err:
1414 if (is_userptr)
1415 free_phys_pg_pack(hdev, phys_pg_pack);
1416 vm_type_err:
1417 mutex_lock(&ctx->mem_hash_lock);
1418 hash_add(ctx->mem_hash, &hnode->node, vaddr);
1419 mutex_unlock(&ctx->mem_hash_lock);
1420
1421 return rc;
1422 }
1423
1424 static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
1425 {
1426 u32 block_id;
1427 int rc;
1428
1429 *handle = 0;
1430 if (size)
1431 *size = 0;
1432
1433 rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
1434 if (rc)
1435 return rc;
1436
1437 *handle = block_id | HL_MMAP_TYPE_BLOCK;
1438 *handle <<= PAGE_SHIFT;
1439
1440 return 0;
1441 }
1442
1443 static void hw_block_vm_close(struct vm_area_struct *vma)
1444 {
1445 struct hl_vm_hw_block_list_node *lnode =
1446 (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
1447 struct hl_ctx *ctx = lnode->ctx;
1448 long new_mmap_size;
1449
1450 new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
1451 if (new_mmap_size > 0) {
1452 lnode->mapped_size = new_mmap_size;
1453 return;
1454 }
1455
1456 mutex_lock(&ctx->hw_block_list_lock);
1457 list_del(&lnode->node);
1458 mutex_unlock(&ctx->hw_block_list_lock);
1459 hl_ctx_put(ctx);
1460 kfree(lnode);
1461 vma->vm_private_data = NULL;
1462 }
1463
1464 static const struct vm_operations_struct hw_block_vm_ops = {
1465 .close = hw_block_vm_close
1466 };
1467
1468 /**
1469 * hl_hw_block_mmap() - mmap a hw block to user.
1470 * @hpriv: pointer to the private data of the fd
1471 * @vma: pointer to vm_area_struct of the process
1472 *
1473 * Driver increments context reference for every HW block mapped in order
1474 * to prevent user from closing FD without unmapping first
1475 */
1476 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
1477 {
1478 struct hl_vm_hw_block_list_node *lnode;
1479 struct hl_device *hdev = hpriv->hdev;
1480 struct hl_ctx *ctx = hpriv->ctx;
1481 u32 block_id, block_size;
1482 int rc;
1483
1484 /* We use the page offset to hold the block id and thus we need to clear
1485 * it before doing the mmap itself
1486 */
1487 block_id = vma->vm_pgoff;
1488 vma->vm_pgoff = 0;
1489
1490 /* Driver only allows mapping of a complete HW block */
1491 block_size = vma->vm_end - vma->vm_start;
1492
1493 if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
1494 dev_err(hdev->dev,
1495 "user pointer is invalid - 0x%lx\n",
1496 vma->vm_start);
1497
1498 return -EINVAL;
1499 }
1500
1501 lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
1502 if (!lnode)
1503 return -ENOMEM;
1504
1505 rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
1506 if (rc) {
1507 kfree(lnode);
1508 return rc;
1509 }
1510
1511 hl_ctx_get(ctx);
1512
1513 lnode->ctx = ctx;
1514 lnode->vaddr = vma->vm_start;
1515 lnode->block_size = block_size;
1516 lnode->mapped_size = lnode->block_size;
1517 lnode->id = block_id;
1518
1519 vma->vm_private_data = lnode;
1520 vma->vm_ops = &hw_block_vm_ops;
1521
1522 mutex_lock(&ctx->hw_block_list_lock);
1523 list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
1524 mutex_unlock(&ctx->hw_block_list_lock);
1525
1526 vma->vm_pgoff = block_id;
1527
1528 return 0;
1529 }
1530
1531 static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
1532 struct device *dev, enum dma_data_direction dir)
1533 {
1534 dma_addr_t addr;
1535 int rc;
1536
1537 addr = dma_map_resource(dev, bar_address, chunk_size, dir,
1538 DMA_ATTR_SKIP_CPU_SYNC);
1539 rc = dma_mapping_error(dev, addr);
1540 if (rc)
1541 return rc;
1542
1543 sg_set_page(sg, NULL, chunk_size, 0);
1544 sg_dma_address(sg) = addr;
1545 sg_dma_len(sg) = chunk_size;
1546
1547 return 0;
1548 }
1549
1550 static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
1551 u64 page_size, u64 exported_size,
1552 struct device *dev, enum dma_data_direction dir)
1553 {
1554 u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages;
1555 struct asic_fixed_properties *prop;
1556 int rc, i, j, nents, cur_page;
1557 struct scatterlist *sg;
1558 struct sg_table *sgt;
1559
1560 prop = &hdev->asic_prop;
1561
1562 dma_max_seg_size = dma_get_max_seg_size(dev);
1563
1564 /* We would like to align the max segment size to PAGE_SIZE, so the
1565 * SGL will contain aligned addresses that can be easily mapped to
1566 * an MMU
1567 */
1568 dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
1569 if (dma_max_seg_size < PAGE_SIZE) {
1570 dev_err_ratelimited(hdev->dev,
1571 "dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
1572 dma_max_seg_size);
1573 return ERR_PTR(-EINVAL);
1574 }
1575
1576 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
1577 if (!sgt)
1578 return ERR_PTR(-ENOMEM);
1579
1580 /* remove export size restrictions in case not explicitly defined */
1581 cur_size_to_export = exported_size ? exported_size : (npages * page_size);
1582
1583 /* If the size of each page is larger than the dma max segment size,
1584 * then we can't combine pages and the number of entries in the SGL
1585 * will just be the
1586 * <number of pages> * <chunks of max segment size in each page>
1587 */
1588 if (page_size > dma_max_seg_size) {
1589 /* we should limit number of pages according to the exported size */
1590 cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size);
1591 nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
1592 } else {
1593 cur_npages = npages;
1594
1595 /* Get number of non-contiguous chunks */
1596 for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
1597 if (pages[i - 1] + page_size != pages[i] ||
1598 chunk_size + page_size > dma_max_seg_size) {
1599 nents++;
1600 chunk_size = page_size;
1601 continue;
1602 }
1603
1604 chunk_size += page_size;
1605 }
1606 }
1607
1608 rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
1609 if (rc)
1610 goto error_free;
1611
1612 cur_page = 0;
1613
1614 if (page_size > dma_max_seg_size) {
1615 u64 size_left, cur_device_address = 0;
1616
1617 size_left = page_size;
1618
1619 /* Need to split each page into the number of chunks of
1620 * dma_max_seg_size
1621 */
1622 for_each_sgtable_dma_sg(sgt, sg, i) {
1623 if (size_left == page_size)
1624 cur_device_address =
1625 pages[cur_page] - prop->dram_base_address;
1626 else
1627 cur_device_address += dma_max_seg_size;
1628
1629 /* make sure not to export over exported size */
1630 chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export);
1631
1632 bar_address = hdev->dram_pci_bar_start + cur_device_address;
1633
1634 rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1635 if (rc)
1636 goto error_unmap;
1637
1638 cur_size_to_export -= chunk_size;
1639
1640 if (size_left > dma_max_seg_size) {
1641 size_left -= dma_max_seg_size;
1642 } else {
1643 cur_page++;
1644 size_left = page_size;
1645 }
1646 }
1647 } else {
1648 /* Merge pages and put them into the scatterlist */
1649 for_each_sgtable_dma_sg(sgt, sg, i) {
1650 chunk_size = page_size;
1651 for (j = cur_page + 1 ; j < cur_npages ; j++) {
1652 if (pages[j - 1] + page_size != pages[j] ||
1653 chunk_size + page_size > dma_max_seg_size)
1654 break;
1655
1656 chunk_size += page_size;
1657 }
1658
1659 bar_address = hdev->dram_pci_bar_start +
1660 (pages[cur_page] - prop->dram_base_address);
1661
1662 /* make sure not to export over exported size */
1663 chunk_size = min(chunk_size, cur_size_to_export);
1664 rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1665 if (rc)
1666 goto error_unmap;
1667
1668 cur_size_to_export -= chunk_size;
1669 cur_page = j;
1670 }
1671 }
1672
1673 /* Because we are not going to include a CPU list, we want other users to
1674 * have some chance of detecting this, by setting orig_nents
1675 * to 0 and using only nents (length of the DMA list) when going over the
1676 * sgl
1677 */
1678 sgt->orig_nents = 0;
1679
1680 return sgt;
1681
1682 error_unmap:
1683 for_each_sgtable_dma_sg(sgt, sg, i) {
1684 if (!sg_dma_len(sg))
1685 continue;
1686
1687 dma_unmap_resource(dev, sg_dma_address(sg),
1688 sg_dma_len(sg), dir,
1689 DMA_ATTR_SKIP_CPU_SYNC);
1690 }
1691
1692 sg_free_table(sgt);
1693
1694 error_free:
1695 kfree(sgt);
1696 return ERR_PTR(rc);
1697 }
1698
1699 static int hl_dmabuf_attach(struct dma_buf *dmabuf,
1700 struct dma_buf_attachment *attachment)
1701 {
1702 struct hl_dmabuf_priv *hl_dmabuf;
1703 struct hl_device *hdev;
1704 int rc;
1705
1706 hl_dmabuf = dmabuf->priv;
1707 hdev = hl_dmabuf->ctx->hdev;
1708
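/* check whether the importer can reach our device over PCI peer-to-peer; if not, mapping is refused in hl_map_dmabuf() */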
1709 rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true);
1710
1711 if (rc < 0)
1712 attachment->peer2peer = false;
1713 return 0;
1714 }
1715
1716 static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
1717 enum dma_data_direction dir)
1718 {
1719 struct dma_buf *dma_buf = attachment->dmabuf;
1720 struct hl_vm_phys_pg_pack *phys_pg_pack;
1721 struct hl_dmabuf_priv *hl_dmabuf;
1722 struct hl_device *hdev;
1723 struct sg_table *sgt;
1724
1725 hl_dmabuf = dma_buf->priv;
1726 hdev = hl_dmabuf->ctx->hdev;
1727 phys_pg_pack = hl_dmabuf->phys_pg_pack;
1728
1729 if (!attachment->peer2peer) {
1730 dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
1731 return ERR_PTR(-EPERM);
1732 }
1733
1734 if (phys_pg_pack)
1735 sgt = alloc_sgt_from_device_pages(hdev,
1736 phys_pg_pack->pages,
1737 phys_pg_pack->npages,
1738 phys_pg_pack->page_size,
1739 phys_pg_pack->exported_size,
1740 attachment->dev,
1741 dir);
1742 else
1743 sgt = alloc_sgt_from_device_pages(hdev,
1744 &hl_dmabuf->device_address,
1745 1,
1746 hl_dmabuf->dmabuf->size,
1747 0,
1748 attachment->dev,
1749 dir);
1750
1751 if (IS_ERR(sgt))
1752 dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
1753
1754 return sgt;
1755 }
1756
1757 static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
1758 struct sg_table *sgt,
1759 enum dma_data_direction dir)
1760 {
1761 struct scatterlist *sg;
1762 int i;
1763
1764 /* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
1765 * only in the 'device' domain (after all, it maps a PCI bar address which points to the
1766 * device memory).
1767 *
1768 * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
1769 * a sync of the memory to the CPU's cache, as it never resided inside that cache.
1770 */
1771 for_each_sgtable_dma_sg(sgt, sg, i)
1772 dma_unmap_resource(attachment->dev, sg_dma_address(sg),
1773 sg_dma_len(sg), dir,
1774 DMA_ATTR_SKIP_CPU_SYNC);
1775
1776 /* Need to restore orig_nents because sg_free_table use that field */
1777 sgt->orig_nents = sgt->nents;
1778 sg_free_table(sgt);
1779 kfree(sgt);
1780 }
1781
1782 static void hl_release_dmabuf(struct dma_buf *dmabuf)
1783 {
1784 struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
1785 struct hl_ctx *ctx;
1786
1787 if (!hl_dmabuf)
1788 return;
1789
1790 ctx = hl_dmabuf->ctx;
1791
1792 if (hl_dmabuf->memhash_hnode) {
1793 mutex_lock(&ctx->mem_hash_lock);
1794 hl_dmabuf->memhash_hnode->export_cnt--;
1795 mutex_unlock(&ctx->mem_hash_lock);
1796 }
1797
1798 hl_ctx_put(ctx);
1799 kfree(hl_dmabuf);
1800 }
1801
1802 static const struct dma_buf_ops habanalabs_dmabuf_ops = {
1803 .attach = hl_dmabuf_attach,
1804 .map_dma_buf = hl_map_dmabuf,
1805 .unmap_dma_buf = hl_unmap_dmabuf,
1806 .release = hl_release_dmabuf,
1807 };
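/*
 * Typical importer flow that exercises these ops (illustrative sketch only;
 * importer_dev is a hypothetical importing device and error handling is
 * omitted):
 *
 *	attach = dma_buf_attach(dmabuf, importer_dev);            -> hl_dmabuf_attach()
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);  -> hl_map_dmabuf()
 *	... DMA to/from the device memory described by sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); -> hl_unmap_dmabuf()
 *	dma_buf_detach(dmabuf, attach);
 *
 * hl_release_dmabuf() runs when the last reference to the dma-buf is dropped.
 */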
1808
1809 static int export_dmabuf(struct hl_ctx *ctx,
1810 struct hl_dmabuf_priv *hl_dmabuf,
1811 u64 total_size, int flags, int *dmabuf_fd)
1812 {
1813 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1814 struct hl_device *hdev = ctx->hdev;
1815 int rc, fd;
1816
1817 exp_info.ops = &habanalabs_dmabuf_ops;
1818 exp_info.size = total_size;
1819 exp_info.flags = flags;
1820 exp_info.priv = hl_dmabuf;
1821
1822 hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
1823 if (IS_ERR(hl_dmabuf->dmabuf)) {
1824 dev_err(hdev->dev, "failed to export dma-buf\n");
1825 return PTR_ERR(hl_dmabuf->dmabuf);
1826 }
1827
1828 fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
1829 if (fd < 0) {
1830 dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
1831 rc = fd;
1832 goto err_dma_buf_put;
1833 }
1834
1835 hl_dmabuf->ctx = ctx;
1836 hl_ctx_get(hl_dmabuf->ctx);
1837
1838 *dmabuf_fd = fd;
1839
1840 return 0;
1841
1842 err_dma_buf_put:
1843 hl_dmabuf->dmabuf->priv = NULL;
1844 dma_buf_put(hl_dmabuf->dmabuf);
1845 return rc;
1846 }
1847
1848 static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
1849 {
1850 if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
1851 dev_dbg(hdev->dev,
1852 "exported device memory address 0x%llx should be aligned to 0x%lx\n",
1853 device_addr, PAGE_SIZE);
1854 return -EINVAL;
1855 }
1856
1857 if (size < PAGE_SIZE) {
1858 dev_dbg(hdev->dev,
1859 "exported device memory size %llu should be equal to or greater than %lu\n",
1860 size, PAGE_SIZE);
1861 return -EINVAL;
1862 }
1863
1864 return 0;
1865 }
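/*
 * Worked example of the common checks above (illustrative, assuming a 4 KiB
 * PAGE_SIZE): device_addr = 0x10001000 with size = 0x800 is rejected because
 * the size is smaller than PAGE_SIZE; device_addr = 0x10001100 with
 * size = 0x2000 is rejected because the address is not page aligned;
 * device_addr = 0x10001000 with size = 0x2000 passes both checks.
 */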
1866
1867 static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr, u64 size)
1868 {
1869 struct asic_fixed_properties *prop = &hdev->asic_prop;
1870 u64 bar_address;
1871 int rc;
1872
1873 rc = validate_export_params_common(hdev, device_addr, size);
1874 if (rc)
1875 return rc;
1876
1877 if (device_addr < prop->dram_user_base_address ||
1878 (device_addr + size) > prop->dram_end_address ||
1879 (device_addr + size) < device_addr) {
1880 dev_dbg(hdev->dev,
1881 "DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
1882 device_addr, size);
1883 return -EINVAL;
1884 }
1885
1886 bar_address = hdev->dram_pci_bar_start + (device_addr - prop->dram_base_address);
1887
1888 if ((bar_address + size) > (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
1889 (bar_address + size) < bar_address) {
1890 dev_dbg(hdev->dev,
1891 "DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
1892 device_addr, size);
1893 return -EINVAL;
1894 }
1895
1896 return 0;
1897 }
1898
1899 static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 size, u64 offset,
1900 struct hl_vm_phys_pg_pack *phys_pg_pack)
1901 {
1902 struct asic_fixed_properties *prop = &hdev->asic_prop;
1903 u64 bar_address;
1904 int i, rc;
1905
1906 rc = validate_export_params_common(hdev, device_addr, size);
1907 if (rc)
1908 return rc;
1909
1910 if ((offset + size) > phys_pg_pack->total_size) {
1911 dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
1912 offset, size, phys_pg_pack->total_size);
1913 return -EINVAL;
1914 }
1915
1916 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
1917
1918 bar_address = hdev->dram_pci_bar_start +
1919 (phys_pg_pack->pages[i] - prop->dram_base_address);
1920
1921 if ((bar_address + phys_pg_pack->page_size) >
1922 (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
1923 (bar_address + phys_pg_pack->page_size) < bar_address) {
1924 dev_dbg(hdev->dev,
1925 "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
1926 phys_pg_pack->pages[i],
1927 phys_pg_pack->page_size);
1928
1929 return -EINVAL;
1930 }
1931 }
1932
1933 return 0;
1934 }
1935
1936 static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
1937 {
1938 struct hl_device *hdev = ctx->hdev;
1939 struct hl_vm_hash_node *hnode;
1940
1941 /* get the memory handle */
1942 mutex_lock(&ctx->mem_hash_lock);
1943 hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)addr)
1944 if (addr == hnode->vaddr)
1945 break;
1946
1947 if (!hnode) {
1948 mutex_unlock(&ctx->mem_hash_lock);
1949 dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
1950 return ERR_PTR(-EINVAL);
1951 }
1952
1953 if (upper_32_bits(hnode->handle)) {
1954 mutex_unlock(&ctx->mem_hash_lock);
1955 dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
1956 hnode->handle, addr);
1957 return ERR_PTR(-EINVAL);
1958 }
1959
1960 /*
1961 * node found, increase export count so this memory cannot be unmapped
1962 * and the hash node cannot be deleted.
1963 */
1964 hnode->export_cnt++;
1965 mutex_unlock(&ctx->mem_hash_lock);
1966
1967 return hnode;
1968 }
1969
1970 static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
1971 {
1972 mutex_lock(&ctx->mem_hash_lock);
1973 hnode->export_cnt--;
1974 mutex_unlock(&ctx->mem_hash_lock);
1975 }
1976
1977 static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
1978 struct hl_vm_hash_node *hnode)
1979 {
1980 struct hl_vm_phys_pg_pack *phys_pg_pack;
1981 struct hl_vm *vm = &hdev->vm;
1982
1983 spin_lock(&vm->idr_lock);
1984 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) hnode->handle);
1985 if (!phys_pg_pack) {
1986 spin_unlock(&vm->idr_lock);
1987 dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) hnode->handle);
1988 return ERR_PTR(-EINVAL);
1989 }
1990
1991 spin_unlock(&vm->idr_lock);
1992
1993 if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
1994 dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", hnode->handle);
1995 return ERR_PTR(-EINVAL);
1996 }
1997
1998 return phys_pg_pack;
1999 }
2000
2001 /**
2002 * export_dmabuf_from_addr() - export a dma-buf object for the given memory
2003 * address and size.
2004 * @ctx: pointer to the context structure.
2005 * @addr: device address.
2006 * @size: size of device memory to export.
2007 * @offset: the offset into the buffer from which to start exporting
2008 * @flags: DMA-BUF file/FD flags.
2009 * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
2010 *
2011 * Create and export a dma-buf object for an existing memory allocation inside
2012 * the device memory, and return a FD which is associated with the dma-buf
2013 * object.
2014 *
2015 * Return: 0 on success, non-zero for failure.
2016 */
2017 static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 offset,
2018 int flags, int *dmabuf_fd)
2019 {
2020 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
2021 struct hl_vm_hash_node *hnode = NULL;
2022 struct asic_fixed_properties *prop;
2023 struct hl_dmabuf_priv *hl_dmabuf;
2024 struct hl_device *hdev;
2025 u64 export_addr;
2026 int rc;
2027
2028 hdev = ctx->hdev;
2029 prop = &hdev->asic_prop;
2030
2031 /* offset must be 0 in devices without virtual memory support */
2032 if (!prop->dram_supports_virtual_memory && offset) {
2033 dev_dbg(hdev->dev, "offset is not allowed in device without virtual memory\n");
2034 return -EINVAL;
2035 }
2036
2037 export_addr = addr + offset;
2038
2039 hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
2040 if (!hl_dmabuf)
2041 return -ENOMEM;
2042
2043 if (prop->dram_supports_virtual_memory) {
2044 hnode = memhash_node_export_get(ctx, addr);
2045 if (IS_ERR(hnode)) {
2046 rc = PTR_ERR(hnode);
2047 goto err_free_dmabuf_wrapper;
2048 }
2049 phys_pg_pack = get_phys_pg_pack_from_hash_node(hdev, hnode);
2050 if (IS_ERR(phys_pg_pack)) {
2051 rc = PTR_ERR(phys_pg_pack);
2052 goto dec_memhash_export_cnt;
2053 }
2054 rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack);
2055 if (rc)
2056 goto dec_memhash_export_cnt;
2057
2058 phys_pg_pack->exported_size = size;
2059 hl_dmabuf->phys_pg_pack = phys_pg_pack;
2060 hl_dmabuf->memhash_hnode = hnode;
2061 } else {
2062 rc = validate_export_params_no_mmu(hdev, export_addr, size);
2063 if (rc)
2064 goto err_free_dmabuf_wrapper;
2065 }
2066
2067 hl_dmabuf->device_address = export_addr;
2068
2069 rc = export_dmabuf(ctx, hl_dmabuf, size, flags, dmabuf_fd);
2070 if (rc)
2071 goto dec_memhash_export_cnt;
2072
2073 return 0;
2074
2075 dec_memhash_export_cnt:
2076 if (prop->dram_supports_virtual_memory)
2077 memhash_node_export_put(ctx, hnode);
2078 err_free_dmabuf_wrapper:
2079 kfree(hl_dmabuf);
2080 return rc;
2081 }
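/*
 * Illustrative user-space sketch of the export flow (the ioctl request macro
 * name is assumed; field names follow the dispatcher in hl_mem_ioctl() below):
 *
 *	union hl_mem_args args = {0};
 *
 *	args.in.op = HL_MEM_OP_EXPORT_DMABUF_FD;
 *	args.in.export_dmabuf_fd.addr = device_va;   // previously mapped device address
 *	args.in.export_dmabuf_fd.mem_size = size;
 *	args.in.export_dmabuf_fd.offset = 0;
 *	args.in.flags = O_RDWR | O_CLOEXEC;          // dma-buf FD flags
 *	if (!ioctl(dev_fd, HL_MEM_IOCTL_REQUEST, &args))   // request code assumed
 *		dmabuf_fd = args.out.fd;
 */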
2082
2083 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
2084 {
2085 struct hl_device *hdev = hpriv->hdev;
2086 u64 block_handle, device_addr = 0;
2087 struct hl_ctx *ctx = hpriv->ctx;
2088 u32 handle = 0, block_size;
2089 int rc;
2090
2091 switch (args->in.op) {
2092 case HL_MEM_OP_ALLOC:
2093 if (args->in.alloc.mem_size == 0) {
2094 dev_err(hdev->dev, "alloc size must be larger than 0\n");
2095 rc = -EINVAL;
2096 goto out;
2097 }
2098
2099 /* Force contiguous as there are no real MMU
2100 * translations to overcome physical memory gaps
2101 */
2102 args->in.flags |= HL_MEM_CONTIGUOUS;
2103 rc = alloc_device_memory(ctx, &args->in, &handle);
2104
2105 memset(args, 0, sizeof(*args));
2106 args->out.handle = (__u64) handle;
2107 break;
2108
2109 case HL_MEM_OP_FREE:
2110 rc = free_device_memory(ctx, &args->in);
2111 break;
2112
2113 case HL_MEM_OP_MAP:
2114 if (args->in.flags & HL_MEM_USERPTR) {
2115 dev_err(hdev->dev, "Failed to map host memory when MMU is disabled\n");
2116 rc = -EPERM;
2117 } else {
2118 rc = get_paddr_from_handle(ctx, &args->in, &device_addr);
2119 memset(args, 0, sizeof(*args));
2120 args->out.device_virt_addr = device_addr;
2121 }
2122
2123 break;
2124
2125 case HL_MEM_OP_UNMAP:
2126 rc = 0;
2127 break;
2128
2129 case HL_MEM_OP_MAP_BLOCK:
2130 rc = map_block(hdev, args->in.map_block.block_addr, &block_handle, &block_size);
2131 args->out.block_handle = block_handle;
2132 args->out.block_size = block_size;
2133 break;
2134
2135 case HL_MEM_OP_EXPORT_DMABUF_FD:
2136 dev_err(hdev->dev, "Failed to export dma-buf object when MMU is disabled\n");
2137 rc = -EPERM;
2138 break;
2139
2140 case HL_MEM_OP_TS_ALLOC:
2141 rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
2142 break;
2143 default:
2144 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2145 rc = -EINVAL;
2146 break;
2147 }
2148
2149 out:
2150 return rc;
2151 }
2152
2153 static void ts_buff_release(struct hl_mmap_mem_buf *buf)
2154 {
2155 struct hl_ts_buff *ts_buff = buf->private;
2156
2157 vfree(ts_buff->kernel_buff_address);
2158 vfree(ts_buff->user_buff_address);
2159 kfree(ts_buff);
2160 }
2161
2162 static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
2163 {
2164 struct hl_ts_buff *ts_buff = buf->private;
2165
2166 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
2167 return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
2168 }
2169
2170 static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
2171 {
2172 struct hl_ts_buff *ts_buff = NULL;
2173 u32 num_elements;
2174 size_t size;
2175 void *p;
2176
2177 num_elements = *(u32 *)args;
2178
2179 ts_buff = kzalloc(sizeof(*ts_buff), gfp);
2180 if (!ts_buff)
2181 return -ENOMEM;
2182
2183 /* Allocate the user buffer */
2184 size = num_elements * sizeof(u64);
2185 p = vmalloc_user(size);
2186 if (!p)
2187 goto free_mem;
2188
2189 ts_buff->user_buff_address = p;
2190 buf->mappable_size = size;
2191
2192 /* Allocate the internal kernel buffer */
2193 size = num_elements * sizeof(struct hl_user_pending_interrupt);
2194 p = vzalloc(size);
2195 if (!p)
2196 goto free_user_buff;
2197
2198 ts_buff->kernel_buff_address = p;
2199 ts_buff->kernel_buff_size = size;
2200
2201 buf->private = ts_buff;
2202
2203 return 0;
2204
2205 free_user_buff:
2206 vfree(ts_buff->user_buff_address);
2207 free_mem:
2208 kfree(ts_buff);
2209 return -ENOMEM;
2210 }
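/*
 * Sizing note (illustrative): for num_elements = 1024, the user-mappable
 * buffer holds 1024 * sizeof(u64) = 8 KiB of timestamp slots, while the
 * internal kernel buffer holds 1024 struct hl_user_pending_interrupt entries
 * that are later handed out as pre-allocated registration nodes.
 */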
2211
2212 static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
2213 .topic = "TS",
2214 .mem_id = HL_MMAP_TYPE_TS_BUFF,
2215 .mmap = hl_ts_mmap,
2216 .alloc = hl_ts_alloc_buf,
2217 .release = ts_buff_release,
2218 };
2219
2220 /**
2221 * allocate_timestamps_buffers() - allocate timestamps buffers
2222 * @hpriv: pointer to the private data of the fd
2223 * @args: ioctl input
2224 * @handle: user timestamp buffer handle as an output
2225 *
2226 * This function allocates a timestamp buffer that will later be mapped to the
2227 * user so the timestamps can be read, and an extra buffer for registration
2228 * management. Since registration must not fail due to out-of-memory, a pool
2229 * of user interrupt nodes is prepared here, and nodes are picked from this
2230 * pool during registration instead of being allocated dynamically. In
2231 * addition, a node is added to the mapping hash, which is used to map the
2232 * user timestamp buffer to the internal kernel timestamp buffer.
2233 */
2234 static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
2235 {
2236 struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
2237 struct hl_mmap_mem_buf *buf;
2238
2239 if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
2240 dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
2241 args->num_of_elements, TS_MAX_ELEMENTS_NUM);
2242 return -EINVAL;
2243 }
2244
2245 buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
2246 if (!buf)
2247 return -ENOMEM;
2248
2249 *handle = buf->handle;
2250
2251 return 0;
2252 }
2253
2254 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
2255 {
2256 enum hl_device_status status;
2257 union hl_mem_args *args = data;
2258 struct hl_device *hdev = hpriv->hdev;
2259 struct hl_ctx *ctx = hpriv->ctx;
2260 u64 block_handle, device_addr = 0;
2261 u32 handle = 0, block_size;
2262 int rc, dmabuf_fd = -EBADF;
2263
2264 if (!hl_device_operational(hdev, &status)) {
2265 dev_dbg_ratelimited(hdev->dev,
2266 "Device is %s. Can't execute MEMORY IOCTL\n",
2267 hdev->status[status]);
2268 return -EBUSY;
2269 }
2270
2271 if (!hdev->mmu_enable)
2272 return mem_ioctl_no_mmu(hpriv, args);
2273
2274 switch (args->in.op) {
2275 case HL_MEM_OP_ALLOC:
2276 if (args->in.alloc.mem_size == 0) {
2277 dev_err(hdev->dev,
2278 "alloc size must be larger than 0\n");
2279 rc = -EINVAL;
2280 goto out;
2281 }
2282
2283 /* If DRAM does not support virtual memory the driver won't
2284 * handle the allocation/freeing of that memory. However, for
2285 * system administration/monitoring purposes, the driver will
2286 * keep track of the amount of DRAM memory that is allocated
2287 * and freed by the user. Because this code totally relies on
2288 * the user's input, the driver can't ensure the validity
2289 * of this accounting.
2290 */
2291 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2292 atomic64_add(args->in.alloc.mem_size,
2293 &ctx->dram_phys_mem);
2294 atomic64_add(args->in.alloc.mem_size,
2295 &hdev->dram_used_mem);
2296
2297 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2298 rc = 0;
2299
2300 memset(args, 0, sizeof(*args));
2301 args->out.handle = 0;
2302 goto out;
2303 }
2304
2305 rc = alloc_device_memory(ctx, &args->in, &handle);
2306
2307 memset(args, 0, sizeof(*args));
2308 args->out.handle = (__u64) handle;
2309 break;
2310
2311 case HL_MEM_OP_FREE:
2312 /* If DRAM does not support virtual memory the driver won't
2313 * handle the allocation/freeing of that memory. However, for
2314 * system administration/monitoring purposes, the driver will
2315 * keep track of the amount of DRAM memory that is allocated
2316 * and freed by the user. Because this code totally relies on
2317 * the user's input, the driver can't ensure the validity
2318 * of this accounting.
2319 */
2320 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2321 atomic64_sub(args->in.alloc.mem_size,
2322 &ctx->dram_phys_mem);
2323 atomic64_sub(args->in.alloc.mem_size,
2324 &hdev->dram_used_mem);
2325
2326 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2327 rc = 0;
2328
2329 goto out;
2330 }
2331
2332 rc = free_device_memory(ctx, &args->in);
2333 break;
2334
2335 case HL_MEM_OP_MAP:
2336 rc = map_device_va(ctx, &args->in, &device_addr);
2337
2338 memset(args, 0, sizeof(*args));
2339 args->out.device_virt_addr = device_addr;
2340 break;
2341
2342 case HL_MEM_OP_UNMAP:
2343 rc = unmap_device_va(ctx, &args->in, false);
2344 break;
2345
2346 case HL_MEM_OP_MAP_BLOCK:
2347 rc = map_block(hdev, args->in.map_block.block_addr,
2348 &block_handle, &block_size);
2349 args->out.block_handle = block_handle;
2350 args->out.block_size = block_size;
2351 break;
2352
2353 case HL_MEM_OP_EXPORT_DMABUF_FD:
2354 rc = export_dmabuf_from_addr(ctx,
2355 args->in.export_dmabuf_fd.addr,
2356 args->in.export_dmabuf_fd.mem_size,
2357 args->in.export_dmabuf_fd.offset,
2358 args->in.flags,
2359 &dmabuf_fd);
2360 memset(args, 0, sizeof(*args));
2361 args->out.fd = dmabuf_fd;
2362 break;
2363
2364 case HL_MEM_OP_TS_ALLOC:
2365 rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
2366 break;
2367 default:
2368 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2369 rc = -EINVAL;
2370 break;
2371 }
2372
2373 out:
2374 return rc;
2375 }
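/*
 * Illustrative user-space sketch of a device memory allocation through this
 * ioctl (the request macro name is assumed; only fields referenced by the
 * dispatcher above are shown):
 *
 *	union hl_mem_args args = {0};
 *
 *	args.in.op = HL_MEM_OP_ALLOC;
 *	args.in.alloc.mem_size = 64ull * 1024 * 1024;
 *	args.in.flags = HL_MEM_CONTIGUOUS;               // optional
 *	ioctl(dev_fd, HL_MEM_IOCTL_REQUEST, &args);      // request code assumed
 *
 * On success, args.out.handle identifies the allocation and is later passed
 * to the HL_MEM_OP_MAP and HL_MEM_OP_FREE operations.
 */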
2376
2377 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
2378 u32 npages, u64 start, u32 offset,
2379 struct hl_userptr *userptr)
2380 {
2381 int rc;
2382
2383 if (!access_ok((void __user *) (uintptr_t) addr, size)) {
2384 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
2385 return -EFAULT;
2386 }
2387
2388 userptr->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
2389 if (!userptr->pages)
2390 return -ENOMEM;
2391
2392 rc = pin_user_pages_fast(start, npages, FOLL_WRITE | FOLL_LONGTERM,
2393 userptr->pages);
2394
2395 if (rc != npages) {
2396 dev_err(hdev->dev,
2397 "Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
2398 rc, addr, size, npages);
2399 if (rc < 0)
2400 goto destroy_pages;
2401 npages = rc;
2402 rc = -EFAULT;
2403 goto put_pages;
2404 }
2405 userptr->npages = npages;
2406
2407 rc = sg_alloc_table_from_pages(userptr->sgt,
2408 userptr->pages,
2409 npages, offset, size, GFP_KERNEL);
2410 if (rc < 0) {
2411 dev_err(hdev->dev, "failed to create SG table from pages\n");
2412 goto put_pages;
2413 }
2414
2415 return 0;
2416
2417 put_pages:
2418 unpin_user_pages(userptr->pages, npages);
2419 destroy_pages:
2420 kvfree(userptr->pages);
2421 return rc;
2422 }
2423
2424 /**
2425 * hl_pin_host_memory() - pins a chunk of host memory.
2426 * @hdev: pointer to the habanalabs device structure.
2427 * @addr: the host virtual address of the memory area.
2428 * @size: the size of the memory area.
2429 * @userptr: pointer to hl_userptr structure.
2430 *
2431 * This function does the following:
2432 * - Pins the physical pages.
2433 * - Create an SG list from those pages.
2434 */
2435 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2436 struct hl_userptr *userptr)
2437 {
2438 u64 start, end;
2439 u32 npages, offset;
2440 int rc;
2441
2442 if (!size) {
2443 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
2444 return -EINVAL;
2445 }
2446
2447 /*
2448 * If the combination of the address and size requested for this memory
2449 * region causes an integer overflow, return error.
2450 */
2451 if (((addr + size) < addr) ||
2452 PAGE_ALIGN(addr + size) < (addr + size)) {
2453 dev_err(hdev->dev,
2454 "user pointer 0x%llx + %llu causes integer overflow\n",
2455 addr, size);
2456 return -EINVAL;
2457 }
2458
2459 userptr->pid = current->pid;
2460 userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
2461 if (!userptr->sgt)
2462 return -ENOMEM;
2463
2464 start = addr & PAGE_MASK;
2465 offset = addr & ~PAGE_MASK;
2466 end = PAGE_ALIGN(addr + size);
2467 npages = (end - start) >> PAGE_SHIFT;
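/*
 * Worked example (illustrative, 4 KiB pages): addr = 0x12345 and size = 0x3000
 * give start = 0x12000, offset = 0x345, end = PAGE_ALIGN(0x15345) = 0x16000
 * and npages = 4, i.e. the pinned range covers every page the user buffer
 * touches.
 */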
2468
2469 userptr->size = size;
2470 userptr->addr = addr;
2471 userptr->dma_mapped = false;
2472 INIT_LIST_HEAD(&userptr->job_node);
2473
2474 rc = get_user_memory(hdev, addr, size, npages, start, offset,
2475 userptr);
2476 if (rc) {
2477 dev_err(hdev->dev,
2478 "failed to get user memory for address 0x%llx\n",
2479 addr);
2480 goto free_sgt;
2481 }
2482
2483 hl_debugfs_add_userptr(hdev, userptr);
2484
2485 return 0;
2486
2487 free_sgt:
2488 kfree(userptr->sgt);
2489 return rc;
2490 }
2491
2492 /*
2493 * hl_unpin_host_memory - unpins a chunk of host memory.
2494 * @hdev: pointer to the habanalabs device structure
2495 * @userptr: pointer to hl_userptr structure
2496 *
2497 * This function does the following:
2498 * - Unpins the physical pages related to the host memory
2499 * - Free the SG list
2500 */
2501 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
2502 {
2503 hl_debugfs_remove_userptr(hdev, userptr);
2504
2505 if (userptr->dma_mapped)
2506 hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
2507
2508 unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
2509 kvfree(userptr->pages);
2510
2511 list_del(&userptr->job_node);
2512
2513 sg_free_table(userptr->sgt);
2514 kfree(userptr->sgt);
2515 }
2516
2517 /**
2518 * hl_userptr_delete_list() - clear userptr list.
2519 * @hdev: pointer to the habanalabs device structure.
2520 * @userptr_list: pointer to the list to clear.
2521 *
2522 * This function does the following:
2523 * - Iterates over the list and unpins the host memory and frees the userptr
2524 * structure.
2525 */
2526 void hl_userptr_delete_list(struct hl_device *hdev,
2527 struct list_head *userptr_list)
2528 {
2529 struct hl_userptr *userptr, *tmp;
2530
2531 list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
2532 hl_unpin_host_memory(hdev, userptr);
2533 kfree(userptr);
2534 }
2535
2536 INIT_LIST_HEAD(userptr_list);
2537 }
2538
2539 /**
2540 * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
2541 * @hdev: pointer to the habanalabs device structure.
2542 * @addr: user address to check.
2543 * @size: user block size to check.
2544 * @userptr_list: pointer to the list to search.
2545 * @userptr: pointer to userptr to check.
2546 *
2547 * This function does the following:
2548 * - Iterates over the list and checks whether the given userptr is in it,
2549 * meaning it is pinned. If so, returns true; otherwise returns false.
2550 */
2551 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
2552 u32 size, struct list_head *userptr_list,
2553 struct hl_userptr **userptr)
2554 {
2555 list_for_each_entry((*userptr), userptr_list, job_node) {
2556 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
2557 return true;
2558 }
2559
2560 return false;
2561 }
2562
2563 /**
2564 * va_range_init() - initialize virtual addresses range.
2565 * @hdev: pointer to the habanalabs device structure.
2566 * @va_ranges: pointer to va_ranges array.
2567 * @range_type: virtual address range type.
2568 * @start: range start address, inclusive.
2569 * @end: range end address, inclusive.
2570 * @page_size: page size for this va_range.
2571 *
2572 * This function does the following:
2573 * - Initializes the virtual addresses list of the given range with the given
2574 * addresses.
2575 */
2576 static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
2577 enum hl_va_range_type range_type, u64 start,
2578 u64 end, u32 page_size)
2579 {
2580 struct hl_va_range *va_range = va_ranges[range_type];
2581 int rc;
2582
2583 INIT_LIST_HEAD(&va_range->list);
2584
2585 /*
2586 * Align the range to the page size.
2587 * It is the caller's responsibility to align the addresses if the
2588 * page size is not a power of 2.
2589 */
2590
2591 if (is_power_of_2(page_size)) {
2592 start = round_up(start, page_size);
2593
2594 /*
2595 * The end of the range is inclusive, hence we need to align it
2596 * to the end of the last full page in the range. For example if
2597 * end = 0x3ff5 with page size 0x1000, we need to align it to
2598 * 0x2fff. The remaining 0xff5 bytes do not form a full page.
2599 */
2600 end = round_down(end + 1, page_size) - 1;
2601 }
2602
2603 if (start >= end) {
2604 dev_err(hdev->dev, "too small vm range for va list\n");
2605 return -EFAULT;
2606 }
2607
2608 rc = add_va_block(hdev, va_range, start, end);
2609
2610 if (rc) {
2611 dev_err(hdev->dev, "Failed to init host va list\n");
2612 return rc;
2613 }
2614
2615 va_range->start_addr = start;
2616 va_range->end_addr = end;
2617 va_range->page_size = page_size;
2618
2619 return 0;
2620 }
2621
2622 /**
2623 * va_range_fini() - clear a virtual addresses range.
2624 * @hdev: pointer to the habanalabs structure.
2625 * @va_range: pointer to virtual addresses range.
2626 *
2627 * This function does the following:
2628 * - Frees the virtual addresses block list and its lock.
2629 */
2630 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
2631 {
2632 mutex_lock(&va_range->lock);
2633 clear_va_list_locked(hdev, &va_range->list);
2634 mutex_unlock(&va_range->lock);
2635
2636 mutex_destroy(&va_range->lock);
2637 kfree(va_range);
2638 }
2639
2640 /**
2641 * vm_ctx_init_with_ranges() - initialize virtual memory for context.
2642 * @ctx: pointer to the habanalabs context structure.
2643 * @host_range_start: host virtual addresses range start.
2644 * @host_range_end: host virtual addresses range end.
2645 * @host_page_size: host page size.
2646 * @host_huge_range_start: host virtual addresses range start for memory
2647 * allocated with huge pages.
2648 * @host_huge_range_end: host virtual addresses range end for memory allocated
2649 * with huge pages.
2650 * @host_huge_page_size: host huge page size.
2651 * @dram_range_start: dram virtual addresses range start.
2652 * @dram_range_end: dram virtual addresses range end.
2653 * @dram_page_size: dram page size.
2654 *
2655 * This function initializes the following:
2656 * - MMU for context.
2657 * - Virtual address to area descriptor hashtable.
2658 * - Virtual block list of available virtual memory.
2659 */
2660 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
2661 u64 host_range_start,
2662 u64 host_range_end,
2663 u32 host_page_size,
2664 u64 host_huge_range_start,
2665 u64 host_huge_range_end,
2666 u32 host_huge_page_size,
2667 u64 dram_range_start,
2668 u64 dram_range_end,
2669 u32 dram_page_size)
2670 {
2671 struct hl_device *hdev = ctx->hdev;
2672 int i, rc;
2673
2674 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
2675 ctx->va_range[i] =
2676 kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
2677 if (!ctx->va_range[i]) {
2678 rc = -ENOMEM;
2679 goto free_va_range;
2680 }
2681 }
2682
2683 rc = hl_mmu_ctx_init(ctx);
2684 if (rc) {
2685 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
2686 goto free_va_range;
2687 }
2688
2689 mutex_init(&ctx->mem_hash_lock);
2690 hash_init(ctx->mem_hash);
2691
2692 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2693
2694 rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
2695 host_range_start, host_range_end, host_page_size);
2696 if (rc) {
2697 dev_err(hdev->dev, "failed to init host vm range\n");
2698 goto mmu_ctx_fini;
2699 }
2700
2701 if (hdev->pmmu_huge_range) {
2702 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2703
2704 rc = va_range_init(hdev,
2705 ctx->va_range, HL_VA_RANGE_TYPE_HOST_HUGE,
2706 host_huge_range_start, host_huge_range_end,
2707 host_huge_page_size);
2708 if (rc) {
2709 dev_err(hdev->dev,
2710 "failed to init host huge vm range\n");
2711 goto clear_host_va_range;
2712 }
2713 } else {
2714 kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2715 ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
2716 ctx->va_range[HL_VA_RANGE_TYPE_HOST];
2717 }
2718
2719 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2720
2721 rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
2722 dram_range_start, dram_range_end, dram_page_size);
2723 if (rc) {
2724 dev_err(hdev->dev, "failed to init dram vm range\n");
2725 goto clear_host_huge_va_range;
2726 }
2727
2728 hl_debugfs_add_ctx_mem_hash(hdev, ctx);
2729
2730 return 0;
2731
2732 clear_host_huge_va_range:
2733 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2734
2735 if (hdev->pmmu_huge_range) {
2736 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2737 clear_va_list_locked(hdev,
2738 &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
2739 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2740 }
2741 clear_host_va_range:
2742 if (hdev->pmmu_huge_range)
2743 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2744 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2745 clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
2746 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2747 mmu_ctx_fini:
2748 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2749 mutex_destroy(&ctx->mem_hash_lock);
2750 hl_mmu_ctx_fini(ctx);
2751 free_va_range:
2752 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
2753 kfree(ctx->va_range[i]);
2754
2755 return rc;
2756 }
2757
2758 int hl_vm_ctx_init(struct hl_ctx *ctx)
2759 {
2760 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
2761 u64 host_range_start, host_range_end, host_huge_range_start,
2762 host_huge_range_end, dram_range_start, dram_range_end;
2763 u32 host_page_size, host_huge_page_size, dram_page_size;
2764
2765 atomic64_set(&ctx->dram_phys_mem, 0);
2766
2767 /*
2768 * - If MMU is enabled, init the ranges as usual.
2769 * - If MMU is disabled, in case of host mapping, the returned address
2770 * is the given one.
2771 * In case of DRAM mapping, the returned address is the physical
2772 * address of the memory related to the given handle.
2773 */
2774 if (!ctx->hdev->mmu_enable)
2775 return 0;
2776
2777 dram_range_start = prop->dmmu.start_addr;
2778 dram_range_end = prop->dmmu.end_addr - 1;
2779 dram_page_size = prop->dram_page_size ?
2780 prop->dram_page_size : prop->dmmu.page_size;
2781 host_range_start = prop->pmmu.start_addr;
2782 host_range_end = prop->pmmu.end_addr - 1;
2783 host_page_size = prop->pmmu.page_size;
2784 host_huge_range_start = prop->pmmu_huge.start_addr;
2785 host_huge_range_end = prop->pmmu_huge.end_addr - 1;
2786 host_huge_page_size = prop->pmmu_huge.page_size;
2787
2788 return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
2789 host_page_size, host_huge_range_start,
2790 host_huge_range_end, host_huge_page_size,
2791 dram_range_start, dram_range_end, dram_page_size);
2792 }
2793
2794 /**
2795 * hl_vm_ctx_fini() - virtual memory teardown of context.
2796 * @ctx: pointer to the habanalabs context structure.
2797 *
2798 * This function performs teardown of the following:
2799 * - Virtual block list of available virtual memory.
2800 * - Virtual address to area descriptor hashtable.
2801 * - MMU for context.
2802 *
2803 * In addition this function does the following:
2804 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
2805 * hashtable should be empty as no valid mappings should exist at this
2806 * point.
2807 * - Frees any existing physical page list from the idr which relates to the
2808 * current context asid.
2809 * - This function checks the virtual block list for correctness. At this point
2810 * the list should contain one element which describes the whole virtual
2811 * memory range of the context. Otherwise, a warning is printed.
2812 */
2813 void hl_vm_ctx_fini(struct hl_ctx *ctx)
2814 {
2815 struct hl_vm_phys_pg_pack *phys_pg_list, *tmp_phys_node;
2816 struct hl_device *hdev = ctx->hdev;
2817 struct hl_vm_hash_node *hnode;
2818 struct hl_vm *vm = &hdev->vm;
2819 struct hlist_node *tmp_node;
2820 struct list_head free_list;
2821 struct hl_mem_in args;
2822 int i;
2823
2824 if (!hdev->mmu_enable)
2825 return;
2826
2827 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
2828
2829 /*
2830 * Clearly something went wrong on hard reset so no point in printing
2831 * another side effect error
2832 */
2833 if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
2834 dev_dbg(hdev->dev,
2835 "user released device without removing its memory mappings\n");
2836
2837 hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
2838 dev_dbg(hdev->dev,
2839 "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
2840 hnode->vaddr, ctx->asid);
2841 args.unmap.device_virt_addr = hnode->vaddr;
2842 unmap_device_va(ctx, &args, true);
2843 }
2844
2845 mutex_lock(&hdev->mmu_lock);
2846
2847 /* invalidate the cache once after the unmapping loop */
2848 hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
2849 hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
2850
2851 mutex_unlock(&hdev->mmu_lock);
2852
2853 INIT_LIST_HEAD(&free_list);
2854
2855 spin_lock(&vm->idr_lock);
2856 idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
2857 if (phys_pg_list->asid == ctx->asid) {
2858 dev_dbg(hdev->dev,
2859 "page list 0x%px of asid %d is still alive\n",
2860 phys_pg_list, ctx->asid);
2861
2862 atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
2863 idr_remove(&vm->phys_pg_pack_handles, i);
2864 list_add(&phys_pg_list->node, &free_list);
2865 }
2866 spin_unlock(&vm->idr_lock);
2867
2868 list_for_each_entry_safe(phys_pg_list, tmp_phys_node, &free_list, node)
2869 free_phys_pg_pack(hdev, phys_pg_list);
2870
2871 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
2872 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
2873
2874 if (hdev->pmmu_huge_range)
2875 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2876
2877 mutex_destroy(&ctx->mem_hash_lock);
2878 hl_mmu_ctx_fini(ctx);
2879
2880 /* In this case we need to clear the global accounting of DRAM usage
2881 * because the user notifies us on allocations. If the user is no more,
2882 * all DRAM is available
2883 */
2884 if (ctx->asid != HL_KERNEL_ASID_ID &&
2885 !hdev->asic_prop.dram_supports_virtual_memory)
2886 atomic64_set(&hdev->dram_used_mem, 0);
2887 }
2888
2889 /**
2890 * hl_vm_init() - initialize virtual memory module.
2891 * @hdev: pointer to the habanalabs device structure.
2892 *
2893 * This function initializes the following:
2894 * - MMU module.
2895 * - DRAM physical pages pool.
2896 * - Idr for device memory allocation handles.
2897 */
2898 int hl_vm_init(struct hl_device *hdev)
2899 {
2900 struct asic_fixed_properties *prop = &hdev->asic_prop;
2901 struct hl_vm *vm = &hdev->vm;
2902 int rc;
2903
2904 if (is_power_of_2(prop->dram_page_size))
2905 vm->dram_pg_pool =
2906 gen_pool_create(__ffs(prop->dram_page_size), -1);
2907 else
2908 vm->dram_pg_pool =
2909 gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
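/* gen_pool_create() takes the minimum allocation order, so __ffs() of a
 * power-of-2 DRAM page size makes the pool allocate with exactly that
 * granularity; non-power-of-2 page sizes fall back to the fixed
 * DRAM_POOL_PAGE_SIZE granularity instead.
 */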
2910
2911 if (!vm->dram_pg_pool) {
2912 dev_err(hdev->dev, "Failed to create dram page pool\n");
2913 return -ENOMEM;
2914 }
2915
2916 kref_init(&vm->dram_pg_pool_refcount);
2917
2918 rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
2919 prop->dram_end_address - prop->dram_user_base_address,
2920 -1);
2921
2922 if (rc) {
2923 dev_err(hdev->dev,
2924 "Failed to add memory to dram page pool %d\n", rc);
2925 goto pool_add_err;
2926 }
2927
2928 spin_lock_init(&vm->idr_lock);
2929 idr_init(&vm->phys_pg_pack_handles);
2930
2931 atomic64_set(&hdev->dram_used_mem, 0);
2932
2933 vm->init_done = true;
2934
2935 return 0;
2936
2937 pool_add_err:
2938 gen_pool_destroy(vm->dram_pg_pool);
2939
2940 return rc;
2941 }
2942
2943 /**
2944 * hl_vm_fini() - virtual memory module teardown.
2945 * @hdev: pointer to the habanalabs device structure.
2946 *
2947 * This function performs teardown of the following:
2948 * - Idr for device memory allocation handles.
2949 * - DRAM physical pages pool.
2950 * - MMU module.
2951 */
2952 void hl_vm_fini(struct hl_device *hdev)
2953 {
2954 struct hl_vm *vm = &hdev->vm;
2955
2956 if (!vm->init_done)
2957 return;
2958
2959 /*
2960 * At this point all the contexts should be freed and hence no DRAM
2961 * memory should be in use. Hence the DRAM pool should be freed here.
2962 */
2963 if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
2964 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
2965 __func__);
2966
2967 vm->init_done = false;
2968 }
2969
2970 /**
2971 * hl_hw_block_mem_init() - HW block memory initialization.
2972 * @ctx: pointer to the habanalabs context structure.
2973 *
2974 * This function initializes the HW block virtual mapped addresses list and
2975 * its lock.
2976 */
2977 void hl_hw_block_mem_init(struct hl_ctx *ctx)
2978 {
2979 mutex_init(&ctx->hw_block_list_lock);
2980 INIT_LIST_HEAD(&ctx->hw_block_mem_list);
2981 }
2982
2983 /**
2984 * hl_hw_block_mem_fini() - HW block memory teardown.
2985 * @ctx: pointer to the habanalabs context structure.
2986 *
2987 * This function clears the HW block virtual mapped addresses list and destroys
2988 * its lock.
2989 */
2990 void hl_hw_block_mem_fini(struct hl_ctx *ctx)
2991 {
2992 struct hl_vm_hw_block_list_node *lnode, *tmp;
2993
2994 if (!list_empty(&ctx->hw_block_mem_list))
2995 dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
2996
2997 list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
2998 list_del(&lnode->node);
2999 kfree(lnode);
3000 }
3001
3002 mutex_destroy(&ctx->hw_block_list_lock);
3003 }
3004