// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define CB_VA_POOL_SIZE		(4UL * SZ_1G)

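/*
 * cb_map_mem() - map a command buffer to the device's MMU.
 *
 * Rounds the CB size up to the PMMU page size, carves a device virtual
 * address out of the per-context cb_va_pool, maps the CB's bus address
 * contiguously at that VA and invalidates the MMU cache. Does nothing if
 * the CB is already mapped.
 */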
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 page_size = prop->pmmu.page_size;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Mapping a CB to the device's MMU is not supported\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	if (cb->is_mmu_mapped)
		return 0;

	cb->roundup_size = roundup(cb->size, page_size);

	cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
	if (!cb->virtual_addr) {
		dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
		return -ENOMEM;
	}

	mutex_lock(&hdev->mmu_lock);

	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
	if (rc) {
		dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
		goto err_va_umap;
	}

	rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);

	mutex_unlock(&hdev->mmu_lock);

	cb->is_mmu_mapped = true;

	return rc;

err_va_umap:
	mutex_unlock(&hdev->mmu_lock);
	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);

	return rc;
}

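/* Reverse of cb_map_mem(): unmap the CB from the device's MMU and return its VA to the pool. */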
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;

	mutex_lock(&hdev->mmu_lock);
	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
	mutex_unlock(&hdev->mmu_lock);

	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}

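/* Free the CB's backing memory (internal pool or DMA coherent) and the CB descriptor itself. */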
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);

	kfree(cb);
}

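/* Pool CBs are recycled back into the device-wide CB pool; all others are freed. */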
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		atomic_set(&cb->is_handle_destroyed, 0);
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

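/*
 * hl_cb_alloc() - allocate a CB descriptor and its backing memory.
 *
 * Internal CBs come from the device's internal CB pool, kernel-context CBs
 * from DMA coherent memory (tried atomically first), and user CBs from
 * zeroed DMA coherent memory.
 */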
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d of dma memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

struct hl_cb_mmap_mem_alloc_args {
	struct hl_device *hdev;
	struct hl_ctx *ctx;
	u32 cb_size;
	bool internal_cb;
	bool map_cb;
};

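/* Buffer release callback: undo everything hl_cb_mmap_mem_alloc() set up. */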
static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
{
	struct hl_cb *cb = buf->private;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(cb->hdev, cb);
}

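/*
 * Buffer allocation callback: reuse a CB from the device CB pool when the
 * kernel context asks for a small enough buffer, otherwise allocate a new
 * one, and optionally map it to the device's MMU for user contexts.
 */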
static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	struct hl_cb_mmap_mem_alloc_args *cb_args = args;
	struct hl_cb *cb;
	int rc, ctx_id = cb_args->ctx->asid;
	bool alloc_new_cb = true;

	if (!cb_args->internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_args->cb_size < PAGE_SIZE)
			cb_args->cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&cb_args->hdev->cb_pool_lock);
			if (!list_empty(&cb_args->hdev->cb_pool)) {
				cb = list_first_entry(&cb_args->hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&cb_args->hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&cb_args->hdev->cb_pool_lock);
				dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
		if (!cb)
			return -ENOMEM;
	}

	cb->hdev = cb_args->hdev;
	cb->ctx = cb_args->ctx;
	cb->buf = buf;
	cb->buf->mappable_size = cb->size;
	cb->buf->private = cb;

	hl_ctx_get(cb->ctx);

	if (cb_args->map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(cb_args->hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(cb_args->ctx, cb);
		if (rc)
			goto release_cb;
	}

	hl_debugfs_add_cb(cb);

	return 0;

release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(cb_args->hdev, cb);

	return rc;
}

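/* Buffer mmap callback: delegate mapping of the CB into user space to the ASIC-specific handler. */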
static int hl_cb_mmap(struct hl_mmap_mem_buf *buf,
				struct vm_area_struct *vma, void *args)
{
	struct hl_cb *cb = buf->private;

	return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
}

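/* Callbacks that plug command buffers into the generic mmap-able memory manager. */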
static struct hl_mmap_mem_buf_behavior cb_behavior = {
	.topic = "CB",
	.mem_id = HL_MMAP_TYPE_CB,
	.alloc = hl_cb_mmap_mem_alloc,
	.release = hl_cb_mmap_mem_release,
	.mmap = hl_cb_mmap,
};

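/**
 * hl_cb_create() - create a command buffer and return a handle to it.
 * @hdev: pointer to the device structure.
 * @mmg: memory manager that will own the buffer.
 * @ctx: context the CB belongs to.
 * @cb_size: requested size of the CB.
 * @internal_cb: true if the CB should come from the internal CB pool.
 * @map_cb: true if the CB should also be mapped to the device's MMU.
 * @handle: result, handle that identifies the new CB.
 *
 * Return: 0 on success, negative error code otherwise.
 */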
int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb_mmap_mem_alloc_args args = {
		.hdev = hdev,
		.ctx = ctx,
		.cb_size = cb_size,
		.internal_cb = internal_cb,
		.map_cb = map_cb,
	};
	struct hl_mmap_mem_buf *buf;
	int ctx_id = ctx->asid;

	if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		return -EBUSY;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		return -EINVAL;
	}

	buf = hl_mmap_mem_buf_alloc(
		mmg, &cb_behavior,
		ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL, &args);
	if (!buf)
		return -ENOMEM;

	*handle = buf->handle;

	return 0;
}

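/*
 * hl_cb_destroy() - destroy the CB that matches the given handle.
 *
 * The is_handle_destroyed flag guards against destroying the same handle
 * twice; the buffer itself is only freed once its last reference (e.g. an
 * in-flight command submission) is dropped.
 */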
int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
	struct hl_cb *cb;
	int rc;

	cb = hl_cb_get(mmg, cb_handle);
	if (!cb) {
		dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
			cb_handle);
		return -EINVAL;
	}

	/* Make sure that CB handle isn't destroyed more than once */
	rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
	hl_cb_put(cb);
	if (rc) {
		dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
			cb_handle);
		return -EINVAL;
	}

	rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
	if (rc < 0)
		return rc; /* Invalid handle */

	if (rc == 0)
		dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);

	return 0;
}

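/*
 * hl_cb_info() - query a CB: either its device VA (if it is MMU mapped) or
 * the number of command submissions currently using it.
 */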
static int hl_cb_info(struct hl_mem_mgr *mmg,
			u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
	struct hl_cb *cb;
	int rc = 0;

	cb = hl_cb_get(mmg, handle);
	if (!cb) {
		dev_err(mmg->dev,
			"CB info failed, no match to handle 0x%llx\n", handle);
		return -EINVAL;
	}

	if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
		if (cb->is_mmu_mapped) {
			*device_va = cb->virtual_addr;
		} else {
			dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		*usage_cnt = atomic_read(&cb->cs_cnt);
	}

out:
	hl_cb_put(cb);
	return rc;
}

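/*
 * hl_cb_ioctl() - command buffer IOCTL handler.
 *
 * Dispatches the HL_CB_OP_CREATE, HL_CB_OP_DESTROY and HL_CB_OP_INFO
 * requests coming from user space.
 */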
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0, device_va = 0;
	enum hl_device_status status;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(&hpriv->mem_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle,
				args->in.flags,
				&usage_cnt,
				&device_va);
		if (rc)
			break;

		memset(&args->out, 0, sizeof(args->out));

		if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA)
			args->out.device_va = device_va;
		else
			args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

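/* Look up a CB by handle and take a reference on it; release with hl_cb_put(). */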
struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf)
		return NULL;

	return buf->private;
}

void hl_cb_put(struct hl_cb *cb)
{
	hl_mmap_mem_buf_put(cb->buf);
}

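/*
 * hl_cb_kernel_create() - allocate a CB on behalf of the kernel driver and
 * return a referenced pointer to it, or NULL on failure.
 */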
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
	/* hl_cb_get should never fail here */
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		goto destroy_cb;
	}

	return cb;

destroy_cb:
	hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);

	return NULL;
}

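/* Pre-allocate the device-wide pool of kernel-context CBs. */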
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

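/* Free all CBs that are still sitting in the device-wide CB pool. */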
int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

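/*
 * hl_cb_va_pool_init() - reserve a block of device virtual addresses in the
 * host VA range and expose it through a gen_pool for CB MMU mappings.
 */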
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
					CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
	if (!ctx->cb_va_pool_base) {
		rc = -ENOMEM;
		goto err_pool_destroy;
	}

	rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_unreserve_va_block;
	}

	return 0;

err_unreserve_va_block:
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

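/* Destroy the per-context CB VA pool and release the reserved VA block. */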
void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
}