// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"

#include <linux/slab.h>

static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);

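/*
 * get_pgt_info - look up the pgt_info of a shadow hop by its shadow address
 * in the per-context shadow hash. Returns NULL if no matching hop is found.
 */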
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}

static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.mmu_hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}

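/* free a hop, given only its shadow address */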
static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

	_free_hop(ctx, pgt_info);
}

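/*
 * alloc_hop - allocate a new hop: a physical hop table from the device page
 * table pool plus a zeroed shadow copy in host memory, tracked by a pgt_info
 * entry in the context's shadow hash. Returns the shadow address of the new
 * hop, or ULLONG_MAX on failure.
 */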
static u64 alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
					prop->mmu_hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
			prop->mmu_hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}

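/*
 * hop0 tables are pre-allocated per ASID: the physical copies reside
 * consecutively at mmu_pgt_addr and the shadow copies in mmu_shadow_hop0,
 * so both addresses are derived from the ASID rather than looked up in the
 * shadow hash.
 */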
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static void flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}

/* transform the value to a physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/*
	 * The value to write is actually the address of the next shadow hop +
	 * flags at the 12 LSBs.
	 * Hence in order to get the value to write to the physical PTE, we
	 * clear the 12 LSBs and translate the shadow hop to its associated
	 * physical hop, and add back the original 12 LSBs.
	 */
	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
				(val & FLAGS_MASK);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* do not transform the value to a physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
					u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					val);
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* zero the whole PTE, clearing the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	/* no need to transform the value to a physical address */
	write_final_pte(ctx, pte_addr, 0);
}

static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}

/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the hop was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		_free_hop(ctx, pgt_info);

	return num_of_ptes_left;
}

static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
					u64 virt_addr, u64 mask, u64 shift)
{
	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
			((virt_addr & mask) >> shift);
}

static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
					mmu_prop->hop0_shift);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
					mmu_prop->hop1_shift);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
					mmu_prop->hop2_shift);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
					mmu_prop->hop3_shift);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
					mmu_prop->hop4_shift);
}

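/*
 * get_next_hop_addr - return the address of the next hop from a PTE, or
 * ULLONG_MAX if the PTE is not marked present.
 */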
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	if (curr_pte & PAGE_PRESENT_MASK)
		return curr_pte & HOP_PHYS_ADDR_MASK;
	else
		return ULLONG_MAX;
}

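/*
 * get_alloc_next_hop_addr - same as get_next_hop_addr, but allocate a new hop
 * if the PTE is not present. *is_new_hop is written only in that case and
 * indicates whether the allocation succeeded.
 */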
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
						bool *is_new_hop)
{
	u64 hop_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}

/* translate a shadow address inside a hop to its physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
	u64 shadow_hop_addr = shadow_addr & ~page_mask;
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	if (shadow_hop_addr != get_hop0_addr(ctx))
		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}

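/*
 * dram_default_mapping_init - map the entire DRAM range of the context to the
 * default page. A single hop1 and hop2 are shared and enough hop3 tables are
 * allocated so that every hop3 PTE points to the default page (last +
 * present). Only used when DRAM virtual memory and default page mapping are
 * enabled and the context is not the kernel context.
 */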
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr, pte_val;
	int rc, i, j, hop3_allocated = 0;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return 0;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;

	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
	if (!ctx->dram_default_hops)
		return -ENOMEM;

	hop0_addr = get_hop0_addr(ctx);

	hop1_addr = alloc_hop(ctx);
	if (hop1_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 1\n");
		rc = -ENOMEM;
		goto hop1_err;
	}

	ctx->dram_default_hops[total_hops - 1] = hop1_addr;

	hop2_addr = alloc_hop(ctx);
	if (hop2_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 2\n");
		rc = -ENOMEM;
		goto hop2_err;
	}

	ctx->dram_default_hops[total_hops - 2] = hop2_addr;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		ctx->dram_default_hops[i] = alloc_hop(ctx);
		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
			rc = -ENOMEM;
			goto hop3_err;
		}
		hop3_allocated++;
	}

	/* need only pte 0 in hops 0 and 1 */
	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop0_addr, pte_val);

	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop1_addr, pte_val);
	get_pte(ctx, hop1_addr);

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
				PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, pte_val);
		get_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
			LAST_MASK | PAGE_PRESENT_MASK;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			write_final_pte(ctx, hop3_pte_addr, pte_val);
			get_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	flush(ctx);

	return 0;

hop3_err:
	for (i = 0 ; i < hop3_allocated ; i++)
		free_hop(ctx, ctx->dram_default_hops[i]);

	free_hop(ctx, hop2_addr);
hop2_err:
	free_hop(ctx, hop1_addr);
hop1_err:
	kfree(ctx->dram_default_hops);

	return rc;
}

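/*
 * dram_default_mapping_fini - tear down the default DRAM mapping created by
 * dram_default_mapping_init: clear all hop3/hop2/hop1 PTEs, drop their
 * reference counts and free the saved hop list.
 */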
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr;
	int i, j;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	hop0_addr = get_hop0_addr(ctx);
	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;
	hop1_addr = ctx->dram_default_hops[total_hops - 1];
	hop2_addr = ctx->dram_default_hops[total_hops - 2];

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			clear_pte(ctx, hop3_pte_addr);
			put_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		clear_pte(ctx, hop2_pte_addr);
		put_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	clear_pte(ctx, hop1_addr);
	put_pte(ctx, hop1_addr);
	clear_pte(ctx, hop0_addr);

	kfree(ctx->dram_default_hops);

	flush(ctx);
}

/**
 * hl_mmu_v1_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt
 *
 * Return: 0 for success, non-zero for failure.
 */
static int hl_mmu_v1_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	hdev->mmu_priv.dr.mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->mmu_hop0_tables_total_size,
			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}

	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
						prop->mmu_hop_table_size,
						GFP_KERNEL | __GFP_ZERO);
	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

	return rc;
}

/**
 * hl_mmu_v1_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
static void hl_mmu_v1_fini(struct hl_device *hdev)
{
	/* MMU H/W fini was already done in device hw_fini() */

	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
		gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

		/* Make sure that if we arrive here again without init being
		 * called we won't cause a kernel panic. This can happen, for
		 * example, if we fail during hard reset code at certain points
		 */
		hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
	}
}

/**
 * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a hash to hold all the page table hops related to this context,
 * and create the DRAM default page mapping if needed.
 * Return: 0 on success, non-zero otherwise.
 */
static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
{
	hash_init(ctx->mmu_shadow_hash);
	return dram_default_mapping_init(ctx);
}

/*
 * hl_mmu_v1_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free DRAM default page mapping hops
 */
static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	dram_default_mapping_fini(ctx);

	if (!hash_empty(ctx->mmu_shadow_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		_free_hop(ctx, pgt_info);
	}
}

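/*
 * _hl_mmu_v1_unmap - unmap a single page at virt_addr. Walks the shadow hops
 * down to the last PTE (hop3 for huge pages, hop4 otherwise), clears it and
 * releases every hop that becomes empty. For DRAM addresses with default page
 * mapping enabled, the PTE is restored to point at the default page instead.
 */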
static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
				u64 virt_addr, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte;
	bool is_huge, clear_hop3 = true;

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop1_addr == ULLONG_MAX)
		goto not_mapped;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop2_addr == ULLONG_MAX)
		goto not_mapped;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop3_addr == ULLONG_MAX)
		goto not_mapped;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	is_huge = curr_pte & LAST_MASK;

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev,
				"DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!is_huge) {
		hop4_addr = get_next_hop_addr(ctx, curr_pte);

		if (hop4_addr == ULLONG_MAX)
			goto not_mapped;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);

		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;

		clear_hop3 = false;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
				HOP_PHYS_ADDR_MASK) | LAST_MASK |
					PAGE_PRESENT_MASK;
		if (curr_pte == default_pte) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		if (!(curr_pte & PAGE_PRESENT_MASK)) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		write_final_pte(ctx, hop3_pte_addr, default_pte);
		put_pte(ctx, hop3_addr);
	} else {
		if (!(curr_pte & PAGE_PRESENT_MASK))
			goto not_mapped;

		if (hop4_addr)
			clear_pte(ctx, hop4_pte_addr);
		else
			clear_pte(ctx, hop3_pte_addr);

		if (hop4_addr && !put_pte(ctx, hop4_addr))
			clear_hop3 = true;

		if (!clear_hop3)
			goto mapped;

		clear_pte(ctx, hop3_pte_addr);

		if (put_pte(ctx, hop3_addr))
			goto mapped;

		clear_pte(ctx, hop2_pte_addr);

		if (put_pte(ctx, hop2_addr))
			goto mapped;

		clear_pte(ctx, hop1_pte_addr);

		if (put_pte(ctx, hop1_addr))
			goto mapped;

		clear_pte(ctx, hop0_pte_addr);
	}

mapped:
	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
		virt_addr);

	return -EINVAL;
}

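/*
 * _hl_mmu_v1_map - map a single page of size page_size at virt_addr to
 * phys_addr. Missing hops are allocated on the way down; for huge pages the
 * walk ends at hop3, otherwise at hop4. On failure, any newly allocated hops
 * are freed.
 */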
static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
			u32 page_size, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte = 0;
	bool hop1_new = false, hop2_new = false, hop3_new = false,
		hop4_new = false, is_huge;
	int rc = -ENOMEM;

	/*
	 * This mapping function can map a page or a huge page. For a huge page
	 * there are only 3 hops rather than 4. Currently the DRAM allocation
	 * uses huge pages only but user memory could have been allocated with
	 * one of the two page sizes. Since this is common code for all three
	 * cases, we need this huge page check.
	 */
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (page_size == prop->pmmu_huge.page_size) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	}

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
	if (hop1_addr == ULLONG_MAX)
		goto err;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
	if (hop2_addr == ULLONG_MAX)
		goto err;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
	if (hop3_addr == ULLONG_MAX)
		goto err;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	if (!is_huge) {
		hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
		if (hop4_addr == ULLONG_MAX)
			goto err;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);
		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
					HOP_PHYS_ADDR_MASK) | LAST_MASK |
						PAGE_PRESENT_MASK;

		if (curr_pte != default_pte) {
			dev_err(hdev->dev,
				"DRAM: mapping already exists for virt_addr 0x%llx\n",
					virt_addr);
			rc = -EINVAL;
			goto err;
		}

		if (hop1_new || hop2_new || hop3_new || hop4_new) {
			dev_err(hdev->dev,
				"DRAM mapping should not allocate more hops\n");
			rc = -EFAULT;
			goto err;
		}
	} else if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev,
			"mapping already exists for virt_addr 0x%llx\n",
				virt_addr);

		dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
		dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
		dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
		dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);

		if (!is_huge)
			dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
				*(u64 *) (uintptr_t) hop4_pte_addr,
				hop4_pte_addr);

		rc = -EINVAL;
		goto err;
	}

	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
			| PAGE_PRESENT_MASK;

	if (is_huge)
		write_final_pte(ctx, hop3_pte_addr, curr_pte);
	else
		write_final_pte(ctx, hop4_pte_addr, curr_pte);

	if (hop1_new) {
		curr_pte =
			(hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop0_pte_addr, curr_pte);
	}
	if (hop2_new) {
		curr_pte =
			(hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop1_pte_addr, curr_pte);
		get_pte(ctx, hop1_addr);
	}
	if (hop3_new) {
		curr_pte =
			(hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, curr_pte);
		get_pte(ctx, hop2_addr);
	}

	if (!is_huge) {
		if (hop4_new) {
			curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
					PAGE_PRESENT_MASK;
			write_pte(ctx, hop3_pte_addr, curr_pte);
			get_pte(ctx, hop3_addr);
		}

		get_pte(ctx, hop4_addr);
	} else {
		get_pte(ctx, hop3_addr);
	}

	return 0;

err:
	if (hop4_new)
		free_hop(ctx, hop4_addr);
	if (hop3_new)
		free_hop(ctx, hop3_addr);
	if (hop2_new)
		free_hop(ctx, hop2_addr);
	if (hop1_new)
		free_hop(ctx, hop1_addr);

	return rc;
}

/*
 * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
{

}

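/*
 * get_hop_pte_addr - dispatch to the per-hop PTE address helper according to
 * hop_num. Returns U64_MAX for an invalid hop number.
 */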
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx,
				struct hl_mmu_properties *mmu_prop,
				int hop_num, u64 hop_addr, u64 virt_addr)
{
	switch (hop_num) {
	case 0:
		return get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 1:
		return get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 2:
		return get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 3:
		return get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	case 4:
		return get_hop4_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	default:
		break;
	}
	return U64_MAX;
}

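/*
 * hl_mmu_v1_get_tlb_info - walk the physical page tables for virt_addr and
 * fill hops with the hop address, PTE address and PTE value of each level,
 * stopping at the last PTE. Returns -EINVAL if the address is outside all
 * MMU ranges and -EFAULT if the walk hits a missing PTE.
 */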
static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
				struct hl_mmu_hop_info *hops)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
	int i, used_hops;

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);
	is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
						prop->pmmu.start_addr,
						prop->pmmu.end_addr);
	is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
						prop->pmmu_huge.page_size,
						prop->pmmu_huge.start_addr,
						prop->pmmu_huge.end_addr);
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (is_pmmu_addr) {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	} else if (is_pmmu_h_addr) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		return -EINVAL;
	}

	used_hops = mmu_prop->num_hops;

	/* huge pages use one hop less */
	if (is_huge)
		used_hops--;

	hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
	hops->hop_info[0].hop_pte_addr =
			get_hop_pte_addr(ctx, mmu_prop, 0,
					hops->hop_info[0].hop_addr, virt_addr);
	hops->hop_info[0].hop_pte_val =
			hdev->asic_funcs->read_pte(hdev,
						hops->hop_info[0].hop_pte_addr);

	for (i = 1 ; i < used_hops ; i++) {
		hops->hop_info[i].hop_addr =
			get_next_hop_addr(ctx,
					hops->hop_info[i - 1].hop_pte_val);
		if (hops->hop_info[i].hop_addr == ULLONG_MAX)
			return -EFAULT;

		hops->hop_info[i].hop_pte_addr =
				get_hop_pte_addr(ctx, mmu_prop, i,
						hops->hop_info[i].hop_addr,
						virt_addr);
		hops->hop_info[i].hop_pte_val =
				hdev->asic_funcs->read_pte(hdev,
						hops->hop_info[i].hop_pte_addr);

		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
			return -EFAULT;

		if (hops->hop_info[i].hop_pte_val & LAST_MASK)
			break;
	}

	/* if we passed over all hops then no last hop was found */
	if (i == mmu_prop->num_hops)
		return -EFAULT;

	if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
		return -EFAULT;

	hops->used_hops = i + 1;

	return 0;
}

/*
 * hl_mmu_v1_set_funcs - set the MMU functions for working with MMU v1
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the MMU functions structure to fill
 */
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
	mmu->init = hl_mmu_v1_init;
	mmu->fini = hl_mmu_v1_fini;
	mmu->ctx_init = hl_mmu_v1_ctx_init;
	mmu->ctx_fini = hl_mmu_v1_ctx_fini;
	mmu->map = _hl_mmu_v1_map;
	mmu->unmap = _hl_mmu_v1_unmap;
	mmu->flush = flush;
	mmu->swap_out = hl_mmu_v1_swap_out;
	mmu->swap_in = hl_mmu_v1_swap_in;
	mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
}