Lines Matching refs:vm
102 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
107 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
110 if (vm->pasid) { in amdgpu_vm_set_pasid()
111 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
115 vm->pasid = 0; in amdgpu_vm_set_pasid()
119 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
124 vm->pasid = pasid; in amdgpu_vm_set_pasid()
136 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_lock() argument
138 mutex_lock(&vm->eviction_lock); in amdgpu_vm_eviction_lock()
139 vm->saved_flags = memalloc_noreclaim_save(); in amdgpu_vm_eviction_lock()
142 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_trylock() argument
144 if (mutex_trylock(&vm->eviction_lock)) { in amdgpu_vm_eviction_trylock()
145 vm->saved_flags = memalloc_noreclaim_save(); in amdgpu_vm_eviction_trylock()
151 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_unlock() argument
153 memalloc_noreclaim_restore(vm->saved_flags); in amdgpu_vm_eviction_unlock()
154 mutex_unlock(&vm->eviction_lock); in amdgpu_vm_eviction_unlock()
269 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
274 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
276 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
288 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
301 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
315 spin_lock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_invalidated()
316 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
317 spin_unlock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_invalidated()
331 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
346 spin_lock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_done()
347 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
348 spin_unlock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_done()
362 struct amdgpu_vm *vm, in amdgpu_vm_bo_base_init() argument
365 base->vm = vm; in amdgpu_vm_bo_base_init()
375 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
378 vm->bulk_moveable = false; in amdgpu_vm_bo_base_init()
435 struct amdgpu_vm *vm, uint64_t start, in amdgpu_vm_pt_start() argument
440 cursor->entry = &vm->root; in amdgpu_vm_pt_start()
561 struct amdgpu_vm *vm, in amdgpu_vm_pt_first_dfs() argument
568 amdgpu_vm_pt_start(adev, vm, 0, cursor); in amdgpu_vm_pt_first_dfs()
612 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ argument
613 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
628 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, in amdgpu_vm_get_pd_bo() argument
633 entry->tv.bo = &vm->root.bo->tbo; in amdgpu_vm_get_pd_bo()
663 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_del_from_lru_notify() local
665 if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_del_from_lru_notify()
666 vm->bulk_moveable = false; in amdgpu_vm_del_from_lru_notify()
680 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
684 if (vm->bulk_moveable) { in amdgpu_vm_move_to_lru_tail()
686 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
691 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); in amdgpu_vm_move_to_lru_tail()
694 list_for_each_entry(bo_base, &vm->idle, vm_status) { in amdgpu_vm_move_to_lru_tail()
702 &vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
706 &vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
710 vm->bulk_moveable = true; in amdgpu_vm_move_to_lru_tail()
726 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate_pt_bos() argument
733 vm->bulk_moveable &= list_empty(&vm->evicted); in amdgpu_vm_validate_pt_bos()
735 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { in amdgpu_vm_validate_pt_bos()
751 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate_pt_bos()
756 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate_pt_bos()
757 vm->evicting = false; in amdgpu_vm_validate_pt_bos()
758 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate_pt_bos()
773 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
775 return list_empty(&vm->evicted); in amdgpu_vm_ready()
792 struct amdgpu_vm *vm, in amdgpu_vm_clear_bo() argument
815 if (!vm->pte_support_ats) { in amdgpu_vm_clear_bo()
828 if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) { in amdgpu_vm_clear_bo()
851 r = vm->update_funcs->map_table(vmbo); in amdgpu_vm_clear_bo()
857 params.vm = vm; in amdgpu_vm_clear_bo()
860 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_clear_bo()
875 r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries, in amdgpu_vm_clear_bo()
898 r = vm->update_funcs->update(&params, vmbo, addr, 0, entries, in amdgpu_vm_clear_bo()
904 r = vm->update_funcs->commit(&params, NULL); in amdgpu_vm_clear_bo()
920 struct amdgpu_vm *vm, in amdgpu_vm_pt_create() argument
946 if (vm->use_cpu_for_update) in amdgpu_vm_pt_create()
951 if (vm->root.bo) in amdgpu_vm_pt_create()
952 bp.resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_pt_create()
959 if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) { in amdgpu_vm_pt_create()
1007 struct amdgpu_vm *vm, in amdgpu_vm_alloc_pts() argument
1019 r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); in amdgpu_vm_alloc_pts()
1028 amdgpu_vm_bo_base_init(entry, vm, pt_bo); in amdgpu_vm_alloc_pts()
1029 r = amdgpu_vm_clear_bo(adev, vm, pt, immediate); in amdgpu_vm_alloc_pts()
1069 struct amdgpu_vm *vm, in amdgpu_vm_free_pts() argument
1075 vm->bulk_moveable = false; in amdgpu_vm_free_pts()
1077 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) in amdgpu_vm_free_pts()
1185 bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); in amdgpu_vm_flush()
1289 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
1295 if (base->vm != vm) in amdgpu_vm_bo_find()
1340 struct amdgpu_vm *vm, in amdgpu_vm_update_pde() argument
1354 return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt, in amdgpu_vm_update_pde()
1367 struct amdgpu_vm *vm) in amdgpu_vm_invalidate_pds() argument
1372 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) in amdgpu_vm_invalidate_pds()
1390 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
1395 if (list_empty(&vm->relocated)) in amdgpu_vm_update_pdes()
1403 params.vm = vm; in amdgpu_vm_update_pdes()
1406 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_update_pdes()
1410 while (!list_empty(&vm->relocated)) { in amdgpu_vm_update_pdes()
1413 entry = list_first_entry(&vm->relocated, in amdgpu_vm_update_pdes()
1418 r = amdgpu_vm_update_pde(&params, vm, entry); in amdgpu_vm_update_pdes()
1423 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
1430 amdgpu_vm_invalidate_pds(adev, vm); in amdgpu_vm_update_pdes()
1460 params->vm->update_funcs->update(params, pt, pe, addr, count, incr, in amdgpu_vm_update_flags()
1553 amdgpu_vm_pt_start(adev, params->vm, start, &cursor); in amdgpu_vm_update_ptes()
1563 r = amdgpu_vm_alloc_pts(params->adev, params->vm, in amdgpu_vm_update_ptes()
1626 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_update_ptes() local
1638 vm->task_info.pid, in amdgpu_vm_update_ptes()
1639 vm->immediate.fence_context); in amdgpu_vm_update_ptes()
1668 amdgpu_vm_free_pts(adev, params->vm, &cursor); in amdgpu_vm_update_ptes()
1707 struct amdgpu_vm *vm, bool immediate, in amdgpu_vm_bo_update_mapping() argument
1726 params.vm = vm; in amdgpu_vm_bo_update_mapping()
1739 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_bo_update_mapping()
1740 if (vm->evicting) { in amdgpu_vm_bo_update_mapping()
1745 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_bo_update_mapping()
1748 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_bo_update_mapping()
1749 swap(vm->last_unlocked, tmp); in amdgpu_vm_bo_update_mapping()
1753 r = vm->update_funcs->prepare(&params, resv, sync_mode); in amdgpu_vm_bo_update_mapping()
1810 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_bo_update_mapping()
1816 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_bo_update_mapping()
1821 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, in amdgpu_vm_get_memory() argument
1826 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_vm_get_memory()
1832 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_vm_get_memory()
1838 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_vm_get_memory()
1844 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_vm_get_memory()
1850 spin_lock(&vm->invalidated_lock); in amdgpu_vm_get_memory()
1851 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_vm_get_memory()
1857 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_vm_get_memory()
1863 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_get_memory()
1882 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1894 resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_bo_update()
1925 vm->root.bo->tbo.base.resv)) in amdgpu_vm_bo_update()
1926 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1954 r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, in amdgpu_vm_bo_update()
1967 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_update()
2085 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
2102 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
2104 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
2147 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
2150 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_clear_freed()
2156 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
2157 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
2161 if (vm->pte_support_ats && in amdgpu_vm_clear_freed()
2165 r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, in amdgpu_vm_clear_freed()
2169 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
2201 struct amdgpu_vm *vm) in amdgpu_vm_handle_moved() argument
2208 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_vm_handle_moved()
2215 spin_lock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2216 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
2217 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
2220 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2235 spin_lock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2237 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2258 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
2267 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
2299 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
2304 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
2309 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_insert_map()
2311 list_move(&bo_va->base.vm_status, &vm->moved); in amdgpu_vm_bo_insert_map()
2340 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
2358 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
2426 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
2464 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
2487 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
2492 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
2494 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
2514 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
2539 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
2570 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
2579 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
2585 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
2594 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
2616 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
2619 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
2630 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
2637 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
2667 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_rmv() local
2671 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_rmv()
2672 vm->bulk_moveable = false; in amdgpu_vm_bo_rmv()
2684 spin_lock(&vm->invalidated_lock); in amdgpu_vm_bo_rmv()
2686 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_bo_rmv()
2690 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_rmv()
2693 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_rmv()
2697 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_rmv()
2698 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_rmv()
2722 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
2730 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
2734 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
2735 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2739 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2740 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2763 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
2765 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_invalidate()
2776 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_invalidate()
2904 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2906 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, in amdgpu_vm_wait_idle()
2911 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2925 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_init() argument
2931 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2933 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2934 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2935 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2936 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2937 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2938 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2939 spin_lock_init(&vm->invalidated_lock); in amdgpu_vm_init()
2940 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2941 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2944 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init()
2950 r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init()
2956 vm->pte_support_ats = false; in amdgpu_vm_init()
2957 vm->is_compute_context = false; in amdgpu_vm_init()
2959 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2963 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2964 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2968 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2969 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2971 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2972 vm->last_update = NULL; in amdgpu_vm_init()
2973 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2975 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2976 vm->evicting = false; in amdgpu_vm_init()
2978 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2991 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2993 r = amdgpu_vm_clear_bo(adev, vm, root, false); in amdgpu_vm_init()
2997 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2999 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
3004 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
3009 vm->root.bo = NULL; in amdgpu_vm_init()
3012 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
3013 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_init()
3016 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init()
3035 struct amdgpu_vm *vm) in amdgpu_vm_check_clean_reserved() argument
3042 if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo) in amdgpu_vm_check_clean_reserved()
3068 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
3073 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
3078 r = amdgpu_vm_check_clean_reserved(adev, vm); in amdgpu_vm_make_compute()
3085 if (pte_support_ats != vm->pte_support_ats) { in amdgpu_vm_make_compute()
3086 vm->pte_support_ats = pte_support_ats; in amdgpu_vm_make_compute()
3087 r = amdgpu_vm_clear_bo(adev, vm, in amdgpu_vm_make_compute()
3088 to_amdgpu_bo_vm(vm->root.bo), in amdgpu_vm_make_compute()
3095 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
3098 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
3099 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
3103 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
3105 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
3110 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
3112 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
3114 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
3115 vm->last_update = NULL; in amdgpu_vm_make_compute()
3116 vm->is_compute_context = true; in amdgpu_vm_make_compute()
3119 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); in amdgpu_vm_make_compute()
3124 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
3136 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_release_compute() argument
3138 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_release_compute()
3139 vm->is_compute_context = false; in amdgpu_vm_release_compute()
3151 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
3158 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
3160 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
3162 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_fini()
3163 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
3164 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
3166 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
3168 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
3173 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
3176 amdgpu_vm_free_pts(adev, vm, NULL); in amdgpu_vm_fini()
3179 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
3181 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini()
3182 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini()
3184 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
3188 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
3196 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
3198 amdgpu_vmid_free_reserved(adev, vm, i); in amdgpu_vm_fini()
3283 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, in amdgpu_vm_ioctl()
3295 r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); in amdgpu_vm_ioctl()
3299 r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); in amdgpu_vm_ioctl()
3303 amdgpu_bo_unreserve(fpriv->vm.root.bo); in amdgpu_vm_ioctl()
3304 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); in amdgpu_vm_ioctl()
3323 struct amdgpu_vm *vm; in amdgpu_vm_get_task_info() local
3328 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_task_info()
3329 if (vm) in amdgpu_vm_get_task_info()
3330 *task_info = vm->task_info; in amdgpu_vm_get_task_info()
3340 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
3342 if (vm->task_info.pid) in amdgpu_vm_set_task_info()
3345 vm->task_info.pid = current->pid; in amdgpu_vm_set_task_info()
3346 get_task_comm(vm->task_info.task_name, current); in amdgpu_vm_set_task_info()
3351 vm->task_info.tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
3352 get_task_comm(vm->task_info.process_name, current->group_leader); in amdgpu_vm_set_task_info()
3372 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
3376 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
3377 if (vm) { in amdgpu_vm_handle_fault()
3378 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
3379 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
3402 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
3403 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
3404 vm = NULL; in amdgpu_vm_handle_fault()
3406 if (!vm) in amdgpu_vm_handle_fault()
3436 r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr, in amdgpu_vm_handle_fault()
3442 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
3464 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
3482 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3491 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3500 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3509 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3518 spin_lock(&vm->invalidated_lock); in amdgpu_debugfs_vm_bo_info()
3519 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3528 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3533 spin_unlock(&vm->invalidated_lock); in amdgpu_debugfs_vm_bo_info()