Identifier search results for "iova" across the Linux source tree. Line
numbers and the "in function()" context come from the index; "(local)",
"(argument)", "(member)" and "(struct)" mark what kind of symbol each hit
is. Every file's list is truncated, as the trailing "[all …]" markers show.

/linux/drivers/staging/media/tegra-vde/
iommu.c
     24  struct iova *iova;  in tegra_vde_iommu_map()  (local)
     34  if (!iova)  in tegra_vde_iommu_map()
     37  addr = iova_dma_addr(&vde->iova, iova);  in tegra_vde_iommu_map()
     42  __free_iova(&vde->iova, iova);  in tegra_vde_iommu_map()
     46  *iovap = iova;  in tegra_vde_iommu_map()
     55  dma_addr_t addr = iova_dma_addr(&vde->iova, iova);  in tegra_vde_iommu_unmap()
     58  __free_iova(&vde->iova, iova);  in tegra_vde_iommu_unmap()
     64  struct iova *iova;  in tegra_vde_iommu_init()  (local)
    103  iova = reserve_iova(&vde->iova, 0x60000000 >> shift,  in tegra_vde_iommu_init()
    105  if (!iova) {  in tegra_vde_iommu_init()
  [all …]

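The iommu.c hits above trace the usual allocate-then-map pattern: carve an
IOVA range with alloc_iova(), point the IOMMU page tables at it, and give the
range back with __free_iova() on failure. A minimal sketch of that pattern;
the helper name and parameters ("my_map_sgt", "my_domain", "my_iovad") are
hypothetical, while alloc_iova(), iova_dma_addr(), iommu_map_sgtable() and
__free_iova() are the real kernel API:

#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/scatterlist.h>

static int my_map_sgt(struct iommu_domain *my_domain,
		      struct iova_domain *my_iovad,
		      struct sg_table *sgt, size_t size,
		      struct iova **iovap)
{
	unsigned long shift = iova_shift(my_iovad);
	unsigned long end = my_domain->geometry.aperture_end;
	struct iova *iova;
	dma_addr_t addr;
	size_t mapped;

	/* carve a size-aligned IOVA range out of the rbtree allocator */
	iova = alloc_iova(my_iovad, size >> shift, end >> shift, true);
	if (!iova)
		return -ENOMEM;

	/* point the IOMMU page tables at the scatterlist */
	addr = iova_dma_addr(my_iovad, iova);
	mapped = iommu_map_sgtable(my_domain, addr, sgt,
				   IOMMU_READ | IOMMU_WRITE);
	if (!mapped) {
		__free_iova(my_iovad, iova);	/* give the range back */
		return -ENXIO;
	}

	*iovap = iova;
	return 0;
}
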
dmabuf-cache.c
     28  struct iova *iova;  (member)
     39  tegra_vde_iommu_unmap(entry->vde, entry->iova);  in tegra_vde_release_entry()
     73  struct iova *iova;  in tegra_vde_dmabuf_cache_map()  (local)
     91  *addrp = iova_dma_addr(&vde->iova, entry->iova);  in tegra_vde_dmabuf_cache_map()
    125  err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);  in tegra_vde_dmabuf_cache_map()
    129  *addrp = iova_dma_addr(&vde->iova, iova);  in tegra_vde_dmabuf_cache_map()
    132  iova = NULL;  in tegra_vde_dmabuf_cache_map()
    139  entry->iova = iova;  in tegra_vde_dmabuf_cache_map()

/linux/drivers/iommu/
iova.c
    303  static void free_iova_mem(struct iova *iova)  in free_iova_mem()  (argument)
    400  struct iova *iova = to_iova(node);  in private_find_iova()  (local)
    430  struct iova *iova;  in find_iova()  (local)
    469  struct iova *iova;  in free_iova()  (local)
    687  struct iova *iova, *tmp;  in put_iova_domain()  (local)
    703  struct iova *iova = to_iova(node);  in __is_range_overlap()  (local)
    713  struct iova *iova;  in alloc_and_init_iova()  (local)
    728  struct iova *iova;  in __insert_new_range()  (local)
    731  if (iova)  in __insert_new_range()
    738  __adjust_overlap_range(struct iova *iova,  in __adjust_overlap_range()  (argument)
  [all …]

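Together these hits cover the allocator's whole lifecycle. A sketch of how a
driver drives it, assuming a hypothetical "my_iova_demo()" and illustrative
numbers; init_iova_domain(), reserve_iova(), alloc_iova(), find_iova(),
__free_iova(), put_iova_domain() and the iova_cache_get()/put() pair are the
real API from <linux/iova.h>:

#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/sizes.h>

static int my_iova_demo(void)
{
	struct iova_domain my_iovad;
	struct iova *iova;
	int ret;

	ret = iova_cache_get();	/* backing kmem_cache for struct iova */
	if (ret)
		return ret;

	/* 4 KiB granule; allocations start at pfn 0x100 */
	init_iova_domain(&my_iovad, SZ_4K, 0x100);

	/* keep a window at 0x60000000 out of the allocator's hands */
	reserve_iova(&my_iovad, 0x60000000 >> 12, 0x60ffffff >> 12);

	/* 16 pages below 4 GiB, size-aligned */
	iova = alloc_iova(&my_iovad, 16, DMA_BIT_MASK(32) >> 12, true);
	if (iova) {
		/* find_iova() maps a pfn back to its descriptor */
		WARN_ON(find_iova(&my_iovad, iova->pfn_lo) != iova);
		__free_iova(&my_iovad, iova);
	}

	put_iova_domain(&my_iovad);
	iova_cache_put();
	return 0;
}
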
tegra-gart.c
     62  iova < gart->iovmm_end; \
     63  iova += GART_PAGE_SIZE)
     73  unsigned long iova)  in gart_read_pte()  (argument)
     85  unsigned long iova;  in do_gart_setup()  (local)
     87  for_each_gart_pte(gart, iova)  in do_gart_setup()
    197  unsigned long iova)  in __gart_iommu_unmap()  (argument)
    204  gart_set_pte(gart, iova, 0);  in __gart_iommu_unmap()
    226  dma_addr_t iova)  in gart_iommu_iova_to_phys()  (argument)
    235  pte = gart_read_pte(gart, iova);  in gart_iommu_iova_to_phys()
    299  unsigned long iova;  in tegra_gart_suspend()  (local)
  [all …]

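Lines 62-63 are the tail of the for_each_gart_pte() iterator used at line 87:
a for-loop macro that walks the GART aperture one page at a time. Its shape,
with the first clause inferred from the driver rather than shown in the hits
above:

#define for_each_gart_pte(gart, iova)				\
	for (iova = (gart)->iovmm_base;				\
	     iova < (gart)->iovmm_end;				\
	     iova += GART_PAGE_SIZE)
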
io-pgtable-arm-v7s.c
    543  iova += pgsize;  in arm_v7s_map_pages()
    739  iova += pgsize;  in arm_v7s_unmap_pages()
    943  iova = 0;  in arm_v7s_do_selftests()
    946  if (ops->map(ops, iova, iova, size, IOMMU_READ |  in arm_v7s_do_selftests()
    953  if (!ops->map(ops, iova, iova + size, size,  in arm_v7s_do_selftests()
    957  if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))  in arm_v7s_do_selftests()
    960  iova += SZ_16M;  in arm_v7s_do_selftests()
    983  iova = 0;  in arm_v7s_do_selftests()
    994  if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))  in arm_v7s_do_selftests()
    997  if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))  in arm_v7s_do_selftests()
  [all …]

exynos-iommu.c
     97  #define section_offs(iova) (iova & (SECT_SIZE - 1))  (argument)
     99  #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))  (argument)
    101  #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))  (argument)
    108  return iova >> SECT_ORDER;  in lv1ent_offset()
    322  iova += SPAGE_SIZE;  in __sysmmu_tlb_invalidate_entry()
    329  writel((iova & SPAGE_MASK),  in __sysmmu_tlb_invalidate_entry()
    513  sysmmu_iova_t iova)  in sysmmu_tlb_invalidate_flpdcache()  (argument)
    967  iova);  in lv1set_section()
    974  iova);  in lv1set_section()
   1151  ent = page_entry(ent, iova);  in exynos_iommu_unmap()
  [all …]

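The three *_offs() macros above mask off the offset within a 1 MiB section, a
64 KiB large page and a 4 KiB small page respectively (the standard ARM
short-descriptor block sizes). Standalone demonstration:

#include <stdio.h>
#include <stdint.h>

#define SECT_SIZE  (1u << 20)	/* 1 MiB section */
#define LPAGE_SIZE (1u << 16)	/* 64 KiB large page */
#define SPAGE_SIZE (1u << 12)	/* 4 KiB small page */

#define section_offs(iova) ((iova) & (SECT_SIZE - 1))
#define lpage_offs(iova)   ((iova) & (LPAGE_SIZE - 1))
#define spage_offs(iova)   ((iova) & (SPAGE_SIZE - 1))

int main(void)
{
	uint32_t iova = 0x12345678;

	printf("sect=0x%x lpage=0x%x spage=0x%x\n",
	       section_offs(iova), lpage_offs(iova), spage_offs(iova));
	/* prints: sect=0x45678 lpage=0x5678 spage=0x678 */
	return 0;
}
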
io-pgtable-arm.c
    704  unsigned long iova)  in arm_lpae_iova_to_phys()  (argument)
   1241  unsigned long iova;  in arm_lpae_run_tests()  (local)
   1271  iova = 0;  in arm_lpae_run_tests()
   1275  if (ops->map(ops, iova, iova, size, IOMMU_READ |  in arm_lpae_run_tests()
   1282  if (!ops->map(ops, iova, iova + size, size,  in arm_lpae_run_tests()
   1286  if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))  in arm_lpae_run_tests()
   1289  iova += SZ_1G;  in arm_lpae_run_tests()
   1305  iova = 0;  in arm_lpae_run_tests()
   1316  if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))  in arm_lpae_run_tests()
   1319  if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))  in arm_lpae_run_tests()
  [all …]

tegra-smmu.c
    223  unsigned long iova)  in smmu_flush_tlb_section()  (argument)
    238  unsigned long iova)  in smmu_flush_tlb_group()  (argument)
    560  return pt + iova_pt_index(iova);  in tegra_smmu_pte_offset()
    584  unsigned int pde = iova_pd_index(iova);  in as_get_pte()
    628  unsigned int pde = iova_pd_index(iova);  in tegra_smmu_pte_put_use()
    640  tegra_smmu_set_pde(as, iova, 0);  in tegra_smmu_pte_put_use()
    664  unsigned long iova, gfp_t gfp,  in as_get_pde_page()  (argument)
    667  unsigned int pde = iova_pd_index(iova);  in as_get_pde_page()
    723  tegra_smmu_pte_get_use(as, iova);  in __tegra_smmu_map()
    752  tegra_smmu_pte_put_use(as, iova);  in __tegra_smmu_unmap()
  [all …]

rockchip-iommu.c
    369  dma_addr_t iova;  in rk_iommu_zap_lines()  (local)
    371  for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)  in rk_iommu_zap_lines()
    576  dte_index = rk_iova_dte_index(iova);  in log_iova()
    577  pte_index = rk_iova_pte_index(iova);  in log_iova()
    614  dma_addr_t iova;  in rk_iommu_irq()  (local)
    641  &iova,  in rk_iommu_irq()
    644  log_iova(iommu, i, iova);  in rk_iommu_irq()
    679  dma_addr_t iova)  in rk_iommu_iova_to_phys()  (argument)
    736  dma_addr_t iova, size_t size)  in rk_iommu_zap_iova_first_last()  (argument)
    745  dma_addr_t iova)  in rk_dte_get_page_table()  (argument)
  [all …]

sun50i-iommu.c
    164  return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);  in sun50i_iova_get_dte_index()
    169  return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);  in sun50i_iova_get_pte_index()
    482  dma_addr_t iova, gfp_t gfp)  in sun50i_dte_get_page_table()  (argument)
    541  &iova, &page_phys, &paddr, prot);  in sun50i_iommu_map()
    578  dma_addr_t iova)  in sun50i_iommu_iova_to_phys()  (argument)
    596  sun50i_iova_get_page_offset(iova);  in sun50i_iommu_iova_to_phys()
    795  phys_addr_t iova;  in sun50i_iommu_handle_pt_irq()  (local)
    801  iova = iommu_read(iommu, addr_reg);  in sun50i_iommu_handle_pt_irq()
    812  return iova;  in sun50i_iommu_handle_pt_irq()
    818  phys_addr_t iova;  in sun50i_iommu_handle_perm_irq()  (local)
  [all …]

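The first two hits extract the page-directory and page-table indices from an
IOVA with FIELD_GET(). Equivalent arithmetic by hand; the mask values below
are assumptions for illustration, not quoted from the driver:

#include <stdio.h>
#include <stdint.h>

#define MY_DTE_MASK 0xfff00000u	/* bits 31:20 -> directory index */
#define MY_PTE_MASK 0x000ff000u	/* bits 19:12 -> table index */

static uint32_t my_field_get(uint32_t mask, uint32_t val)
{
	/* shift the field down by the mask's lowest set bit */
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t iova = 0x12345678;

	printf("dte=%u pte=%u\n",
	       my_field_get(MY_DTE_MASK, iova),
	       my_field_get(MY_PTE_MASK, iova));
	/* prints: dte=291 pte=69 */
	return 0;
}
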
virtio-iommu.c
     59  struct interval_tree_node iova;  (member)
    324  mapping->iova.start = iova;  in viommu_add_mapping()
    325  mapping->iova.last = iova + size - 1;  in viommu_add_mapping()
    346  unsigned long iova, size_t size)  in viommu_del_mappings()  (argument)
    362  if (mapping->iova.start < iova)  in viommu_del_mappings()
    369  unmapped += mapping->iova.last - mapping->iova.start + 1;  in viommu_del_mappings()
    740  .virt_start = cpu_to_le64(iova),  in viommu_map()
    775  .virt_start = cpu_to_le64(iova),  in viommu_unmap()
    784  dma_addr_t iova)  in viommu_iova_to_phys()  (argument)
    793  node = interval_tree_iter_first(&vdomain->mappings, iova, iova);  in viommu_iova_to_phys()
  [all …]

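virtio-iommu keeps a software copy of every mapping in an interval tree so it
can answer iova_to_phys() without asking the device, as the line-793 hit
shows. A sketch of that bookkeeping, assuming hypothetical "my_*" names;
interval_tree_insert() and interval_tree_iter_first() are the real helpers
from <linux/interval_tree.h>:

#include <linux/interval_tree.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;	/* .start/.last in IOVA space */
};

static int my_add_mapping(struct rb_root_cached *mappings,
			  unsigned long iova, phys_addr_t paddr, size_t size)
{
	struct my_mapping *m = kzalloc(sizeof(*m), GFP_KERNEL);

	if (!m)
		return -ENOMEM;
	m->paddr = paddr;
	m->iova.start = iova;
	m->iova.last = iova + size - 1;	/* inclusive, as in the hits above */
	interval_tree_insert(&m->iova, mappings);
	return 0;
}

static phys_addr_t my_iova_to_phys(struct rb_root_cached *mappings,
				   unsigned long iova)
{
	struct interval_tree_node *node;
	struct my_mapping *m;

	/* stabbing query: any interval containing [iova, iova] */
	node = interval_tree_iter_first(mappings, iova, iova);
	if (!node)
		return 0;
	m = container_of(node, struct my_mapping, iova);
	return m->paddr + (iova - m->iova.start);
}
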
dma-iommu.c
     31  dma_addr_t iova;  (member)
    464  if (!iova)  in iommu_dma_alloc_iova()
    518  dma_addr_t iova;  in __iommu_dma_map()  (local)
    527  if (!iova)  in __iommu_dma_map()
    614  dma_addr_t iova;  in __iommu_dma_alloc_noncontiguous()  (local)
    638  if (!iova)  in __iommu_dma_alloc_noncontiguous()
    838  return iova;  in iommu_dma_map_page()
    982  dma_addr_t iova;  in iommu_dma_map_sg()  (local)
   1040  if (!iova) {  in iommu_dma_map_sg()
   1354  if (!iova)  in iommu_dma_get_msi_page()
  [all …]

/linux/drivers/fpga/
dfl-afu-dma-region.c
    125  u64 iova, u64 size)  in dma_region_check_iova()  (argument)
    127  if (!size && region->iova != iova)  in dma_region_check_iova()
    130  return (region->iova <= iova) &&  in dma_region_check_iova()
    131  (region->length + region->iova >= iova + size);  in dma_region_check_iova()
    150  (unsigned long long)region->iova);  in afu_dma_region_add()
    164  if (region->iova < this->iova)  in afu_dma_region_add()
    166  else if (region->iova > this->iova)  in afu_dma_region_add()
    217  if (region->iova)  in afu_dma_region_destroy()
    262  if (iova < region->iova)  in afu_dma_region_find()
    264  else if (iova > region->iova)  in afu_dma_region_find()
  [all …]

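dma_region_check_iova() above is a containment test: with size == 0 it asks
for an exact region start, otherwise whether [iova, iova + size) lies inside
the region. Standalone restatement; the rewrite into subtractions (so that
iova + size cannot wrap) is my caveat, not the driver's code:

#include <stdbool.h>
#include <stdint.h>

struct my_region { uint64_t iova, length; };

static bool my_region_contains(const struct my_region *r,
			       uint64_t iova, uint64_t size)
{
	/* size == 0 queries for an exact region start, as in the driver */
	if (!size)
		return r->iova == iova;
	/* equivalent to iova + size <= r->iova + r->length, wrap-safe */
	return iova >= r->iova && size <= r->length &&
	       iova - r->iova <= r->length - size;
}
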
/linux/include/linux/
iova.h
     19  struct iova {  (struct)
     85  struct iova anchor; /* rbtree lookup anchor */
    101  static inline unsigned long iova_size(struct iova *iova)  in iova_size()  (argument)
    103  return iova->pfn_hi - iova->pfn_lo + 1;  in iova_size()
    118  return iova & iova_mask(iovad);  in iova_offset()
    126  static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)  in iova_dma_addr()  (argument)
    128  return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);  in iova_dma_addr()
    133  return iova >> iova_shift(iovad);  in iova_pfn()
    141  void __free_iova(struct iova_domain *iovad, struct iova *iova);
    174  static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)  in __free_iova()  (argument)
  [all …]

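Every helper in this header is shift/mask arithmetic on the domain granule:
iova_shift() is log2 of the granule, iova_dma_addr() shifts pfn_lo back up
into a bus address, and iova_size() counts an inclusive pfn range. Standalone
restatement with a 4 KiB granule; the "my_*" names are stand-ins for the
kernel's:

#include <stdio.h>
#include <stdint.h>

struct my_iova        { uint64_t pfn_hi, pfn_lo; };	/* inclusive range */
struct my_iova_domain { uint64_t granule; };		/* power of two */

static unsigned my_shift(const struct my_iova_domain *d)
{
	return __builtin_ctzll(d->granule);	/* log2(granule) */
}

static uint64_t my_size(const struct my_iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static uint64_t my_dma_addr(const struct my_iova_domain *d,
			    const struct my_iova *iova)
{
	return iova->pfn_lo << my_shift(d);
}

int main(void)
{
	struct my_iova_domain d = { .granule = 4096 };
	struct my_iova iova = { .pfn_lo = 0x100, .pfn_hi = 0x10f };

	/* 16 pages starting at bus address 0x100000 */
	printf("addr=%#llx size=%llu pages\n",
	       (unsigned long long)my_dma_addr(&d, &iova),
	       (unsigned long long)my_size(&iova));
	return 0;
}
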
iommu.h
    442  unsigned long iova, size_t size,
    509  unsigned long iova, int flags);
    539  unsigned long iova, size_t size)  in iommu_iotlb_gather_is_disjoint()  (argument)
    559  unsigned long iova, size_t size)  in iommu_iotlb_gather_add_range()  (argument)
    561  unsigned long end = iova + size - 1;  in iommu_iotlb_gather_add_range()
    563  if (gather->start > iova)  in iommu_iotlb_gather_add_range()
    564  gather->start = iova;  in iommu_iotlb_gather_add_range()
    582  unsigned long iova, size_t size)  in iommu_iotlb_gather_add_page()  (argument)
    747  unsigned long iova, size_t size)  in iommu_unmap()  (argument)
    753  unsigned long iova, int gfp_order,  in iommu_unmap_fast()  (argument)
  [all …]

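iommu_iotlb_gather_add_range() above merely widens the gather's [start, end]
window to cover every range unmapped in a batch, so a single TLB flush can
cover them all. A standalone restatement; the kernel initializes start to
ULONG_MAX and end to 0 so the first range always wins:

#include <stdint.h>

struct my_gather { uint64_t start, end; };	/* end is inclusive */

static void my_gather_add_range(struct my_gather *g,
				uint64_t iova, uint64_t size)
{
	uint64_t end = iova + size - 1;

	if (g->start > iova)
		g->start = iova;	/* grow downwards */
	if (g->end < end)
		g->end = end;		/* grow upwards */
}
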
io-pgtable.h
     41  void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
     44  unsigned long iova, size_t granule, void *cookie);
    156  int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
    158  int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
    161  size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
    163  size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
    167  unsigned long iova);
    223  io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,  in io_pgtable_tlb_flush_walk()  (argument)
    227  iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);  in io_pgtable_tlb_flush_walk()
    232  struct iommu_iotlb_gather * gather, unsigned long iova,  in io_pgtable_tlb_add_page()  (argument)
  [all …]

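The ops structure above is the whole contract: a driver allocates ops for a
page-table format, then maps, resolves and unmaps purely through the function
pointers, much as the arm/arm-v7s selftests in the earlier hits do. A sketch
under the assumption that cfg arrives pre-filled (ias/oas, pgsize_bitmap and
TLB callbacks); the calls themselves are the real API:

#include <linux/bug.h>
#include <linux/io-pgtable.h>
#include <linux/sizes.h>

static int my_pgtable_demo(struct io_pgtable_cfg *cfg)
{
	struct io_pgtable_ops *ops;
	int ret;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, cfg, NULL);
	if (!ops)
		return -ENOMEM;

	/* identity-map one 4 KiB granule at 1 GiB, read+write */
	ret = ops->map(ops, SZ_1G, SZ_1G, SZ_4K,
		       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (!ret) {
		/* a lookup inside the mapping must land on the paddr */
		WARN_ON(ops->iova_to_phys(ops, SZ_1G + 42) != SZ_1G + 42);
		ops->unmap(ops, SZ_1G, SZ_4K, NULL);
	}

	free_io_pgtable_ops(ops);
	return ret;
}
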
/linux/include/trace/events/
iommu.h
     90  TP_ARGS(iova, paddr, size),
     93  __field(u64, iova)
     99  __entry->iova = iova;
    113  TP_ARGS(iova, size, unmapped_size),
    116  __field(u64, iova)
    122  __entry->iova = iova;
    136  TP_ARGS(dev, iova, flags),
    141  __field(u64, iova)
    148  __entry->iova = iova;
    154  __entry->iova, __entry->flags
  [all …]

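The fragments above (TP_ARGS, __field, __entry assignments) belong to the
map/unmap/io_page_fault tracepoints. A reconstructed skeleton of the first
one; the TP_printk format string is approximated from the fields rather than
quoted from the header:

TRACE_EVENT(map,

	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

	TP_ARGS(iova, paddr, size),

	TP_STRUCT__entry(
		__field(u64, iova)
		__field(u64, paddr)
		__field(size_t, size)
	),

	TP_fast_assign(
		__entry->iova = iova;
		__entry->paddr = paddr;
		__entry->size = size;
	),

	TP_printk("iova=0x%016llx paddr=0x%016llx size=%zu",
		  __entry->iova, __entry->paddr, __entry->size)
);
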
/linux/drivers/vfio/
vfio_iommu_type1.c
    334  if (iova < vpfn->iova)  in vfio_find_vpfn()
    336  else if (iova > vpfn->iova)  in vfio_find_vpfn()
    355  if (new->iova < vpfn->iova)  in vfio_link_pfn()
    379  vpfn->iova = iova;  in vfio_add_to_pfn_list()
   1035  entry->iova = *iova;  in unmap_unpin_fast()
   1079  dma_addr_t iova = dma->iova, end = dma->iova + dma->size;  in vfio_unmap_unpin()  (local)
   1260  if (dma->iova < iova)  in vfio_iova_dirty_bitmap()
   1418  nb_unmap.iova = dma->iova;  in vfio_dma_do_unmap()
   1627  dma->iova = iova;  in vfio_dma_do_map()
   1721  iova = dma->iova;  in vfio_iommu_replay()
  [all …]

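vfio_find_vpfn() and vfio_link_pfn() above are the stock kernel rbtree walk
keyed by iova: binary search on lookup, find-the-leaf-then-rebalance on
insert. A generic restatement; the "my_*" names are hypothetical, while the
rb_* calls are the real <linux/rbtree.h> API:

#include <linux/rbtree.h>

struct my_vpfn {
	struct rb_node	node;
	unsigned long	iova;
};

static struct my_vpfn *my_find_vpfn(struct rb_root *root, unsigned long iova)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct my_vpfn *vpfn = rb_entry(n, struct my_vpfn, node);

		if (iova < vpfn->iova)
			n = n->rb_left;
		else if (iova > vpfn->iova)
			n = n->rb_right;
		else
			return vpfn;
	}
	return NULL;
}

static void my_link_vpfn(struct rb_root *root, struct my_vpfn *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	/* descend to the leaf where the new key belongs */
	while (*link) {
		struct my_vpfn *vpfn = rb_entry(*link, struct my_vpfn, node);

		parent = *link;
		link = (new->iova < vpfn->iova) ? &(*link)->rb_left
						: &(*link)->rb_right;
	}
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);	/* rebalance */
}
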
/linux/drivers/staging/media/ipu3/
ipu3-dmamap.c
    102  struct iova *iova;  in imgu_dmamap_alloc()  (local)
    109  if (!iova)  in imgu_dmamap_alloc()
    146  __free_iova(&imgu->iova_domain, iova);  in imgu_dmamap_alloc()
    153  struct iova *iova;  in imgu_dmamap_unmap()  (local)
    155  iova = find_iova(&imgu->iova_domain,  in imgu_dmamap_unmap()
    157  if (WARN_ON(!iova))  in imgu_dmamap_unmap()
    163  __free_iova(&imgu->iova_domain, iova);  in imgu_dmamap_unmap()
    189  struct iova *iova;  in imgu_dmamap_map_sg()  (local)
    209  if (!iova)  in imgu_dmamap_map_sg()
    213  iova->pfn_lo, iova->pfn_hi);  in imgu_dmamap_map_sg()
  [all …]

ipu3-mmu.c
    157  iova >>= IPU3_PAGE_SHIFT;  in address_to_pte_idx()
    160  *l2pt_idx = iova & IPU3_L2PT_MASK;  in address_to_pte_idx()
    162  iova >>= IPU3_L2PT_SHIFT;  in address_to_pte_idx()
    165  *l1pt_idx = iova & IPU3_L1PT_MASK;  in address_to_pte_idx()
    264  iova, &paddr, size);  in imgu_mmu_map()
    269  iova, &paddr, size);  in imgu_mmu_map()
    278  iova += IPU3_PAGE_SIZE;  in imgu_mmu_map()
    333  imgu_mmu_unmap(info, iova, mapped);  in imgu_mmu_map_sg()
    392  iova, size);  in imgu_mmu_unmap()
    408  iova, unmapped_page);  in imgu_mmu_unmap()
  [all …]

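address_to_pte_idx() above is a two-level page-table walk in miniature: shift
off the in-page offset, mask out the L2 index, then shift and mask again for
the L1 index. Standalone restatement; the widths (4 KiB pages, 1024-entry
tables) are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define MY_PAGE_SHIFT 12
#define MY_L2PT_SHIFT 10
#define MY_L2PT_MASK  ((1u << MY_L2PT_SHIFT) - 1)
#define MY_L1PT_MASK  ((1u << MY_L2PT_SHIFT) - 1)

static void my_addr_to_idx(uint32_t iova, uint32_t *l1, uint32_t *l2)
{
	iova >>= MY_PAGE_SHIFT;		/* drop the in-page offset */
	*l2 = iova & MY_L2PT_MASK;	/* low 10 bits: L2 entry */
	iova >>= MY_L2PT_SHIFT;
	*l1 = iova & MY_L1PT_MASK;	/* next 10 bits: L1 entry */
}

int main(void)
{
	uint32_t l1, l2;

	my_addr_to_idx(0x12345678, &l1, &l2);
	printf("l1=%u l2=%u\n", l1, l2);	/* prints: l1=72 l2=837 */
	return 0;
}
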
/linux/drivers/vdpa/vdpa_user/
iova_domain.c
    109  while (iova <= last) {  in vduse_domain_map_bounce_page()
    118  iova += PAGE_SIZE;  in vduse_domain_map_bounce_page()
    129  while (iova <= last) {  in vduse_domain_unmap_bounce_page()
    132  iova += PAGE_SIZE;  in vduse_domain_unmap_bounce_page()
    185  iova += sz;  in vduse_domain_bounce()
    327  if (!iova)  in vduse_domain_map_page()
    339  return iova;  in vduse_domain_map_page()
    367  if (!iova || !orig)  in vduse_domain_alloc_coherent()
    371  if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,  in vduse_domain_alloc_coherent()
    379  *dma_addr = iova;  in vduse_domain_alloc_coherent()
  [all …]

/linux/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
     17  unsigned long iova, size_t size)  in etnaviv_context_unmap()  (argument)
     24  iova, size, pgsize);  in etnaviv_context_unmap()
     34  iova += unmapped_page;  in etnaviv_context_unmap()
     43  unsigned long orig_iova = iova;  in etnaviv_context_map()
     60  iova += pgsize;  in etnaviv_context_map()
     75  unsigned int da = iova;  in etnaviv_iommu_map()
     98  etnaviv_context_unmap(context, iova, da - iova);  in etnaviv_iommu_map()
    106  unsigned int da = iova;  in etnaviv_iommu_unmap()
    243  u32 iova;  in etnaviv_iommu_map_gem()  (local)
    247  mapping->iova = iova;  in etnaviv_iommu_map_gem()
  [all …]

/linux/drivers/gpu/drm/msm/
msm_iommu.c
     41  unmapped += ops->unmap(ops, iova, 4096, NULL);  in msm_iommu_pagetable_unmap()
     42  iova += 4096;  in msm_iommu_pagetable_unmap()
     58  u64 addr = iova;  in msm_iommu_pagetable_map()
     68  msm_iommu_pagetable_unmap(mmu, iova, mapped);  in msm_iommu_pagetable_map()
    146  unsigned long iova, int flags, void *arg);
    224  unsigned long iova, int flags, void *arg)  in msm_fault_handler()  (argument)
    263  if (iova & BIT_ULL(48))  in msm_iommu_map()
    264  iova |= GENMASK_ULL(63, 49);  in msm_iommu_map()
    276  if (iova & BIT_ULL(48))  in msm_iommu_unmap()
    277  iova |= GENMASK_ULL(63, 49);  in msm_iommu_unmap()
  [all …]

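The paired hits at lines 263-264 and 276-277 sign-extend bit 48 so that a
49-bit GPU virtual address becomes canonical in 64 bits before it reaches the
page tables. Standalone restatement with the two kernel bit macros spelled
out:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)        (1ULL << (n))
#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

static uint64_t my_sign_extend_49(uint64_t iova)
{
	/* if bit 48 is set, fill bits 63:49 with ones */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);
	return iova;
}

int main(void)
{
	printf("%#llx\n",
	       (unsigned long long)my_sign_extend_49(0x1000000000000ULL));
	/* prints: 0xffff000000000000 */
	return 0;
}
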
msm_gem_vma.c
     55  aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);  in msm_gem_purge_vma()
     64  if (!WARN_ON(!vma->iova))  in msm_gem_unmap_vma()
     76  if (WARN_ON(!vma->iova))  in msm_gem_map_vma()
     88  ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,  in msm_gem_map_vma()
    107  if (vma->iova)  in msm_gem_close_vma()
    111  vma->iova = 0;  in msm_gem_close_vma()
    123  if (WARN_ON(vma->iova))  in msm_gem_init_vma()
    134  vma->iova = vma->node.start << PAGE_SHIFT;  in msm_gem_init_vma()

/linux/drivers/infiniband/sw/rxe/
rxe_mr.c
     35  if (iova < set->iova || length > set->length ||  in mr_check_range()
     36  iova > set->iova + set->length - length)  in mr_check_range()
    234  set->iova = iova;  in rxe_mr_init_user()
    272  size_t offset = iova - set->iova + set->offset;  in lookup_iova()
    321  addr = (void *)(uintptr_t)iova;  in iova_to_vaddr()
    325  if (mr_check_range(mr, iova, length)) {  in iova_to_vaddr()
    331  lookup_iova(mr, iova, &m, &n, &offset);  in iova_to_vaddr()
    377  err = mr_check_range(mr, iova, length);  in rxe_mr_copy()
    438  u64 iova;  in copy_data()  (local)
    490  iova = sge->addr + offset;  in copy_data()
  [all …]