Searched refs: granule (results 1 – 25 of 27), sorted by relevance

/linux/tools/testing/selftests/dma/
dma_map_benchmark.c
43 __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ member
56 int granule = 1; in main() local
82 granule = atoi(optarg); in main()
119 if (granule < 1 || granule > 1024) { in main()
137 map.granule = granule; in main()
145 threads, seconds, node, dir[directions], granule); in main()
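
In the selftest above, granule is the number of PAGE_SIZE pages mapped and unmapped per operation, and it is rejected outside 1..1024. A minimal userspace sketch of the same bound check and the resulting per-operation size; the hard-coded PAGE_SIZE is an assumption for illustration (real code would query sysconf(_SC_PAGESIZE)):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096          /* assumed page size, for illustration only */

    int main(int argc, char **argv)
    {
            int granule = argc > 1 ? atoi(argv[1]) : 1;

            /* same bound as the selftest: 1..1024 pages per map/unmap */
            if (granule < 1 || granule > 1024) {
                    fprintf(stderr, "granule must be within [1, 1024]\n");
                    return EXIT_FAILURE;
            }

            printf("each map/unmap covers %d page(s) = %lu bytes\n",
                   granule, (unsigned long)granule * PAGE_SIZE);
            return 0;
    }
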
/linux/include/linux/
iova.h
73 unsigned long granule; /* pfn granularity for this domain */ member
108 return __ffs(iovad->granule); in iova_shift()
113 return iovad->granule - 1; in iova_mask()
123 return ALIGN(size, iovad->granule); in iova_align()
154 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
214 unsigned long granule, in init_iova_domain() argument
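
In iova.h above, the domain's granule is the smallest IOVA page size (a power of two), and the shift, mask and align helpers are all derived from it. A userspace mirror of those three helpers for a hypothetical 4 KiB granule; this is not the kernel header, it just restates the arithmetic shown in the excerpt:

    #include <stdio.h>

    /* userspace mirrors of iova_shift()/iova_mask()/iova_align() */
    static unsigned long iova_shift(unsigned long granule) { return __builtin_ctzl(granule); } /* __ffs() */
    static unsigned long iova_mask(unsigned long granule)  { return granule - 1; }
    static unsigned long iova_align(unsigned long granule, unsigned long size)
    {
            return (size + iova_mask(granule)) & ~iova_mask(granule);  /* ALIGN(size, granule) */
    }

    int main(void)
    {
            unsigned long granule = 4096;   /* assumed: a typical 4 KiB IOVA granule */

            printf("shift=%lu mask=0x%lx align(5000)=%lu\n",
                   iova_shift(granule), iova_mask(granule), iova_align(granule, 5000));
            /* prints: shift=12 mask=0xfff align(5000)=8192 */
            return 0;
    }
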
io-pgtable.h
41 void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
44 unsigned long iova, size_t granule, void *cookie);
224 size_t size, size_t granule) in io_pgtable_tlb_flush_walk() argument
227 iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); in io_pgtable_tlb_flush_walk()
233 size_t granule) in io_pgtable_tlb_add_page() argument
236 iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); in io_pgtable_tlb_add_page()
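
The two callbacks above are how the generic io-pgtable code asks an IOMMU driver to invalidate TLB entries; granule is the page size the affected entries were created with. A no-op sketch of such a callback table, in the spirit of the dummy_tlb_flush()/dummy_tlb_add_page() self-test hooks that appear later in these results; it assumes the table type is struct iommu_flush_ops (as in recent kernels) and compiles only in kernel context:

    #include <linux/iommu.h>
    #include <linux/io-pgtable.h>

    static void demo_tlb_flush_all(void *cookie)
    {
            /* invalidate every TLB entry for this page-table instance */
    }

    static void demo_tlb_flush_walk(unsigned long iova, size_t size,
                                    size_t granule, void *cookie)
    {
            /* invalidate [iova, iova + size), built from granule-sized entries */
    }

    static void demo_tlb_add_page(struct iommu_iotlb_gather *gather,
                                  unsigned long iova, size_t granule, void *cookie)
    {
            /* queue one granule-sized leaf entry; here just flush it directly */
            demo_tlb_flush_walk(iova, granule, granule, cookie);
    }

    /* installed via cfg.tlb = &demo_flush_ops before alloc_io_pgtable_ops() */
    static const struct iommu_flush_ops demo_flush_ops = {
            .tlb_flush_all  = demo_tlb_flush_all,
            .tlb_flush_walk = demo_tlb_flush_walk,
            .tlb_add_page   = demo_tlb_add_page,
    };
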
/linux/drivers/iommu/arm/arm-smmu/
qcom_iommu.c
158 size_t granule, bool leaf, void *cookie) in qcom_iommu_tlb_inv_range_nosync() argument
174 iova += granule; in qcom_iommu_tlb_inv_range_nosync()
175 } while (s -= granule); in qcom_iommu_tlb_inv_range_nosync()
180 size_t granule, void *cookie) in qcom_iommu_tlb_flush_walk() argument
182 qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie); in qcom_iommu_tlb_flush_walk()
187 unsigned long iova, size_t granule, in qcom_iommu_tlb_add_page() argument
190 qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); in qcom_iommu_tlb_add_page()
arm-smmu.c
295 iova += granule; in arm_smmu_tlb_inv_range_s1()
296 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
302 iova += granule >> 12; in arm_smmu_tlb_inv_range_s1()
303 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
323 iova += granule >> 12; in arm_smmu_tlb_inv_range_s2()
324 } while (size -= granule); in arm_smmu_tlb_inv_range_s2()
328 size_t granule, void *cookie) in arm_smmu_tlb_inv_walk_s1() argument
346 arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, in arm_smmu_tlb_add_page_s1()
351 size_t granule, void *cookie) in arm_smmu_tlb_inv_walk_s2() argument
362 arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, in arm_smmu_tlb_add_page_s2()
[all …]
/linux/kernel/dma/
map_benchmark.c
41 __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ member
62 int npages = map->bparam.granule; in map_benchmark_thread()
239 if (map->bparam.granule < 1 || map->bparam.granule > 1024) { in map_benchmark_ioctl()
/linux/drivers/iommu/
msm_iommu.c
139 size_t granule, bool leaf, void *cookie) in __flush_iotlb_range() argument
159 iova += granule; in __flush_iotlb_range()
160 } while (temp_size -= granule); in __flush_iotlb_range()
171 size_t granule, void *cookie) in __flush_iotlb_walk() argument
173 __flush_iotlb_range(iova, size, granule, false, cookie); in __flush_iotlb_walk()
177 unsigned long iova, size_t granule, void *cookie) in __flush_iotlb_page() argument
179 __flush_iotlb_range(iova, granule, granule, true, cookie); in __flush_iotlb_page()
io-pgtable-arm.c
741 unsigned long granule, page_sizes; in arm_lpae_restrict_pgsizes() local
752 granule = PAGE_SIZE; in arm_lpae_restrict_pgsizes()
754 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); in arm_lpae_restrict_pgsizes()
756 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); in arm_lpae_restrict_pgsizes()
758 granule = 0; in arm_lpae_restrict_pgsizes()
760 switch (granule) { in arm_lpae_restrict_pgsizes()
1195 size_t granule, void *cookie) in dummy_tlb_flush() argument
1202 unsigned long iova, size_t granule, in dummy_tlb_add_page() argument
1205 dummy_tlb_flush(iova, granule, granule, cookie); in dummy_tlb_add_page()
iova.c
48 init_iova_domain(struct iova_domain *iovad, unsigned long granule, in init_iova_domain() argument
56 BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule)); in init_iova_domain()
62 iovad->granule = granule; in init_iova_domain()
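
init_iova_domain() above insists that the granule be a power of two no larger than PAGE_SIZE. A hedged usage sketch (kernel context; the final argument is assumed to be the starting PFN, as in recent kernels, and its value here is arbitrary):

    #include <linux/iova.h>
    #include <linux/sizes.h>

    static struct iova_domain demo_iovad;

    static void demo_iova_setup(void)
    {
            /* SZ_4K is a power of two and <= PAGE_SIZE on typical configurations */
            init_iova_domain(&demo_iovad, SZ_4K, 1 /* start_pfn, arbitrary */);
    }
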
io-pgtable-arm-v7s.c
878 size_t granule, void *cookie) in dummy_tlb_flush() argument
885 unsigned long iova, size_t granule, in dummy_tlb_add_page() argument
888 dummy_tlb_flush(iova, granule, granule, cookie); in dummy_tlb_add_page()
dma-iommu.c
82 return cookie->iovad.granule; in cookie_msi_granule()
159 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) in iommu_put_dma_cookie()
208 start += iovad->granule; in cookie_init_hw_msi_region()
384 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
ipmmu-vmsa.c
322 size_t granule, void *cookie) in ipmmu_tlb_flush() argument
mtk_iommu.c
227 size_t granule, in mtk_iommu_tlb_flush_range_sync() argument
/linux/arch/arm64/kvm/hyp/
pgtable.c
69 u64 granule = kvm_granule_size(level); in kvm_block_mapping_supported() local
74 if (granule > (end - addr)) in kvm_block_mapping_supported()
77 if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule)) in kvm_block_mapping_supported()
80 return IS_ALIGNED(addr, granule); in kvm_block_mapping_supported()
405 u64 granule = kvm_granule_size(level), phys = data->phys; in hyp_map_walker_try_leaf() local
414 data->phys += granule; in hyp_map_walker_try_leaf()
663 u64 granule = kvm_granule_size(level), phys = data->phys; in stage2_map_walker_try_leaf() local
691 granule); in stage2_map_walker_try_leaf()
694 mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule); in stage2_map_walker_try_leaf()
700 data->phys += granule; in stage2_map_walker_try_leaf()
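
In pgtable.c above, kvm_granule_size(level) is how much address space one entry covers at a given table level, and kvm_block_mapping_supported() only allows a block mapping when the remaining range is at least that large and both addresses are granule-aligned. A worked sketch of the level-to-size relationship for the common 4 KiB translation granule; the shift arithmetic is an assumption about that configuration, not the kernel helper itself:

    #include <stdio.h>

    /*
     * With 4 KiB pages, each table level resolves 9 address bits, so each
     * higher level covers 512 times more: level 3 = 4 KiB page,
     * level 2 = 2 MiB block, level 1 = 1 GiB block.
     */
    static unsigned long long granule_size_4k(unsigned int level)
    {
            return 1ULL << (12 + 9 * (3 - level));
    }

    int main(void)
    {
            for (unsigned int level = 1; level <= 3; level++)
                    printf("level %u -> %llu bytes\n", level, granule_size_4k(level));
            return 0;
    }
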
/linux/Documentation/ia64/
aliasing.rst
72 identity mapping only when the entire granule supports cacheable
75 Therefore, kern_memmap contains only full granule-sized regions that
162 If the EFI memory map reports that the entire granule supports
166 If the granule contains non-WB memory, but we can cover the
200 which uses a granule-sized UC mapping. This granule will cover some
236 at 0xA0000 prevents use of a WB granule. The UC mapping causes
/linux/arch/ia64/
Kconfig.debug
12 Select "16MB" for a small granule size.
13 Select "64MB" for a large granule size. This is the current default.
/linux/arch/arm64/kvm/hyp/nvhe/
mem_protect.c
281 u64 granule = kvm_granule_size(level); in host_stage2_adjust_range() local
282 cur.start = ALIGN_DOWN(addr, granule); in host_stage2_adjust_range()
283 cur.end = cur.start + granule; in host_stage2_adjust_range()
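
host_stage2_adjust_range() above snaps a faulting address to its enclosing block: round it down to a granule boundary, and the range ends one granule later. A small worked example of that arithmetic; the address and the 2 MiB block size are illustrative only:

    #include <stdio.h>

    #define ALIGN_DOWN(x, a)        ((x) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
            unsigned long long addr    = 0x40123456;        /* example faulting address */
            unsigned long long granule = 1ULL << 21;        /* assume a 2 MiB block */
            unsigned long long start   = ALIGN_DOWN(addr, granule);

            printf("range: [0x%llx, 0x%llx)\n", start, start + granule);
            /* prints: range: [0x40000000, 0x40200000) */
            return 0;
    }
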
/linux/drivers/gpu/drm/msm/
msm_iommu.c
130 size_t granule, void *cookie) in msm_iommu_tlb_flush_walk() argument
135 unsigned long iova, size_t granule, void *cookie) in msm_iommu_tlb_add_page() argument
/linux/drivers/iommu/amd/
io_pgtable.c
31 size_t granule, void *cookie) in v1_tlb_flush_walk() argument
36 unsigned long iova, size_t granule, in v1_tlb_add_page() argument
/linux/arch/powerpc/boot/dts/
microwatt.dts
70 reservation-granule-size = <64>;
/linux/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
1860 size_t granule, in __arm_smmu_tlb_inv_range() argument
1865 size_t inv_range = granule; in __arm_smmu_tlb_inv_range()
1879 cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); in __arm_smmu_tlb_inv_range()
1920 size_t granule, bool leaf, in arm_smmu_tlb_inv_range_domain() argument
1937 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_domain()
1947 size_t granule, bool leaf, in arm_smmu_tlb_inv_range_asid() argument
1959 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_asid()
1963 unsigned long iova, size_t granule, in arm_smmu_tlb_inv_page_nosync() argument
1969 iommu_iotlb_gather_add_page(domain, gather, iova, granule); in arm_smmu_tlb_inv_page_nosync()
1973 size_t granule, void *cookie) in arm_smmu_tlb_inv_walk() argument
[all …]
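
__arm_smmu_tlb_inv_range() above encodes the invalidation granule into the TTL (translation table level) hint of a range-based TLBI command: tg is log2 of the smallest supported page size, and the formula on line 1879 recovers which level holds entries of the given granule. A worked check of that arithmetic, assuming a 4 KiB page size (tg = 12), outside the driver:

    #include <stdio.h>

    /* integer log2 of a power of two, enough for this example */
    static unsigned int ilog2u(unsigned long long x)
    {
            unsigned int r = 0;

            while (x >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int tg = 12;   /* log2(4 KiB leaf page size) */
            unsigned long long granules[] = { 1ULL << 12, 1ULL << 21, 1ULL << 30 };

            for (int i = 0; i < 3; i++) {
                    unsigned int ttl = 4 - ((ilog2u(granules[i]) - 3) / (tg - 3));
                    /* 4 KiB -> ttl 3, 2 MiB -> ttl 2, 1 GiB -> ttl 1 */
                    printf("granule %llu -> ttl %u\n", granules[i], ttl);
            }
            return 0;
    }
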
arm-smmu-v3.h
745 size_t granule, bool leaf,
/linux/Documentation/arm64/
memory-tagging-extension.rst
19 allocation tag for each 16-byte granule in the physical address space.
186 4-bit tag per byte and correspond to a 16-byte MTE tag granule in the
189 **Note**: If ``addr`` is not aligned to a 16-byte granule, the kernel
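
The MTE notes above fix two sizes: one allocation tag per 16-byte granule of the address space, and one 4-bit tag per byte of the ptrace tag buffer, i.e. one buffer byte per granule. A quick arithmetic sketch of how many granules and buffer bytes a region needs; the address and length are illustrative only:

    #include <stdio.h>

    #define MTE_GRANULE_SIZE 16UL   /* one allocation tag covers 16 bytes */

    int main(void)
    {
            unsigned long addr = 0x10008, len = 100;        /* illustrative region */

            /* granule-align the bounds, then count the 16-byte granules covered */
            unsigned long start    = addr & ~(MTE_GRANULE_SIZE - 1);
            unsigned long end      = (addr + len + MTE_GRANULE_SIZE - 1) & ~(MTE_GRANULE_SIZE - 1);
            unsigned long granules = (end - start) / MTE_GRANULE_SIZE;

            /* one 4-bit tag per buffer byte, so one byte per granule */
            printf("%lu granules -> %lu tag-buffer bytes\n", granules, granules);
            return 0;
    }
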
/linux/Documentation/dev-tools/
kasan.rst
144 Internally, KASAN tracks memory state separately for each memory granule, which
149 For generic KASAN, the size of each memory granule is 8. The state of each
150 granule is encoded in one shadow byte. Those 8 bytes can be accessible,
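
For generic KASAN, as the excerpt states, each 8-byte granule of memory is described by one shadow byte, which gives the usual address-to-shadow mapping. A sketch of that mapping; the shadow offset is x86-64's generic-KASAN constant and is used here purely for illustration:

    #include <stdio.h>

    #define KASAN_GRANULE_SIZE  8UL
    #define SHADOW_OFFSET       0xdffffc0000000000UL   /* x86-64 value, illustration only */

    /* one shadow byte per 8-byte granule: shadow = (addr >> 3) + offset */
    static unsigned long shadow_of(unsigned long addr)
    {
            return (addr / KASAN_GRANULE_SIZE) + SHADOW_OFFSET;
    }

    int main(void)
    {
            unsigned long addr = 0xffff888012345678UL;      /* illustrative kernel address */

            printf("addr 0x%lx -> shadow byte at 0x%lx\n", addr, shadow_of(addr));
            return 0;
    }
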
/linux/drivers/gpu/drm/panfrost/
panfrost_mmu.c
371 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, in mmu_tlb_flush_walk() argument

Completed in 81 milliseconds
