Lines matching refs: ops
All matches below are references to the per-device "const struct dma_map_ops *ops" pointer inside kernel/dma/mapping.c, the generic DMA mapping layer; the leading number on each match is the source line in that file.

112 		const struct dma_map_ops *ops)  in dma_go_direct()  argument
114 if (likely(!ops)) in dma_go_direct()
131 const struct dma_map_ops *ops) in dma_alloc_direct() argument
133 return dma_go_direct(dev, dev->coherent_dma_mask, ops); in dma_alloc_direct()
137 const struct dma_map_ops *ops) in dma_map_direct() argument
139 return dma_go_direct(dev, *dev->dma_mask, ops); in dma_map_direct()
146 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_page_attrs() local
154 if (dma_map_direct(dev, ops) || in dma_map_page_attrs()
158 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
168 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_page_attrs() local
171 if (dma_map_direct(dev, ops) || in dma_unmap_page_attrs()
174 else if (ops->unmap_page) in dma_unmap_page_attrs()
175 ops->unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
183 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_map_sg_attrs() local
191 if (dma_map_direct(dev, ops) || in __dma_map_sg_attrs()
195 ents = ops->map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
276 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_sg_attrs() local
280 if (dma_map_direct(dev, ops) || in dma_unmap_sg_attrs()
283 else if (ops->unmap_sg) in dma_unmap_sg_attrs()
284 ops->unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
291 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_resource() local
299 if (dma_map_direct(dev, ops)) in dma_map_resource()
301 else if (ops->map_resource) in dma_map_resource()
302 addr = ops->map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
312 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_resource() local
315 if (!dma_map_direct(dev, ops) && ops->unmap_resource) in dma_unmap_resource()
316 ops->unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
324 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_cpu() local
327 if (dma_map_direct(dev, ops)) in dma_sync_single_for_cpu()
329 else if (ops->sync_single_for_cpu) in dma_sync_single_for_cpu()
330 ops->sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
338 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_device() local
341 if (dma_map_direct(dev, ops)) in dma_sync_single_for_device()
343 else if (ops->sync_single_for_device) in dma_sync_single_for_device()
344 ops->sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
352 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_cpu() local
355 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_cpu()
357 else if (ops->sync_sg_for_cpu) in dma_sync_sg_for_cpu()
358 ops->sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
366 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_device() local
369 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_device()
371 else if (ops->sync_sg_for_device) in dma_sync_sg_for_device()
372 ops->sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
392 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_sgtable_attrs() local
394 if (dma_alloc_direct(dev, ops)) in dma_get_sgtable_attrs()
397 if (!ops->get_sgtable) in dma_get_sgtable_attrs()
399 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
431 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_can_mmap() local
433 if (dma_alloc_direct(dev, ops)) in dma_can_mmap()
435 return ops->mmap != NULL; in dma_can_mmap()
456 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_attrs() local
458 if (dma_alloc_direct(dev, ops)) in dma_mmap_attrs()
461 if (!ops->mmap) in dma_mmap_attrs()
463 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
469 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_required_mask() local
471 if (dma_alloc_direct(dev, ops)) in dma_get_required_mask()
473 if (ops->get_required_mask) in dma_get_required_mask()
474 return ops->get_required_mask(dev); in dma_get_required_mask()
491 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_attrs() local
502 if (dma_alloc_direct(dev, ops)) in dma_alloc_attrs()
504 else if (ops->alloc) in dma_alloc_attrs()
505 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
517 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_attrs() local
534 if (dma_alloc_direct(dev, ops)) in dma_free_attrs()
536 else if (ops->free) in dma_free_attrs()
537 ops->free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
544 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_alloc_pages() local
552 if (dma_alloc_direct(dev, ops)) in __dma_alloc_pages()
554 if (!ops->alloc_pages) in __dma_alloc_pages()
556 return ops->alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
573 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_free_pages() local
576 if (dma_alloc_direct(dev, ops)) in __dma_free_pages()
578 else if (ops->free_pages) in __dma_free_pages()
579 ops->free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
630 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_noncontiguous() local
636 if (ops && ops->alloc_noncontiguous) in dma_alloc_noncontiguous()
637 sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); in dma_alloc_noncontiguous()
661 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_noncontiguous() local
664 if (ops && ops->free_noncontiguous) in dma_free_noncontiguous()
665 ops->free_noncontiguous(dev, size, sgt, dir); in dma_free_noncontiguous()
674 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vmap_noncontiguous() local
677 if (ops && ops->alloc_noncontiguous) in dma_vmap_noncontiguous()
685 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vunmap_noncontiguous() local
687 if (ops && ops->alloc_noncontiguous) in dma_vunmap_noncontiguous()
695 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_noncontiguous() local
697 if (ops && ops->alloc_noncontiguous) { in dma_mmap_noncontiguous()
711 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_supported() local
717 if (!ops) in dma_supported()
719 if (!ops->dma_supported) in dma_supported()
721 return ops->dma_supported(dev, mask); in dma_supported()
768 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_max_mapping_size() local
771 if (dma_map_direct(dev, ops)) in dma_max_mapping_size()
773 else if (ops && ops->max_mapping_size) in dma_max_mapping_size()
774 size = ops->max_mapping_size(dev); in dma_max_mapping_size()
782 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_need_sync() local
784 if (dma_map_direct(dev, ops)) in dma_need_sync()
786 return ops->sync_single_for_cpu || ops->sync_single_for_device; in dma_need_sync()
792 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_merge_boundary() local
794 if (!ops || !ops->get_merge_boundary) in dma_get_merge_boundary()
797 return ops->get_merge_boundary(dev); in dma_get_merge_boundary()