Lines matching refs:range in mm/mmu_notifier.c (Linux kernel). Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" marks how `range` is used there.
95 const struct mmu_notifier_range *range, in mn_itree_inv_start_range() argument
103 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
104 range->end - 1); in mn_itree_inv_start_range()
118 const struct mmu_notifier_range *range) in mn_itree_inv_next() argument
123 range->start, range->end - 1); in mn_itree_inv_next()
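The two helpers above (mn_itree_inv_start_range() at line 95 and mn_itree_inv_next() at line 118) walk the interval tree of mmu_interval_notifier subscriptions overlapping the invalidated range. Note the `range->end - 1`: an mmu_notifier_range is half-open [start, end), while the kernel's interval tree takes inclusive last addresses. A minimal user-space model of that conversion (the struct and the overlap test are simplified stand-ins, not the kernel types):

	#include <stdbool.h>
	#include <stdio.h>

	struct model_range {
		unsigned long start;	/* inclusive, like mmu_notifier_range.start */
		unsigned long end;	/* exclusive, like mmu_notifier_range.end */
	};

	/* The kernel's interval_tree_iter_first()/next() take an inclusive
	 * last address, hence the end - 1 in the listing above. */
	static bool overlaps(unsigned long node_start, unsigned long node_last,
			     unsigned long qstart, unsigned long qlast)
	{
		return node_start <= qlast && qstart <= node_last;
	}

	int main(void)
	{
		struct model_range r = { .start = 0x1000, .end = 0x2000 };

		/* A node covering 0x1000-0x1fff overlaps the query ... */
		printf("%d\n", overlaps(0x1000, 0x1fff, r.start, r.end - 1)); /* 1 */
		/* ... but a node starting exactly at r.end does not. */
		printf("%d\n", overlaps(0x2000, 0x2fff, r.start, r.end - 1)); /* 0 */
		return 0;
	}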
264 struct mmu_notifier_range range = { in mn_itree_release() local
276 mn_itree_inv_start_range(subscriptions, &range, &cur_seq); in mn_itree_release()
278 interval_sub = mn_itree_inv_next(interval_sub, &range)) { in mn_itree_release()
279 ret = interval_sub->ops->invalidate(interval_sub, &range, in mn_itree_release()
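mn_itree_release() (lines 264-279) is the process-teardown path: it describes the whole address space with one on-stack range and fires every interval subscription's ->invalidate(). A sketch of that body, reconstructed approximately from mainline (field values and helpers assume kernel context via <linux/mmu_notifier.h>; details drift between versions):

	struct mmu_interval_notifier *interval_sub;
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,	/* teardown may sleep */
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,			/* cover the whole mm */
	};
	unsigned long cur_seq;
	bool ret;

	for (interval_sub = mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range, cur_seq);
		WARN_ON(!ret);	/* a blockable invalidation must not fail */
	}
	mn_itree_inv_end(subscriptions);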
445 const struct mmu_notifier_range *range) in mn_itree_invalidate() argument
451 mn_itree_inv_start_range(subscriptions, range, &cur_seq); in mn_itree_invalidate()
453 interval_sub = mn_itree_inv_next(interval_sub, range)) { in mn_itree_invalidate()
456 ret = interval_sub->ops->invalidate(interval_sub, range, in mn_itree_invalidate()
459 if (WARN_ON(mmu_notifier_range_blockable(range))) in mn_itree_invalidate()
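mn_itree_invalidate() (lines 445-459) runs the same walk for an arbitrary range. The interesting part is the failure handling hinted at by line 459: ->invalidate() may only fail for a non-blockable range, so a failure while the range is blockable trips the WARN_ON. A hedged reconstruction of the loop:

	for (interval_sub = mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range, cur_seq);
		if (!ret) {
			/* Refusing a blockable invalidation is a driver bug;
			 * warn and press on rather than abort the walk. */
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			/* Non-blockable: undo the sequence bump for the
			 * subscriptions already visited and bail out. */
			mn_itree_inv_end(subscriptions);
			return -EAGAIN;
		}
	}
	return 0;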
477 struct mmu_notifier_range *range) in mn_hlist_invalidate_range_start() argument
491 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_range_start()
493 _ret = ops->invalidate_range_start(subscription, range); in mn_hlist_invalidate_range_start()
494 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_range_start()
499 !mmu_notifier_range_blockable(range) ? in mn_hlist_invalidate_range_start()
502 WARN_ON(mmu_notifier_range_blockable(range) || in mn_hlist_invalidate_range_start()
528 range); in mn_hlist_invalidate_range_start()
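In mn_hlist_invalidate_range_start() (lines 477-528), every list-based notifier's ->invalidate_range_start() is invoked under SRCU. For non-blockable ranges the call is bracketed with non_block_start()/non_block_end() so a sleeping callback is caught, and the only legitimate failure is -EAGAIN in non-blockable context; line 528 belongs to the unwind that calls ->invalidate_range_end() on the notifiers that had already succeeded. Per subscription, approximately:

	if (ops->invalidate_range_start) {
		int _ret;

		if (!mmu_notifier_range_blockable(range))
			non_block_start();	/* trap callbacks that sleep anyway */
		_ret = ops->invalidate_range_start(subscription, range);
		if (!mmu_notifier_range_blockable(range))
			non_block_end();
		if (_ret) {
			pr_info("%pS callback failed with %d in %sblockable context.\n",
				ops->invalidate_range_start, _ret,
				!mmu_notifier_range_blockable(range) ? "non-" : "");
			/* Only -EAGAIN on a non-blockable range is legitimate. */
			WARN_ON(mmu_notifier_range_blockable(range) ||
				_ret != -EAGAIN);
			ret = _ret;
		}
	}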
536 int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) in __mmu_notifier_invalidate_range_start() argument
539 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_start()
543 ret = mn_itree_invalidate(subscriptions, range); in __mmu_notifier_invalidate_range_start()
548 return mn_hlist_invalidate_range_start(subscriptions, range); in __mmu_notifier_invalidate_range_start()
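__mmu_notifier_invalidate_range_start() (lines 536-548) is the entry point that ties the two mechanisms together: interval-tree subscriptions are invalidated first, and only if they all succeed are the classic hlist notifiers started. A sketch of the whole function, reconstructed approximately from mainline:

	int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
	{
		struct mmu_notifier_subscriptions *subscriptions =
			range->mm->notifier_subscriptions;
		int ret;

		if (subscriptions->has_itree) {
			ret = mn_itree_invalidate(subscriptions, range);
			if (ret)
				return ret;	/* itree said -EAGAIN: give up early */
		}
		if (!hlist_empty(&subscriptions->list))
			return mn_hlist_invalidate_range_start(subscriptions, range);
		return 0;
	}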
554 struct mmu_notifier_range *range, bool only_end) in mn_hlist_invalidate_end() argument
577 range->mm, in mn_hlist_invalidate_end()
578 range->start, in mn_hlist_invalidate_end()
579 range->end); in mn_hlist_invalidate_end()
581 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_end()
584 range); in mn_hlist_invalidate_end()
585 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_end()
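mn_hlist_invalidate_end() (lines 554-585) finishes the critical section for the list-based notifiers. Lines 577-579 pass the unpacked range to the legacy ->invalidate_range() callback, which is skipped when only_end says the caller already flushed under the page-table lock; ->invalidate_range_end() then gets the same non_block bracketing as the start side. Per subscription, approximately:

	if (!only_end && subscription->ops->invalidate_range)
		subscription->ops->invalidate_range(subscription, range->mm,
						    range->start, range->end);
	if (subscription->ops->invalidate_range_end) {
		if (!mmu_notifier_range_blockable(range))
			non_block_start();
		subscription->ops->invalidate_range_end(subscription, range);
		if (!mmu_notifier_range_blockable(range))
			non_block_end();
	}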
592 void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range, in __mmu_notifier_invalidate_range_end() argument
596 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_end()
603 mn_hlist_invalidate_end(subscriptions, range, only_end); in __mmu_notifier_invalidate_range_end()
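__mmu_notifier_invalidate_range_end() (lines 592-603) mirrors the start side: end the interval-tree sequence first, then run the hlist end callbacks. A short hedged sketch (the lockdep map name is mainline's at the time of writing; it may differ in other versions):

	void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
						 bool only_end)
	{
		struct mmu_notifier_subscriptions *subscriptions =
			range->mm->notifier_subscriptions;

		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		if (subscriptions->has_itree)
			mn_itree_inv_end(subscriptions);
		if (!hlist_empty(&subscriptions->list))
			mn_hlist_invalidate_end(subscriptions, range, only_end);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	}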
1113 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range) in mmu_notifier_range_update_to_read_only() argument
1115 if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA) in mmu_notifier_range_update_to_read_only()
1118 return range->vma->vm_flags & VM_READ; in mmu_notifier_range_update_to_read_only()
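mmu_notifier_range_update_to_read_only() (lines 1113-1118) lets a callback distinguish "the range merely became read-only" from a full teardown: it returns true only for an MMU_NOTIFY_PROTECTION_VMA event on a readable VMA. A hypothetical driver callback using it (my_wrprotect_secondary() and my_zap_secondary() are made-up names for this sketch):

	static int my_invalidate_range_start(struct mmu_notifier *subscription,
					     const struct mmu_notifier_range *range)
	{
		if (mmu_notifier_range_update_to_read_only(range)) {
			/* Pages stay readable: write-protect the secondary
			 * mappings instead of tearing them down. */
			my_wrprotect_secondary(range->start, range->end);
		} else {
			my_zap_secondary(range->start, range->end);
		}
		return 0;
	}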