Lines matching refs: page
61 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
74 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
82 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
95 if (unlikely(!trylock_page(page))) in isolate_movable_page()
98 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
101 mapping = page_mapping(page); in isolate_movable_page()
102 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
104 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
108 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
109 __SetPageIsolated(page); in isolate_movable_page()
110 unlock_page(page); in isolate_movable_page()
115 unlock_page(page); in isolate_movable_page()
117 put_page(page); in isolate_movable_page()
122 static void putback_movable_page(struct page *page) in putback_movable_page() argument
126 mapping = page_mapping(page); in putback_movable_page()
127 mapping->a_ops->putback_page(page); in putback_movable_page()
128 __ClearPageIsolated(page); in putback_movable_page()
141 struct page *page; in putback_movable_pages() local
142 struct page *page2; in putback_movable_pages()
144 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
145 if (unlikely(PageHuge(page))) { in putback_movable_pages()
146 putback_active_hugepage(page); in putback_movable_pages()
149 list_del(&page->lru); in putback_movable_pages()
155 if (unlikely(__PageMovable(page))) { in putback_movable_pages()
156 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_pages()
157 lock_page(page); in putback_movable_pages()
158 if (PageMovable(page)) in putback_movable_pages()
159 putback_movable_page(page); in putback_movable_pages()
161 __ClearPageIsolated(page); in putback_movable_pages()
162 unlock_page(page); in putback_movable_pages()
163 put_page(page); in putback_movable_pages()
165 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in putback_movable_pages()
166 page_is_file_lru(page), -thp_nr_pages(page)); in putback_movable_pages()
167 putback_lru_page(page); in putback_movable_pages()
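
The three helpers above (isolate_movable_page, putback_movable_page, putback_movable_pages) drive the non-LRU movable-page protocol: a driver tags its pages with __SetPageMovable() against a mapping whose address_space_operations supply isolate_page, putback_page and migratepage. A minimal sketch of that driver-side registration follows; the my_* names are placeholders, not taken from this listing, and the bodies only indicate what a real driver would do.

	/* Driver-side callbacks that isolate_movable_page()/putback_movable_page()
	 * reach through mapping->a_ops, matching the interface this listing uses. */
	static bool my_isolate(struct page *page, isolate_mode_t mode)
	{
		/* pin driver-private state; true means the page may be migrated */
		return true;
	}

	static void my_putback(struct page *page)
	{
		/* undo my_isolate() when migration is aborted */
	}

	static int my_migratepage(struct address_space *mapping,
				  struct page *newpage, struct page *page,
				  enum migrate_mode mode)
	{
		/* copy contents and driver metadata from page to newpage */
		return MIGRATEPAGE_SUCCESS;
	}

	static const struct address_space_operations my_aops = {
		.isolate_page	= my_isolate,
		.putback_page	= my_putback,
		.migratepage	= my_migratepage,
	};

	/* when a page becomes migratable: __SetPageMovable(page, my_mapping); */
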
175 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, in remove_migration_pte() argument
179 .page = old, in remove_migration_pte()
184 struct page *new; in remove_migration_pte()
188 VM_BUG_ON_PAGE(PageTail(page), page); in remove_migration_pte()
190 if (PageKsm(page)) in remove_migration_pte()
191 new = page; in remove_migration_pte()
193 new = page - pvmw.page->index + in remove_migration_pte()
199 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in remove_migration_pte()
257 if (PageTransHuge(page) && PageMlocked(page)) in remove_migration_pte()
258 clear_page_mlock(page); in remove_migration_pte()
271 void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes()
294 struct page *page; in __migration_entry_wait() local
305 page = pfn_swap_entry_to_page(entry); in __migration_entry_wait()
306 page = compound_head(page); in __migration_entry_wait()
313 if (!get_page_unless_zero(page)) in __migration_entry_wait()
316 put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); in __migration_entry_wait()
341 struct page *page; in pmd_migration_entry_wait() local
346 page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
347 if (!get_page_unless_zero(page)) in pmd_migration_entry_wait()
350 put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); in pmd_migration_entry_wait()
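
__migration_entry_wait() and pmd_migration_entry_wait() are what a faulting thread ends up in when it trips over a migration swap entry. Roughly, the generic fault path reaches them like this (paraphrased, not part of this listing):

	entry = pte_to_swp_entry(vmf->orig_pte);
	if (is_migration_entry(entry)) {
		/* sleep until the page under migration is unlocked, then retry */
		migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
		return 0;
	}
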
357 static int expected_page_refs(struct address_space *mapping, struct page *page) in expected_page_refs() argument
365 expected_count += is_device_private_page(page); in expected_page_refs()
367 expected_count += compound_nr(page) + page_has_private(page); in expected_page_refs()
386 int expected_count = expected_page_refs(mapping, &folio->page) + extra_count; in folio_migrate_mapping()
504 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
506 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
510 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
511 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_huge_page_move_mapping()
516 if (!page_ref_freeze(page, expected_count)) { in migrate_huge_page_move_mapping()
521 newpage->index = page->index; in migrate_huge_page_move_mapping()
522 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
528 page_ref_unfreeze(page, expected_count - 1); in migrate_huge_page_move_mapping()
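
folio_migrate_mapping() and migrate_huge_page_move_mapping() both rely on the same refcount-freeze handshake: work out how many references the page must have if only the page cache and the migrating caller hold it, bail out with -EAGAIN if anything else has a transient reference, freeze the count to zero while the i_pages slots are switched to the new page, then unfreeze minus the references that now belong to the new page. A condensed sketch of that pattern (not a drop-in replacement for either function):

	expected = expected_page_refs(mapping, page);	/* caller + cache + private refs */
	if (page_count(page) != expected)
		return -EAGAIN;				/* someone else holds a reference */
	if (!page_ref_freeze(page, expected))
		return -EAGAIN;				/* lost the race, caller retries */

	/* ... swap the i_pages entries over to newpage under xas_lock ... */

	page_ref_unfreeze(page, expected - 1);		/* drop the ref that moved to newpage */
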
573 cpupid = page_cpupid_xchg_last(&folio->page, -1); in folio_migrate_flags()
574 page_cpupid_xchg_last(&newfolio->page, cpupid); in folio_migrate_flags()
629 struct page *newpage, struct page *page, in migrate_page() argument
633 struct folio *folio = page_folio(page); in migrate_page()
691 struct page *newpage, struct page *page, enum migrate_mode mode, in __buffer_migrate_page() argument
698 if (!page_has_buffers(page)) in __buffer_migrate_page()
699 return migrate_page(mapping, newpage, page, mode); in __buffer_migrate_page()
702 expected_count = expected_page_refs(mapping, page); in __buffer_migrate_page()
703 if (page_count(page) != expected_count) in __buffer_migrate_page()
706 head = page_buffers(page); in __buffer_migrate_page()
737 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in __buffer_migrate_page()
741 attach_page_private(newpage, detach_page_private(page)); in __buffer_migrate_page()
751 migrate_page_copy(newpage, page); in __buffer_migrate_page()
753 migrate_page_states(newpage, page); in __buffer_migrate_page()
775 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
777 return __buffer_migrate_page(mapping, newpage, page, mode, false); in buffer_migrate_page()
788 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page_norefs() argument
790 return __buffer_migrate_page(mapping, newpage, page, mode, true); in buffer_migrate_page_norefs()
797 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
812 if (!clear_page_dirty_for_io(page)) in writeout()
824 remove_migration_ptes(page, page, false); in writeout()
826 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
830 lock_page(page); in writeout()
839 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
841 if (PageDirty(page)) { in fallback_migrate_page()
850 return writeout(mapping, page); in fallback_migrate_page()
857 if (page_has_private(page) && in fallback_migrate_page()
858 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
861 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
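
migrate_page(), buffer_migrate_page(), buffer_migrate_page_norefs() and fallback_migrate_page() are the stock a_ops->migratepage implementations that move_to_new_page() dispatches to. Filesystems normally just point their address_space_operations at one of them; an illustrative fragment (example_aops is a placeholder, the other fields are omitted):

	static const struct address_space_operations example_aops = {
		/* ... readpage/writepage/etc. ... */
		.migratepage	= buffer_migrate_page,	/* moves buffer_heads along with the page */
	};

Filesystems whose pages carry no attached metadata can use .migratepage = migrate_page directly, and an address space that sets no callback at all ends up in fallback_migrate_page().
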
875 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
880 bool is_lru = !__PageMovable(page); in move_to_new_page()
882 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
885 mapping = page_mapping(page); in move_to_new_page()
889 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
899 page, mode); in move_to_new_page()
902 page, mode); in move_to_new_page()
908 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
909 if (!PageMovable(page)) { in move_to_new_page()
911 __ClearPageIsolated(page); in move_to_new_page()
916 page, mode); in move_to_new_page()
918 !PageIsolated(page)); in move_to_new_page()
926 if (__PageMovable(page)) { in move_to_new_page()
927 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
933 __ClearPageIsolated(page); in move_to_new_page()
941 if (!PageMappingFlags(page)) in move_to_new_page()
942 page->mapping = NULL; in move_to_new_page()
952 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
958 bool is_lru = !__PageMovable(page); in __unmap_and_move()
960 if (!trylock_page(page)) { in __unmap_and_move()
980 lock_page(page); in __unmap_and_move()
983 if (PageWriteback(page)) { in __unmap_and_move()
1000 wait_on_page_writeback(page); in __unmap_and_move()
1017 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
1018 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
1032 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1048 if (!page->mapping) { in __unmap_and_move()
1049 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
1050 if (page_has_private(page)) { in __unmap_and_move()
1051 try_to_free_buffers(page); in __unmap_and_move()
1054 } else if (page_mapped(page)) { in __unmap_and_move()
1056 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
1057 page); in __unmap_and_move()
1058 try_to_migrate(page, 0); in __unmap_and_move()
1062 if (!page_mapped(page)) in __unmap_and_move()
1063 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1066 remove_migration_ptes(page, in __unmap_and_move()
1067 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); in __unmap_and_move()
1075 unlock_page(page); in __unmap_and_move()
1176 unsigned long private, struct page *page, in unmap_and_move() argument
1182 struct page *newpage = NULL; in unmap_and_move()
1184 if (!thp_migration_supported() && PageTransHuge(page)) in unmap_and_move()
1187 if (page_count(page) == 1) { in unmap_and_move()
1189 ClearPageActive(page); in unmap_and_move()
1190 ClearPageUnevictable(page); in unmap_and_move()
1191 if (unlikely(__PageMovable(page))) { in unmap_and_move()
1192 lock_page(page); in unmap_and_move()
1193 if (!PageMovable(page)) in unmap_and_move()
1194 __ClearPageIsolated(page); in unmap_and_move()
1195 unlock_page(page); in unmap_and_move()
1200 newpage = get_new_page(page, private); in unmap_and_move()
1204 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
1215 list_del(&page->lru); in unmap_and_move()
1229 if (likely(!__PageMovable(page))) in unmap_and_move()
1230 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in unmap_and_move()
1231 page_is_file_lru(page), -thp_nr_pages(page)); in unmap_and_move()
1237 put_page(page); in unmap_and_move()
1240 list_add_tail(&page->lru, ret); in unmap_and_move()
1271 struct page *hpage, int force, in unmap_and_move_huge_page()
1277 struct page *new_hpage; in unmap_and_move_huge_page()
1398 static inline int try_split_thp(struct page *page, struct page **page2, in try_split_thp() argument
1403 lock_page(page); in try_split_thp()
1404 rc = split_huge_page_to_list(page, from); in try_split_thp()
1405 unlock_page(page); in try_split_thp()
1407 list_safe_reset_next(page, *page2, lru); in try_split_thp()
1448 struct page *page; in migrate_pages() local
1449 struct page *page2; in migrate_pages()
1464 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1471 is_thp = PageTransHuge(page) && !PageHuge(page); in migrate_pages()
1472 nr_subpages = thp_nr_pages(page); in migrate_pages()
1475 if (PageHuge(page)) in migrate_pages()
1477 put_new_page, private, page, in migrate_pages()
1482 private, page, pass > 2, mode, in migrate_pages()
1508 if (!try_split_thp(page, &page2, from)) { in migrate_pages()
1528 if (!try_split_thp(page, &page2, from)) { in migrate_pages()
1598 struct page *alloc_migration_target(struct page *page, unsigned long private) in alloc_migration_target() argument
1603 struct page *new_page = NULL; in alloc_migration_target()
1611 nid = page_to_nid(page); in alloc_migration_target()
1613 if (PageHuge(page)) { in alloc_migration_target()
1614 struct hstate *h = page_hstate(compound_head(page)); in alloc_migration_target()
1620 if (PageTransHuge(page)) { in alloc_migration_target()
1629 zidx = zone_idx(page_zone(page)); in alloc_migration_target()
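
migrate_pages() consumes a list of already isolated pages together with an allocation callback; alloc_migration_target() is the stock callback, parameterized through a struct migration_target_control handed over in the private argument. Roughly how in-kernel callers wire this up (target_nid is a placeholder, the reason code is just an example, and the exact migrate_pages() argument list varies slightly between kernel versions):

	struct migration_target_control mtc = {
		.nid	  = target_nid,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};
	LIST_HEAD(pagelist);
	int err;

	/* ... isolate_lru_page()/isolate_huge_page() the pages onto pagelist ... */

	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG, NULL);
	if (err)
		putback_movable_pages(&pagelist);	/* whatever was not migrated */
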
1683 struct page *page; in add_page_for_migration() local
1695 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1697 err = PTR_ERR(page); in add_page_for_migration()
1698 if (IS_ERR(page)) in add_page_for_migration()
1702 if (!page) in add_page_for_migration()
1706 if (page_to_nid(page) == node) in add_page_for_migration()
1710 if (page_mapcount(page) > 1 && !migrate_all) in add_page_for_migration()
1713 if (PageHuge(page)) { in add_page_for_migration()
1714 if (PageHead(page)) { in add_page_for_migration()
1715 isolate_huge_page(page, pagelist); in add_page_for_migration()
1719 struct page *head; in add_page_for_migration()
1721 head = compound_head(page); in add_page_for_migration()
1738 put_page(page); in add_page_for_migration()
1871 struct page *page; in do_pages_stat_array() local
1879 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1881 err = PTR_ERR(page); in do_pages_stat_array()
1882 if (IS_ERR(page)) in do_pages_stat_array()
1885 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
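
add_page_for_migration() and do_pages_stat_array() are the kernel halves of the move_pages(2) system call. From userspace the same machinery is reached as follows (some_addr is a placeholder for a page-aligned address owned by the calling process):

	#include <numaif.h>	/* move_pages(); link with -lnuma */
	#include <stdio.h>

	void move_one_page_to_node1(void *some_addr)
	{
		void *pages[1]  = { some_addr };
		int   nodes[1]  = { 1 };	/* destination NUMA node */
		int   status[1] = { 0 };

		if (move_pages(0 /* current process */, 1, pages, nodes, status,
			       MPOL_MF_MOVE) == 0)
			printf("page now on node %d\n", status[0]);
	}

Passing a NULL nodes array turns the call into a pure status query, which is the path that ends in do_pages_stat_array().
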
2070 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
2074 struct page *newpage; in alloc_misplaced_dst_page()
2085 static struct page *alloc_misplaced_dst_page_thp(struct page *page, in alloc_misplaced_dst_page_thp() argument
2089 struct page *newpage; in alloc_misplaced_dst_page_thp()
2102 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
2105 int nr_pages = thp_nr_pages(page); in numamigrate_isolate_page()
2107 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
2110 if (PageTransHuge(page) && total_mapcount(page) > 1) in numamigrate_isolate_page()
2117 if (isolate_lru_page(page)) in numamigrate_isolate_page()
2120 page_lru = page_is_file_lru(page); in numamigrate_isolate_page()
2121 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
2129 put_page(page); in numamigrate_isolate_page()
2138 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
2147 int nr_pages = thp_nr_pages(page); in migrate_misplaced_page()
2154 compound = PageTransHuge(page); in migrate_misplaced_page()
2165 if (page_mapcount(page) != 1 && page_is_file_lru(page) && in migrate_misplaced_page()
2173 if (page_is_file_lru(page) && PageDirty(page)) in migrate_misplaced_page()
2176 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
2180 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
2185 list_del(&page->lru); in migrate_misplaced_page()
2186 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in migrate_misplaced_page()
2187 page_is_file_lru(page), -nr_pages); in migrate_misplaced_page()
2188 putback_lru_page(page); in migrate_misplaced_page()
2197 put_page(page); in migrate_misplaced_page()
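
migrate_misplaced_page() is reached from the NUMA hinting fault handlers rather than from any syscall; its callers boil down to something like this (paraphrased from the hinting-fault code, not part of this listing):

	/* do_numa_page()-style caller: try to pull the page onto the faulting node */
	migrated = migrate_misplaced_page(page, vma, target_nid);
	if (migrated)
		page_nid = target_nid;
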
2258 struct page *page; in migrate_vma_collect_pmd() local
2266 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
2267 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
2276 get_page(page); in migrate_vma_collect_pmd()
2278 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
2281 ret = split_huge_page(page); in migrate_vma_collect_pmd()
2282 unlock_page(page); in migrate_vma_collect_pmd()
2283 put_page(page); in migrate_vma_collect_pmd()
2301 struct page *page; in migrate_vma_collect_pmd() local
2325 page = pfn_swap_entry_to_page(entry); in migrate_vma_collect_pmd()
2328 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
2331 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
2344 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
2350 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
2364 get_page(page); in migrate_vma_collect_pmd()
2371 if (trylock_page(page)) { in migrate_vma_collect_pmd()
2380 page_to_pfn(page)); in migrate_vma_collect_pmd()
2383 page_to_pfn(page)); in migrate_vma_collect_pmd()
2403 page_remove_rmap(page, false); in migrate_vma_collect_pmd()
2404 put_page(page); in migrate_vma_collect_pmd()
2409 put_page(page); in migrate_vma_collect_pmd()
2469 static bool migrate_vma_check_page(struct page *page) in migrate_vma_check_page() argument
2483 if (PageCompound(page)) in migrate_vma_check_page()
2487 if (is_zone_device_page(page)) { in migrate_vma_check_page()
2501 return is_device_private_page(page); in migrate_vma_check_page()
2505 if (page_mapping(page)) in migrate_vma_check_page()
2506 extra += 1 + page_has_private(page); in migrate_vma_check_page()
2508 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
2535 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2537 if (!page) in migrate_vma_unmap()
2541 if (!is_zone_device_page(page)) { in migrate_vma_unmap()
2542 if (!PageLRU(page) && allow_drain) { in migrate_vma_unmap()
2548 if (isolate_lru_page(page)) { in migrate_vma_unmap()
2556 put_page(page); in migrate_vma_unmap()
2559 if (page_mapped(page)) in migrate_vma_unmap()
2560 try_to_migrate(page, 0); in migrate_vma_unmap()
2562 if (page_mapped(page) || !migrate_vma_check_page(page)) { in migrate_vma_unmap()
2563 if (!is_zone_device_page(page)) { in migrate_vma_unmap()
2564 get_page(page); in migrate_vma_unmap()
2565 putback_lru_page(page); in migrate_vma_unmap()
2576 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2578 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2581 remove_migration_ptes(page, page, false); in migrate_vma_unmap()
2584 unlock_page(page); in migrate_vma_unmap()
2585 put_page(page); in migrate_vma_unmap()
2702 struct page *page, in migrate_vma_insert_page() argument
2753 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
2761 __SetPageUptodate(page); in migrate_vma_insert_page()
2763 if (is_zone_device_page(page)) { in migrate_vma_insert_page()
2764 if (is_device_private_page(page)) { in migrate_vma_insert_page()
2769 page_to_pfn(page)); in migrate_vma_insert_page()
2772 page_to_pfn(page)); in migrate_vma_insert_page()
2783 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2810 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2811 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
2812 lru_cache_add_inactive_or_unevictable(page, vma); in migrate_vma_insert_page()
2813 get_page(page); in migrate_vma_insert_page()
2853 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
2854 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages() local
2863 if (!page) { in migrate_vma_pages()
2880 mapping = page_mapping(page); in migrate_vma_pages()
2902 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); in migrate_vma_pages()
2934 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
2935 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize() local
2937 if (!page) { in migrate_vma_finalize()
2950 newpage = page; in migrate_vma_finalize()
2953 remove_migration_ptes(page, newpage, false); in migrate_vma_finalize()
2954 unlock_page(page); in migrate_vma_finalize()
2956 if (is_zone_device_page(page)) in migrate_vma_finalize()
2957 put_page(page); in migrate_vma_finalize()
2959 putback_lru_page(page); in migrate_vma_finalize()
2961 if (newpage != page) { in migrate_vma_finalize()
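
The migrate_vma_* functions above (collect, check, unmap, insert_page, pages, finalize) make up the device-driver migration API: a driver fills in a struct migrate_vma, lets the core collect and unmap the source pages, copies the data itself, then installs the new pages and restores the page tables. A condensed calling sequence, with NPAGES, start, vma and drv_owner as placeholders and the copy step only indicated by a comment:

	unsigned long src[NPAGES], dst[NPAGES];
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,			/* page aligned           */
		.end		= start + NPAGES * PAGE_SIZE,
		.src		= src,
		.dst		= dst,
		.pgmap_owner	= drv_owner,			/* driver-private cookie  */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};

	if (migrate_vma_setup(&args))
		return -EINVAL;

	/* for each src[i] with MIGRATE_PFN_MIGRATE set: allocate and lock a
	 * destination page, copy the data, and store migrate_pfn(page_to_pfn(new))
	 * in dst[i]; leaving dst[i] zero skips that entry.                        */

	migrate_vma_pages(&args);	/* install the new pages             */
	migrate_vma_finalize(&args);	/* restore CPU ptes, drop old pages  */
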