Lines matching refs: page (cross-reference hits in mm/filemap.c; each entry gives the source line number, the matching line, and the enclosing function, with "argument"/"local" marking how the identifier is used there)

124 struct page *page, void *shadow) in page_cache_delete() argument
126 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
132 if (!PageHuge(page)) { in page_cache_delete()
133 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
134 nr = compound_nr(page); in page_cache_delete()
137 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
138 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
139 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
144 page->mapping = NULL; in page_cache_delete()
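The hits above are page_cache_delete(): it swaps the page's slot in mapping->i_pages for an optional shadow entry. A minimal sketch of the store itself, assuming the i_pages lock is already held and eliding the nrpages accounting:

	XA_STATE(xas, &mapping->i_pages, page->index);

	if (!PageHuge(page))		/* a THP occupies compound_nr() slots */
		xas_set_order(&xas, page->index, compound_order(page));
	xas_store(&xas, shadow);	/* a NULL shadow simply erases the slot */
	xas_init_marks(&xas);		/* clear dirty/writeback marks with it */
	page->mapping = NULL;		/* page->index stays: truncation uses it */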
150 struct page *page) in unaccount_page_cache_page() argument
159 if (PageUptodate(page) && PageMappedToDisk(page)) in unaccount_page_cache_page()
160 cleancache_put_page(page); in unaccount_page_cache_page()
162 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
164 VM_BUG_ON_PAGE(PageTail(page), page); in unaccount_page_cache_page()
165 VM_BUG_ON_PAGE(page_mapped(page), page); in unaccount_page_cache_page()
166 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { in unaccount_page_cache_page()
170 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
171 dump_page(page, "still mapped when deleted"); in unaccount_page_cache_page()
175 mapcount = page_mapcount(page); in unaccount_page_cache_page()
177 page_count(page) >= mapcount + 2) { in unaccount_page_cache_page()
184 page_mapcount_reset(page); in unaccount_page_cache_page()
185 page_ref_sub(page, mapcount); in unaccount_page_cache_page()
190 if (PageHuge(page)) in unaccount_page_cache_page()
193 nr = thp_nr_pages(page); in unaccount_page_cache_page()
195 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
196 if (PageSwapBacked(page)) { in unaccount_page_cache_page()
197 __mod_lruvec_page_state(page, NR_SHMEM, -nr); in unaccount_page_cache_page()
198 if (PageTransHuge(page)) in unaccount_page_cache_page()
199 __mod_lruvec_page_state(page, NR_SHMEM_THPS, -nr); in unaccount_page_cache_page()
200 } else if (PageTransHuge(page)) { in unaccount_page_cache_page()
201 __mod_lruvec_page_state(page, NR_FILE_THPS, -nr); in unaccount_page_cache_page()
215 if (WARN_ON_ONCE(PageDirty(page))) in unaccount_page_cache_page()
216 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
224 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
226 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
228 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
230 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
231 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
235 struct page *page) in page_cache_free_page() argument
237 void (*freepage)(struct page *); in page_cache_free_page()
241 freepage(page); in page_cache_free_page()
243 if (PageTransHuge(page) && !PageHuge(page)) { in page_cache_free_page()
244 page_ref_sub(page, thp_nr_pages(page)); in page_cache_free_page()
245 VM_BUG_ON_PAGE(page_count(page) <= 0, page); in page_cache_free_page()
247 put_page(page); in page_cache_free_page()
259 void delete_from_page_cache(struct page *page) in delete_from_page_cache() argument
261 struct address_space *mapping = page_mapping(page); in delete_from_page_cache()
263 BUG_ON(!PageLocked(page)); in delete_from_page_cache()
266 __delete_from_page_cache(page, NULL); in delete_from_page_cache()
272 page_cache_free_page(mapping, page); in delete_from_page_cache()
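delete_from_page_cache() insists on the page lock (the BUG_ON at line 263) and expects the caller to hold its own reference. A hedged caller sketch; drop_cached_page() is a hypothetical helper, not a kernel function:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	static void drop_cached_page(struct page *page)
	{
		lock_page(page);
		if (page->mapping)	/* skip if already truncated away */
			delete_from_page_cache(page);
		unlock_page(page);
		put_page(page);		/* drop the caller's reference */
	}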
296 struct page *page; in page_cache_delete_batch() local
299 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
304 if (xa_is_value(page)) in page_cache_delete_batch()
313 if (page != pvec->pages[i]) { in page_cache_delete_batch()
314 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
315 page); in page_cache_delete_batch()
319 WARN_ON_ONCE(!PageLocked(page)); in page_cache_delete_batch()
321 if (page->index == xas.xa_index) in page_cache_delete_batch()
322 page->mapping = NULL; in page_cache_delete_batch()
330 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
492 struct page *page; in filemap_range_has_page() local
501 page = xas_find(&xas, max); in filemap_range_has_page()
502 if (xas_retry(&xas, page)) in filemap_range_has_page()
505 if (xa_is_value(page)) in filemap_range_has_page()
516 return page != NULL; in filemap_range_has_page()
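filemap_range_has_page() lets callers such as direct-I/O paths cheaply detect cached pages before deciding whether a flush is needed. A hedged pre-flight sketch, as it might appear inside a caller:

	if (filemap_range_has_page(mapping, pos, pos + count - 1)) {
		int err = filemap_write_and_wait_range(mapping, pos,
						       pos + count - 1);
		if (err)
			return err;
	}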
541 struct page *page = pvec.pages[i]; in __filemap_fdatawait_range() local
543 wait_on_page_writeback(page); in __filemap_fdatawait_range()
544 ClearPageError(page); in __filemap_fdatawait_range()
654 struct page *page; in filemap_range_has_writeback() local
660 xas_for_each(&xas, page, max) { in filemap_range_has_writeback()
661 if (xas_retry(&xas, page)) in filemap_range_has_writeback()
663 if (xa_is_value(page)) in filemap_range_has_writeback()
665 if (PageDirty(page) || PageLocked(page) || PageWriteback(page)) in filemap_range_has_writeback()
669 return page != NULL; in filemap_range_has_writeback()
850 void replace_page_cache_page(struct page *old, struct page *new) in replace_page_cache_page()
855 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
962 trace_mm_filemap_add_to_page_cache(&folio->page); in __filemap_add_folio()
984 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
987 return __filemap_add_folio(mapping, page_folio(page), offset, in add_to_page_cache_locked()
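add_to_page_cache_locked() is now a thin wrapper around __filemap_add_folio(); most callers want the LRU variant instead. A hedged sketch of the usual allocate-and-insert pattern:

	struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
	int err;

	if (!page)
		return -ENOMEM;
	err = add_to_page_cache_lru(page, mapping, index,
				    mapping_gfp_mask(mapping));
	if (err) {
		put_page(page);		/* raced with another inserter, or ENOMEM */
		return err;
	}
	/* success: the page is locked, in the cache, and ready to be filled */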
1454 int put_and_wait_on_page_locked(struct page *page, int state) in put_and_wait_on_page_locked() argument
1456 return folio_wait_bit_common(page_folio(page), PG_locked, state, in put_and_wait_on_page_locked()
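put_and_wait_on_page_locked() drops the caller's reference before sleeping, so the wait itself cannot pin the page (migration relies on this). A hedged usage sketch:

	get_page(page);			/* this reference is consumed by the helper */
	if (put_and_wait_on_page_locked(page, TASK_KILLABLE))
		return -EINTR;		/* a fatal signal arrived while waiting */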
1619 void page_endio(struct page *page, bool is_write, int err) in page_endio() argument
1623 SetPageUptodate(page); in page_endio()
1625 ClearPageUptodate(page); in page_endio()
1626 SetPageError(page); in page_endio()
1628 unlock_page(page); in page_endio()
1633 SetPageError(page); in page_endio()
1634 mapping = page_mapping(page); in page_endio()
1638 end_page_writeback(page); in page_endio()
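page_endio() folds the read and write completion paths into one helper. A hedged sketch of a single-page bio completion handler in the mpage style; demo_end_io() is hypothetical:

	#include <linux/bio.h>

	static void demo_end_io(struct bio *bio)
	{
		struct page *page = bio_first_page_all(bio);

		page_endio(page, op_is_write(bio_op(bio)),
			   blk_status_to_errno(bio->bi_status));
		bio_put(bio);
	}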
1982 static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry()
1985 struct page *page; in find_get_entry() local
1989 page = xas_find(xas, max); in find_get_entry()
1991 page = xas_find_marked(xas, max, mark); in find_get_entry()
1993 if (xas_retry(xas, page)) in find_get_entry()
2000 if (!page || xa_is_value(page)) in find_get_entry()
2001 return page; in find_get_entry()
2003 if (!page_cache_get_speculative(page)) in find_get_entry()
2007 if (unlikely(page != xas_reload(xas))) { in find_get_entry()
2008 put_page(page); in find_get_entry()
2012 return page; in find_get_entry()
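find_get_entry() is the canonical RCU lookup dance visible in the lines above: load the slot, take a speculative reference, then reload to confirm the slot still points at the same page. Condensed as a hedged sketch:

	rcu_read_lock();
repeat:
	page = xas_load(&xas);
	if (!page || xa_is_value(page))
		goto out;
	if (!page_cache_get_speculative(page))
		goto repeat;		/* the page was being freed under us */
	if (unlikely(page != xas_reload(&xas))) {
		put_page(page);		/* the slot was reused; retry */
		goto repeat;
	}
out:
	rcu_read_unlock();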
2048 struct page *page; in find_get_entries() local
2053 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_get_entries()
2058 if (!xa_is_value(page) && PageTransHuge(page) && in find_get_entries()
2059 !PageHuge(page)) { in find_get_entries()
2060 page = find_subpage(page, xas.xa_index); in find_get_entries()
2065 pvec->pages[ret] = page; in find_get_entries()
2100 struct page *page; in find_lock_entries() local
2103 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2104 if (!xa_is_value(page)) { in find_lock_entries()
2105 if (page->index < start) in find_lock_entries()
2107 if (page->index + thp_nr_pages(page) - 1 > end) in find_lock_entries()
2109 if (!trylock_page(page)) in find_lock_entries()
2111 if (page->mapping != mapping || PageWriteback(page)) in find_lock_entries()
2113 VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index), in find_lock_entries()
2114 page); in find_lock_entries()
2117 if (!pagevec_add(pvec, page)) in find_lock_entries()
2121 unlock_page(page); in find_lock_entries()
2123 put_page(page); in find_lock_entries()
2125 if (!xa_is_value(page) && PageTransHuge(page)) { in find_lock_entries()
2126 unsigned int nr_pages = thp_nr_pages(page); in find_lock_entries()
2129 xas_set(&xas, page->index + nr_pages); in find_lock_entries()
2162 struct page **pages) in find_get_pages_range()
2165 struct page *page; in find_get_pages_range() local
2172 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_get_pages_range()
2174 if (xa_is_value(page)) in find_get_pages_range()
2177 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
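find_get_pages_range() hands back pages with elevated refcounts and advances *start past the last one returned, so a full walk is a simple loop. Hedged sketch; walk_range() is a hypothetical helper:

	static void walk_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
	{
		struct page *pages[16];
		unsigned int i, nr;

		while ((nr = find_get_pages_range(mapping, &start, end,
						  16, pages))) {
			for (i = 0; i < nr; i++)
				put_page(pages[i]);	/* drop the lookup refs */
		}
	}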
2213 unsigned int nr_pages, struct page **pages) in find_get_pages_contig()
2216 struct page *page; in find_get_pages_contig() local
2223 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
2224 if (xas_retry(&xas, page)) in find_get_pages_contig()
2230 if (xa_is_value(page)) in find_get_pages_contig()
2233 if (!page_cache_get_speculative(page)) in find_get_pages_contig()
2237 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
2240 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
2245 put_page(page); in find_get_pages_contig()
2271 struct page **pages) in find_get_pages_range_tag()
2274 struct page *page; in find_get_pages_range_tag() local
2281 while ((page = find_get_entry(&xas, end, tag))) { in find_get_pages_range_tag()
2287 if (xa_is_value(page)) in find_get_pages_range_tag()
2290 pages[ret] = page; in find_get_pages_range_tag()
2292 *index = page->index + thp_nr_pages(page); in find_get_pages_range_tag()
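The tagged variant is what drives writeback: write_cache_pages() reaches it through pagevec_lookup_range_tag() with PAGECACHE_TAG_DIRTY. A hedged sketch of the loop shape, with the actual writeback work elided:

	pgoff_t index = 0;
	struct page *pages[PAGEVEC_SIZE];
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &index, (pgoff_t)-1,
					      PAGECACHE_TAG_DIRTY,
					      PAGEVEC_SIZE, pages))) {
		for (i = 0; i < nr; i++)
			put_page(pages[i]);	/* a real caller writes these back */
	}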
2347 struct page *head; in filemap_get_read_batch()
2380 struct page *page) in filemap_read_page() argument
2389 ClearPageError(page); in filemap_read_page()
2391 error = mapping->a_ops->readpage(file, page); in filemap_read_page()
2395 error = wait_on_page_locked_killable(page); in filemap_read_page()
2398 if (PageUptodate(page)) in filemap_read_page()
2405 loff_t pos, struct iov_iter *iter, struct page *page) in filemap_range_uptodate() argument
2409 if (PageUptodate(page)) in filemap_range_uptodate()
2416 if (mapping->host->i_blkbits >= (PAGE_SHIFT + thp_order(page))) in filemap_range_uptodate()
2420 if (page_offset(page) > pos) { in filemap_range_uptodate()
2421 count -= page_offset(page) - pos; in filemap_range_uptodate()
2424 pos -= page_offset(page); in filemap_range_uptodate()
2427 return mapping->a_ops->is_partially_uptodate(page, pos, count); in filemap_range_uptodate()
2432 struct page *page) in filemap_update_page() argument
2434 struct folio *folio = page_folio(page); in filemap_update_page()
2450 put_and_wait_on_page_locked(&folio->page, TASK_KILLABLE); in filemap_update_page()
2463 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, &folio->page)) in filemap_update_page()
2470 error = filemap_read_page(iocb->ki_filp, mapping, &folio->page); in filemap_update_page()
2485 struct page *page; in filemap_create_page() local
2488 page = page_cache_alloc(mapping); in filemap_create_page()
2489 if (!page) in filemap_create_page()
2505 error = add_to_page_cache_lru(page, mapping, index, in filemap_create_page()
2512 error = filemap_read_page(file, mapping, page); in filemap_create_page()
2517 pagevec_add(pvec, page); in filemap_create_page()
2521 put_page(page); in filemap_create_page()
2526 struct address_space *mapping, struct page *page, in filemap_readahead() argument
2531 page_cache_async_readahead(mapping, &file->f_ra, file, page, in filemap_readahead()
2532 page->index, last_index - page->index); in filemap_readahead()
2544 struct page *page; in filemap_get_pages() local
2570 page = pvec->pages[pagevec_count(pvec) - 1]; in filemap_get_pages()
2571 if (PageReadahead(page)) { in filemap_get_pages()
2572 err = filemap_readahead(iocb, filp, mapping, page, last_index); in filemap_get_pages()
2576 if (!PageUptodate(page)) { in filemap_get_pages()
2579 err = filemap_update_page(iocb, mapping, iter, page); in filemap_get_pages()
2587 put_page(page); in filemap_get_pages()
2674 struct page *page = pvec.pages[i]; in filemap_read() local
2675 size_t page_size = thp_size(page); in filemap_read()
2681 if (end_offset < page_offset(page)) in filemap_read()
2684 mark_page_accessed(page); in filemap_read()
2693 for (j = 0; j < thp_nr_pages(page); j++) in filemap_read()
2694 flush_dcache_page(page + j); in filemap_read()
2697 copied = copy_page_to_iter(page, offset, bytes, iter); in filemap_read()
2797 struct address_space *mapping, struct page *page, in page_seek_hole_data() argument
2803 if (xa_is_value(page) || PageUptodate(page)) in page_seek_hole_data()
2810 lock_page(page); in page_seek_hole_data()
2811 if (unlikely(page->mapping != mapping)) in page_seek_hole_data()
2814 offset = offset_in_thp(page, start) & ~(bsz - 1); in page_seek_hole_data()
2817 if (ops->is_partially_uptodate(page, offset, bsz) == seek_data) in page_seek_hole_data()
2821 } while (offset < thp_size(page)); in page_seek_hole_data()
2823 unlock_page(page); in page_seek_hole_data()
2829 unsigned int seek_page_size(struct xa_state *xas, struct page *page) in seek_page_size() argument
2831 if (xa_is_value(page)) in seek_page_size()
2833 return thp_size(page); in seek_page_size()
2860 struct page *page; in mapping_seek_hole_data() local
2866 while ((page = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
2876 seek_size = seek_page_size(&xas, page); in mapping_seek_hole_data()
2878 start = page_seek_hole_data(&xas, mapping, page, start, pos, in mapping_seek_hole_data()
2886 if (!xa_is_value(page)) in mapping_seek_hole_data()
2887 put_page(page); in mapping_seek_hole_data()
2893 if (page && !xa_is_value(page)) in mapping_seek_hole_data()
2894 put_page(page); in mapping_seek_hole_data()
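mapping_seek_hole_data() implements SEEK_HOLE/SEEK_DATA against cached and shadow entries. A hedged ->llseek sketch in the shmem style, without the inode locking and vfs_setpos() bookkeeping a real filesystem would add; demo_llseek() is hypothetical:

	static loff_t demo_llseek(struct file *file, loff_t offset, int whence)
	{
		struct inode *inode = file_inode(file);

		if (whence != SEEK_HOLE && whence != SEEK_DATA)
			return generic_file_llseek(file, offset, whence);
		return mapping_seek_hole_data(file->f_mapping, offset,
					      i_size_read(inode), whence);
	}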
2913 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2916 struct folio *folio = page_folio(page); in lock_page_maybe_drop_mmap()
3006 struct page *page) in do_async_mmap_readahead() argument
3021 if (PageReadahead(page)) { in do_async_mmap_readahead()
3024 page, offset, ra->ra_pages); in do_async_mmap_readahead()
3061 struct page *page; in filemap_fault() local
3072 page = find_get_page(mapping, offset); in filemap_fault()
3073 if (likely(page)) { in filemap_fault()
3079 fpin = do_async_mmap_readahead(vmf, page); in filemap_fault()
3080 if (unlikely(!PageUptodate(page))) { in filemap_fault()
3099 page = pagecache_get_page(mapping, offset, in filemap_fault()
3102 if (!page) { in filemap_fault()
3110 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) in filemap_fault()
3114 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
3115 unlock_page(page); in filemap_fault()
3116 put_page(page); in filemap_fault()
3119 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in filemap_fault()
3125 if (unlikely(!PageUptodate(page))) { in filemap_fault()
3133 unlock_page(page); in filemap_fault()
3134 put_page(page); in filemap_fault()
3146 unlock_page(page); in filemap_fault()
3158 unlock_page(page); in filemap_fault()
3159 put_page(page); in filemap_fault()
3163 vmf->page = page; in filemap_fault()
3174 error = filemap_read_page(file, mapping, page); in filemap_fault()
3177 put_page(page); in filemap_fault()
3191 if (page) in filemap_fault()
3192 put_page(page); in filemap_fault()
3201 static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page) in filemap_map_pmd() argument
3207 unlock_page(page); in filemap_map_pmd()
3208 put_page(page); in filemap_map_pmd()
3212 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) { in filemap_map_pmd()
3213 vm_fault_t ret = do_set_pmd(vmf, page); in filemap_map_pmd()
3216 unlock_page(page); in filemap_map_pmd()
3226 unlock_page(page); in filemap_map_pmd()
3227 put_page(page); in filemap_map_pmd()
3234 static struct page *next_uptodate_page(struct page *page, in next_uptodate_page() argument
3241 if (!page) in next_uptodate_page()
3243 if (xas_retry(xas, page)) in next_uptodate_page()
3245 if (xa_is_value(page)) in next_uptodate_page()
3247 if (PageLocked(page)) in next_uptodate_page()
3249 if (!page_cache_get_speculative(page)) in next_uptodate_page()
3252 if (unlikely(page != xas_reload(xas))) in next_uptodate_page()
3254 if (!PageUptodate(page) || PageReadahead(page)) in next_uptodate_page()
3256 if (!trylock_page(page)) in next_uptodate_page()
3258 if (page->mapping != mapping) in next_uptodate_page()
3260 if (!PageUptodate(page)) in next_uptodate_page()
3265 return page; in next_uptodate_page()
3267 unlock_page(page); in next_uptodate_page()
3269 put_page(page); in next_uptodate_page()
3270 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
3275 static inline struct page *first_map_page(struct address_space *mapping, in first_map_page()
3283 static inline struct page *next_map_page(struct address_space *mapping, in next_map_page()
3300 struct page *head, *page; in filemap_map_pages() local
3317 page = find_subpage(head, xas.xa_index); in filemap_map_pages()
3318 if (PageHWPoison(page)) in filemap_map_pages()
3335 do_set_pte(vmf, page, addr); in filemap_map_pages()
3355 struct page *page = vmf->page; in filemap_page_mkwrite() local
3360 lock_page(page); in filemap_page_mkwrite()
3361 if (page->mapping != mapping) { in filemap_page_mkwrite()
3362 unlock_page(page); in filemap_page_mkwrite()
3371 set_page_dirty(page); in filemap_page_mkwrite()
3372 wait_for_stable_page(page); in filemap_page_mkwrite()
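filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() are the trio a filesystem wires into its vm_operations_struct; generic_file_mmap() does essentially this. Hedged sketch with hypothetical demo_* names:

	static const struct vm_operations_struct demo_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= filemap_map_pages,
		.page_mkwrite	= filemap_page_mkwrite,
	};

	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		file_accessed(file);
		vma->vm_ops = &demo_vm_ops;
		return 0;
	}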
3425 static struct page *wait_on_page_read(struct page *page) in wait_on_page_read() argument
3427 if (!IS_ERR(page)) { in wait_on_page_read()
3428 wait_on_page_locked(page); in wait_on_page_read()
3429 if (!PageUptodate(page)) { in wait_on_page_read()
3430 put_page(page); in wait_on_page_read()
3431 page = ERR_PTR(-EIO); in wait_on_page_read()
3434 return page; in wait_on_page_read()
3437 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page()
3439 int (*filler)(void *, struct page *), in do_read_cache_page() argument
3443 struct page *page; in do_read_cache_page() local
3446 page = find_get_page(mapping, index); in do_read_cache_page()
3447 if (!page) { in do_read_cache_page()
3448 page = __page_cache_alloc(gfp); in do_read_cache_page()
3449 if (!page) in do_read_cache_page()
3451 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
3453 put_page(page); in do_read_cache_page()
3462 err = filler(data, page); in do_read_cache_page()
3464 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
3467 put_page(page); in do_read_cache_page()
3471 page = wait_on_page_read(page); in do_read_cache_page()
3472 if (IS_ERR(page)) in do_read_cache_page()
3473 return page; in do_read_cache_page()
3476 if (PageUptodate(page)) in do_read_cache_page()
3510 wait_on_page_locked(page); in do_read_cache_page()
3511 if (PageUptodate(page)) in do_read_cache_page()
3515 lock_page(page); in do_read_cache_page()
3518 if (!page->mapping) { in do_read_cache_page()
3519 unlock_page(page); in do_read_cache_page()
3520 put_page(page); in do_read_cache_page()
3525 if (PageUptodate(page)) { in do_read_cache_page()
3526 unlock_page(page); in do_read_cache_page()
3536 ClearPageError(page); in do_read_cache_page()
3540 mark_page_accessed(page); in do_read_cache_page()
3541 return page; in do_read_cache_page()
3560 struct page *read_cache_page(struct address_space *mapping, in read_cache_page()
3562 int (*filler)(void *, struct page *), in read_cache_page() argument
3585 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp()
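read_cache_page() and read_cache_page_gfp() return either an uptodate page or an ERR_PTR(), never NULL, so callers test with IS_ERR(). Hedged sketch as it might appear inside a caller:

	struct page *page = read_cache_page_gfp(mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... use the uptodate page ... */
	put_page(page);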
3595 struct page **pagep, void **fsdata) in pagecache_write_begin()
3606 struct page *page, void *fsdata) in pagecache_write_end() argument
3610 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3727 struct page *page; in generic_perform_write() local
3755 &page, &fsdata); in generic_perform_write()
3760 flush_dcache_page(page); in generic_perform_write()
3762 copied = copy_page_from_iter_atomic(page, offset, bytes, i); in generic_perform_write()
3763 flush_dcache_page(page); in generic_perform_write()
3766 page, fsdata); in generic_perform_write()
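These hits are the core of generic_perform_write(): the write_begin/write_end contract with the filesystem, one page per iteration. A hedged single-iteration sketch with error handling and the short-copy retry elided:

	status = a_ops->write_begin(file, mapping, pos, bytes, 0,
				    &page, &fsdata);
	if (status)
		return status;
	copied = copy_page_from_iter_atomic(page, offset_in_page(pos),
					    bytes, i);
	flush_dcache_page(page);
	status = a_ops->write_end(file, mapping, pos, bytes, copied,
				  page, fsdata);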
3939 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
3941 struct address_space * const mapping = page->mapping; in try_to_release_page()
3943 BUG_ON(!PageLocked(page)); in try_to_release_page()
3944 if (PageWriteback(page)) in try_to_release_page()
3948 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
3949 return try_to_free_buffers(page); in try_to_release_page()
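try_to_release_page() asks the filesystem (or the buffer layer, via try_to_free_buffers()) to detach private state so a locked page can be freed; it refuses while writeback is in flight. A hedged invalidation-style sketch:

	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return 0;	/* still busy; the caller backs off */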