Lines matching refs:bh in fs/buffer.c (Linux kernel buffer cache)
55 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
60 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
62 trace_block_touch_buffer(bh); in touch_buffer()
63 mark_page_accessed(bh->b_page); in touch_buffer()
67 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
69 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
73 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
75 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
77 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
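Typical use of this pair: take BH_Lock around an in-place edit so the buffer cannot be under I/O while it changes. A minimal sketch, assuming a kernel-module context and the pre-v5.19 buffer_head API shown throughout this listing (edit_first_byte is a hypothetical helper):

#include <linux/buffer_head.h>

/* Serialize against I/O completion and other writers via BH_Lock. */
static void edit_first_byte(struct buffer_head *bh, u8 val)
{
        lock_buffer(bh);                /* may sleep in __lock_buffer() */
        ((u8 *)bh->b_data)[0] = val;
        unlock_buffer(bh);              /* clears BH_Lock, wakes waiters */
}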
89 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
102 bh = head; in buffer_check_dirty_writeback()
104 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
107 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
110 bh = bh->b_this_page; in buffer_check_dirty_writeback()
111 } while (bh != head); in buffer_check_dirty_writeback()
120 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
122 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
126 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
128 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
131 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
142 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
145 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
148 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
150 unlock_buffer(bh); in __end_buffer_read_notouch()
157 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
159 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
160 put_bh(bh); in end_buffer_read_sync()
164 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
167 set_buffer_uptodate(bh); in end_buffer_write_sync()
169 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
170 mark_buffer_write_io_error(bh); in end_buffer_write_sync()
171 clear_buffer_uptodate(bh); in end_buffer_write_sync()
173 unlock_buffer(bh); in end_buffer_write_sync()
174 put_bh(bh); in end_buffer_write_sync()
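end_buffer_read_sync() and end_buffer_write_sync() are the completions for plain synchronous I/O; the caller holds the lock, submits, and waits. The read-side pattern (the same one __bread_slow() uses later in this listing) looks roughly like this, assuming the pre-v6.0 submit_bh() signature shown here:

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

/* Read one block synchronously; 0 on success, -EIO on failure. */
static int read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);                     /* dropped by end_buffer_read_sync() */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(REQ_OP_READ, 0, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}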
195 struct buffer_head *bh; in __find_get_block_slow() local
210 bh = head; in __find_get_block_slow()
212 if (!buffer_mapped(bh)) in __find_get_block_slow()
214 else if (bh->b_blocknr == block) { in __find_get_block_slow()
215 ret = bh; in __find_get_block_slow()
216 get_bh(bh); in __find_get_block_slow()
219 bh = bh->b_this_page; in __find_get_block_slow()
220 } while (bh != head); in __find_get_block_slow()
233 (unsigned long long)bh->b_blocknr, in __find_get_block_slow()
234 bh->b_state, bh->b_size, bdev, in __find_get_block_slow()
244 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
252 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
254 page = bh->b_page; in end_buffer_async_read()
256 set_buffer_uptodate(bh); in end_buffer_async_read()
258 clear_buffer_uptodate(bh); in end_buffer_async_read()
259 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
270 clear_buffer_async_read(bh); in end_buffer_async_read()
271 unlock_buffer(bh); in end_buffer_async_read()
272 tmp = bh; in end_buffer_async_read()
281 } while (tmp != bh); in end_buffer_async_read()
300 struct buffer_head *bh; member
307 struct buffer_head *bh = ctx->bh; in decrypt_bh() local
310 err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size, in decrypt_bh()
311 bh_offset(bh)); in decrypt_bh()
312 end_buffer_async_read(bh, err == 0); in decrypt_bh()
320 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) in end_buffer_async_read_io() argument
324 fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) { in end_buffer_async_read_io()
329 ctx->bh = bh; in end_buffer_async_read_io()
335 end_buffer_async_read(bh, uptodate); in end_buffer_async_read_io()
342 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
349 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
351 page = bh->b_page; in end_buffer_async_write()
353 set_buffer_uptodate(bh); in end_buffer_async_write()
355 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
356 mark_buffer_write_io_error(bh); in end_buffer_async_write()
357 clear_buffer_uptodate(bh); in end_buffer_async_write()
364 clear_buffer_async_write(bh); in end_buffer_async_write()
365 unlock_buffer(bh); in end_buffer_async_write()
366 tmp = bh->b_this_page; in end_buffer_async_write()
367 while (tmp != bh) { in end_buffer_async_write()
405 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
407 bh->b_end_io = end_buffer_async_read_io; in mark_buffer_async_read()
408 set_buffer_async_read(bh); in mark_buffer_async_read()
411 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
414 bh->b_end_io = handler; in mark_buffer_async_write_endio()
415 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
418 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
420 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
477 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
479 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
480 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
481 bh->b_assoc_map = NULL; in __remove_assoc_queue()
501 struct buffer_head *bh; in osync_buffers_list() local
508 bh = BH_ENTRY(p); in osync_buffers_list()
509 if (buffer_locked(bh)) { in osync_buffers_list()
510 get_bh(bh); in osync_buffers_list()
512 wait_on_buffer(bh); in osync_buffers_list()
513 if (!buffer_uptodate(bh)) in osync_buffers_list()
515 brelse(bh); in osync_buffers_list()
562 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
563 if (bh) { in write_boundary_block()
564 if (buffer_dirty(bh)) in write_boundary_block()
565 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); in write_boundary_block()
566 put_bh(bh); in write_boundary_block()
570 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
573 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
575 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
581 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
583 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
585 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
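mark_buffer_dirty_inode() puts the buffer on the inode's association list (b_assoc_map) so a later fsync can find metadata that lives outside the inode's own pages. The usual consumer is sync_mapping_buffers(), e.g. from a filesystem's ->fsync. A sketch under those assumptions:

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Dirty an out-of-line metadata block and tie it to the inode. */
static void dirty_assoc_block(struct inode *inode, struct buffer_head *bh)
{
        mark_buffer_dirty_inode(bh, inode);
}

/* Later, from ->fsync: flush everything queued above. */
static int flush_assoc_blocks(struct inode *inode)
{
        return sync_mapping_buffers(inode->i_mapping);
}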
627 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
630 set_buffer_dirty(bh); in __set_page_dirty_buffers()
631 bh = bh->b_this_page; in __set_page_dirty_buffers()
632 } while (bh != head); in __set_page_dirty_buffers()
675 struct buffer_head *bh; in fsync_buffers_list() local
686 bh = BH_ENTRY(list->next); in fsync_buffers_list()
687 mapping = bh->b_assoc_map; in fsync_buffers_list()
688 __remove_assoc_queue(bh); in fsync_buffers_list()
692 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
693 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
694 bh->b_assoc_map = mapping; in fsync_buffers_list()
695 if (buffer_dirty(bh)) { in fsync_buffers_list()
696 get_bh(bh); in fsync_buffers_list()
705 write_dirty_buffer(bh, REQ_SYNC); in fsync_buffers_list()
713 brelse(bh); in fsync_buffers_list()
724 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
725 get_bh(bh); in fsync_buffers_list()
726 mapping = bh->b_assoc_map; in fsync_buffers_list()
727 __remove_assoc_queue(bh); in fsync_buffers_list()
731 if (buffer_dirty(bh)) { in fsync_buffers_list()
732 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
734 bh->b_assoc_map = mapping; in fsync_buffers_list()
737 wait_on_buffer(bh); in fsync_buffers_list()
738 if (!buffer_uptodate(bh)) in fsync_buffers_list()
740 brelse(bh); in fsync_buffers_list()
793 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
794 if (buffer_dirty(bh)) { in remove_inode_buffers()
798 __remove_assoc_queue(bh); in remove_inode_buffers()
817 struct buffer_head *bh, *head; in alloc_page_buffers() local
832 bh = alloc_buffer_head(gfp); in alloc_page_buffers()
833 if (!bh) in alloc_page_buffers()
836 bh->b_this_page = head; in alloc_page_buffers()
837 bh->b_blocknr = -1; in alloc_page_buffers()
838 head = bh; in alloc_page_buffers()
840 bh->b_size = size; in alloc_page_buffers()
843 set_bh_page(bh, page, offset); in alloc_page_buffers()
854 bh = head; in alloc_page_buffers()
856 free_buffer_head(bh); in alloc_page_buffers()
867 struct buffer_head *bh, *tail; in link_dev_buffers() local
869 bh = head; in link_dev_buffers()
871 tail = bh; in link_dev_buffers()
872 bh = bh->b_this_page; in link_dev_buffers()
873 } while (bh); in link_dev_buffers()
898 struct buffer_head *bh = head; in init_page_buffers() local
903 if (!buffer_mapped(bh)) { in init_page_buffers()
904 bh->b_end_io = NULL; in init_page_buffers()
905 bh->b_private = NULL; in init_page_buffers()
906 bh->b_bdev = bdev; in init_page_buffers()
907 bh->b_blocknr = block; in init_page_buffers()
909 set_buffer_uptodate(bh); in init_page_buffers()
911 set_buffer_mapped(bh); in init_page_buffers()
914 bh = bh->b_this_page; in init_page_buffers()
915 } while (bh != head); in init_page_buffers()
934 struct buffer_head *bh; in grow_dev_page() local
954 bh = page_buffers(page); in grow_dev_page()
955 if (bh->b_size == size) { in grow_dev_page()
968 bh = alloc_page_buffers(page, size, true); in grow_dev_page()
976 link_dev_buffers(page, bh); in grow_dev_page()
1034 struct buffer_head *bh; in __getblk_slow() local
1037 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1038 if (bh) in __getblk_slow()
1039 return bh; in __getblk_slow()
1082 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1084 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1086 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1094 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1096 if (buffer_dirty(bh)) in mark_buffer_dirty()
1100 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1101 struct page *page = bh->b_page; in mark_buffer_dirty()
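Note the WARN_ON_ONCE above: a buffer must be uptodate before it is dirtied. The standard write-side sequence is get the block, fill it under the lock, set uptodate, then mark dirty and drop the reference. A sketch (zero_block is a hypothetical helper):

#include <linux/buffer_head.h>
#include <linux/string.h>

/* Zero-fill one metadata block; writeback is left to the flusher. */
static int zero_block(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh = sb_getblk(sb, blocknr);

        if (!bh)
                return -ENOMEM;
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);        /* must precede mark_buffer_dirty() */
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        brelse(bh);
        return 0;
}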
1117 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error() argument
1121 set_buffer_write_io_error(bh); in mark_buffer_write_io_error()
1123 if (bh->b_page && bh->b_page->mapping) in mark_buffer_write_io_error()
1124 mapping_set_error(bh->b_page->mapping, -EIO); in mark_buffer_write_io_error()
1125 if (bh->b_assoc_map) in mark_buffer_write_io_error()
1126 mapping_set_error(bh->b_assoc_map, -EIO); in mark_buffer_write_io_error()
1128 sb = READ_ONCE(bh->b_bdev->bd_super); in mark_buffer_write_io_error()
1156 void __bforget(struct buffer_head *bh) in __bforget() argument
1158 clear_buffer_dirty(bh); in __bforget()
1159 if (bh->b_assoc_map) { in __bforget()
1160 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1163 list_del_init(&bh->b_assoc_buffers); in __bforget()
1164 bh->b_assoc_map = NULL; in __bforget()
1167 __brelse(bh); in __bforget()
1171 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1173 lock_buffer(bh); in __bread_slow()
1174 if (buffer_uptodate(bh)) { in __bread_slow()
1175 unlock_buffer(bh); in __bread_slow()
1176 return bh; in __bread_slow()
1178 get_bh(bh); in __bread_slow()
1179 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1180 submit_bh(REQ_OP_READ, 0, bh); in __bread_slow()
1181 wait_on_buffer(bh); in __bread_slow()
1182 if (buffer_uptodate(bh)) in __bread_slow()
1183 return bh; in __bread_slow()
1185 brelse(bh); in __bread_slow()
1231 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1233 struct buffer_head *evictee = bh; in bh_lru_install()
1252 if (evictee == bh) { in bh_lru_install()
1258 get_bh(bh); in bh_lru_install()
1275 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1277 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1278 bh->b_size == size) { in lookup_bh_lru()
1285 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1287 get_bh(bh); in lookup_bh_lru()
1288 ret = bh; in lookup_bh_lru()
1304 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1306 if (bh == NULL) { in __find_get_block()
1308 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1309 if (bh) in __find_get_block()
1310 bh_lru_install(bh); in __find_get_block()
1312 touch_buffer(bh); in __find_get_block()
1314 return bh; in __find_get_block()
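__find_get_block() is the no-I/O lookup: per-CPU LRU first, then the page-cache walk in __find_get_block_slow(); NULL means the block is simply not cached. A sketch of a cache-only probe:

#include <linux/buffer_head.h>

/* True iff the block is already cached and valid; never touches disk. */
static bool block_cached_uptodate(struct block_device *bdev,
                                  sector_t block, unsigned int size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);
        bool ret = false;

        if (bh) {
                ret = buffer_uptodate(bh);
                brelse(bh);             /* drop the lookup reference */
        }
        return ret;
}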
1330 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1333 if (bh == NULL) in __getblk_gfp()
1334 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1335 return bh; in __getblk_gfp()
1344 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1345 if (likely(bh)) { in __breadahead()
1346 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead()
1347 brelse(bh); in __breadahead()
1355 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __breadahead_gfp() local
1356 if (likely(bh)) { in __breadahead_gfp()
1357 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); in __breadahead_gfp()
1358 brelse(bh); in __breadahead_gfp()
1379 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1381 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1382 bh = __bread_slow(bh); in __bread_gfp()
1383 return bh; in __bread_gfp()
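__bread() (a wrapper around __bread_gfp()) is the classic blocking read: it returns a referenced, uptodate buffer or NULL on I/O error, and the caller must brelse(). __breadahead() is its fire-and-forget counterpart. A sketch combining the two:

#include <linux/buffer_head.h>

/* Read `block`, hinting the next block first. Caller must brelse(). */
static struct buffer_head *read_with_hint(struct block_device *bdev,
                                          sector_t block, unsigned int size)
{
        __breadahead(bdev, block + 1, size);    /* async, best effort */
        return __bread(bdev, block, size);      /* sync; NULL on error */
}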
1442 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1445 bh->b_page = page; in set_bh_page()
1451 bh->b_data = (char *)(0 + offset); in set_bh_page()
1453 bh->b_data = page_address(page) + offset; in set_bh_page()
1466 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1470 lock_buffer(bh); in discard_buffer()
1471 clear_buffer_dirty(bh); in discard_buffer()
1472 bh->b_bdev = NULL; in discard_buffer()
1473 b_state = bh->b_state; in discard_buffer()
1475 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1481 unlock_buffer(bh); in discard_buffer()
1503 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1517 bh = head; in block_invalidatepage()
1519 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1520 next = bh->b_this_page; in block_invalidatepage()
1532 discard_buffer(bh); in block_invalidatepage()
1534 bh = next; in block_invalidatepage()
1535 } while (bh != head); in block_invalidatepage()
1558 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1561 bh = head; in create_empty_buffers()
1563 bh->b_state |= b_state; in create_empty_buffers()
1564 tail = bh; in create_empty_buffers()
1565 bh = bh->b_this_page; in create_empty_buffers()
1566 } while (bh); in create_empty_buffers()
1571 bh = head; in create_empty_buffers()
1574 set_buffer_dirty(bh); in create_empty_buffers()
1576 set_buffer_uptodate(bh); in create_empty_buffers()
1577 bh = bh->b_this_page; in create_empty_buffers()
1578 } while (bh != head); in create_empty_buffers()
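create_empty_buffers() attaches a ring of buffer_heads to a page that has none yet; filesystems normally guard it with page_has_buffers(). A sketch:

#include <linux/buffer_head.h>

/* Ensure `page` carries one buffer_head per fs block, return the head. */
static struct buffer_head *get_page_buffers(struct page *page,
                                            unsigned long blocksize)
{
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
        return page_buffers(page);
}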
1613 struct buffer_head *bh; in clean_bdev_aliases() local
1635 bh = head; in clean_bdev_aliases()
1637 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) in clean_bdev_aliases()
1639 if (bh->b_blocknr >= block + len) in clean_bdev_aliases()
1641 clear_buffer_dirty(bh); in clean_bdev_aliases()
1642 wait_on_buffer(bh); in clean_bdev_aliases()
1643 clear_buffer_req(bh); in clean_bdev_aliases()
1645 bh = bh->b_this_page; in clean_bdev_aliases()
1646 } while (bh != head); in clean_bdev_aliases()
1718 struct buffer_head *bh, *head; in __block_write_full_page() local
1736 bh = head; in __block_write_full_page()
1737 blocksize = bh->b_size; in __block_write_full_page()
1757 clear_buffer_dirty(bh); in __block_write_full_page()
1758 set_buffer_uptodate(bh); in __block_write_full_page()
1759 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1760 buffer_dirty(bh)) { in __block_write_full_page()
1761 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1762 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1765 clear_buffer_delay(bh); in __block_write_full_page()
1766 if (buffer_new(bh)) { in __block_write_full_page()
1768 clear_buffer_new(bh); in __block_write_full_page()
1769 clean_bdev_bh_alias(bh); in __block_write_full_page()
1772 bh = bh->b_this_page; in __block_write_full_page()
1774 } while (bh != head); in __block_write_full_page()
1777 if (!buffer_mapped(bh)) in __block_write_full_page()
1787 lock_buffer(bh); in __block_write_full_page()
1788 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1792 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1793 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1795 unlock_buffer(bh); in __block_write_full_page()
1797 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1807 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1808 if (buffer_async_write(bh)) { in __block_write_full_page()
1809 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1813 bh = next; in __block_write_full_page()
1814 } while (bh != head); in __block_write_full_page()
1841 bh = head; in __block_write_full_page()
1844 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1845 !buffer_delay(bh)) { in __block_write_full_page()
1846 lock_buffer(bh); in __block_write_full_page()
1847 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1853 clear_buffer_dirty(bh); in __block_write_full_page()
1855 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1861 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1862 if (buffer_async_write(bh)) { in __block_write_full_page()
1863 clear_buffer_dirty(bh); in __block_write_full_page()
1864 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, in __block_write_full_page()
1868 bh = next; in __block_write_full_page()
1869 } while (bh != head); in __block_write_full_page()
1883 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1889 bh = head = page_buffers(page); in page_zero_new_buffers()
1892 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1894 if (buffer_new(bh)) { in page_zero_new_buffers()
1903 set_buffer_uptodate(bh); in page_zero_new_buffers()
1906 clear_buffer_new(bh); in page_zero_new_buffers()
1907 mark_buffer_dirty(bh); in page_zero_new_buffers()
1912 bh = bh->b_this_page; in page_zero_new_buffers()
1913 } while (bh != head); in page_zero_new_buffers()
1918 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh() argument
1923 bh->b_bdev = iomap->bdev; in iomap_to_bh()
1940 if (!buffer_uptodate(bh) || in iomap_to_bh()
1942 set_buffer_new(bh); in iomap_to_bh()
1945 if (!buffer_uptodate(bh) || in iomap_to_bh()
1947 set_buffer_new(bh); in iomap_to_bh()
1948 set_buffer_uptodate(bh); in iomap_to_bh()
1949 set_buffer_mapped(bh); in iomap_to_bh()
1950 set_buffer_delay(bh); in iomap_to_bh()
1958 set_buffer_new(bh); in iomap_to_bh()
1959 set_buffer_unwritten(bh); in iomap_to_bh()
1964 set_buffer_new(bh); in iomap_to_bh()
1965 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> in iomap_to_bh()
1967 set_buffer_mapped(bh); in iomap_to_bh()
1982 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int() local
1995 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
1996 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
2000 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2001 set_buffer_uptodate(bh); in __block_write_begin_int()
2005 if (buffer_new(bh)) in __block_write_begin_int()
2006 clear_buffer_new(bh); in __block_write_begin_int()
2007 if (!buffer_mapped(bh)) { in __block_write_begin_int()
2008 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
2010 err = get_block(inode, block, bh, 1); in __block_write_begin_int()
2014 iomap_to_bh(inode, block, bh, iomap); in __block_write_begin_int()
2017 if (buffer_new(bh)) { in __block_write_begin_int()
2018 clean_bdev_bh_alias(bh); in __block_write_begin_int()
2020 clear_buffer_new(bh); in __block_write_begin_int()
2021 set_buffer_uptodate(bh); in __block_write_begin_int()
2022 mark_buffer_dirty(bh); in __block_write_begin_int()
2033 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2034 set_buffer_uptodate(bh); in __block_write_begin_int()
2037 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin_int()
2038 !buffer_unwritten(bh) && in __block_write_begin_int()
2040 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in __block_write_begin_int()
2041 *wait_bh++=bh; in __block_write_begin_int()
2070 struct buffer_head *bh, *head; in __block_commit_write() local
2072 bh = head = page_buffers(page); in __block_commit_write()
2073 blocksize = bh->b_size; in __block_commit_write()
2079 if (!buffer_uptodate(bh)) in __block_commit_write()
2082 set_buffer_uptodate(bh); in __block_commit_write()
2083 mark_buffer_dirty(bh); in __block_commit_write()
2085 if (buffer_new(bh)) in __block_commit_write()
2086 clear_buffer_new(bh); in __block_commit_write()
2089 bh = bh->b_this_page; in __block_commit_write()
2090 } while (bh != head); in __block_commit_write()
2219 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2232 bh = head; in block_is_partially_uptodate()
2237 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2245 bh = bh->b_this_page; in block_is_partially_uptodate()
2246 } while (bh != head); in block_is_partially_uptodate()
2263 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2274 bh = head; in block_read_full_page()
2279 if (buffer_uptodate(bh)) in block_read_full_page()
2282 if (!buffer_mapped(bh)) { in block_read_full_page()
2287 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2288 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2292 if (!buffer_mapped(bh)) { in block_read_full_page()
2295 set_buffer_uptodate(bh); in block_read_full_page()
2302 if (buffer_uptodate(bh)) in block_read_full_page()
2305 arr[nr++] = bh; in block_read_full_page()
2306 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2324 bh = arr[i]; in block_read_full_page()
2325 lock_buffer(bh); in block_read_full_page()
2326 mark_buffer_async_read(bh); in block_read_full_page()
2335 bh = arr[i]; in block_read_full_page()
2336 if (buffer_uptodate(bh)) in block_read_full_page()
2337 end_buffer_async_read(bh, 1); in block_read_full_page()
2339 submit_bh(REQ_OP_READ, 0, bh); in block_read_full_page()
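block_read_full_page() is the generic ->readpage engine: the filesystem supplies only a get_block_t callback that maps logical blocks to disk blocks, and the code above collects the non-uptodate buffers and submits them as async reads. A sketch of the wiring (the myfs_* names are hypothetical):

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Hypothetical mapper: logical block -> on-disk block. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create)
{
        /* look up iblock, then: map_bh(bh_result, inode->i_sb, phys); */
        return 0;                       /* unmapped blocks read as holes */
}

static int myfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, myfs_get_block);
}
/* wired up as .readpage = myfs_readpage in address_space_operations */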
2543 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2545 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2555 struct buffer_head *bh; in attach_nobh_buffers() local
2560 bh = head; in attach_nobh_buffers()
2563 set_buffer_dirty(bh); in attach_nobh_buffers()
2564 if (!bh->b_this_page) in attach_nobh_buffers()
2565 bh->b_this_page = head; in attach_nobh_buffers()
2566 bh = bh->b_this_page; in attach_nobh_buffers()
2567 } while (bh != head); in attach_nobh_buffers()
2585 struct buffer_head *head, *bh; in nobh_write_begin() local
2638 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2640 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2644 bh->b_state = 0; in nobh_write_begin()
2649 bh, create); in nobh_write_begin()
2652 if (!buffer_mapped(bh)) in nobh_write_begin()
2654 if (buffer_new(bh)) in nobh_write_begin()
2655 clean_bdev_bh_alias(bh); in nobh_write_begin()
2657 set_buffer_uptodate(bh); in nobh_write_begin()
2660 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2665 if (buffer_uptodate(bh)) in nobh_write_begin()
2668 lock_buffer(bh); in nobh_write_begin()
2669 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2670 submit_bh(REQ_OP_READ, 0, bh); in nobh_write_begin()
2681 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2682 wait_on_buffer(bh); in nobh_write_begin()
2683 if (!buffer_uptodate(bh)) in nobh_write_begin()
2724 struct buffer_head *bh; in nobh_write_end() local
2744 bh = head; in nobh_write_end()
2746 free_buffer_head(bh); in nobh_write_end()
2883 struct buffer_head *bh; in block_truncate_page() local
2905 bh = page_buffers(page); in block_truncate_page()
2908 bh = bh->b_this_page; in block_truncate_page()
2914 if (!buffer_mapped(bh)) { in block_truncate_page()
2915 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2916 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2920 if (!buffer_mapped(bh)) in block_truncate_page()
2926 set_buffer_uptodate(bh); in block_truncate_page()
2928 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2930 ll_rw_block(REQ_OP_READ, 0, 1, &bh); in block_truncate_page()
2931 wait_on_buffer(bh); in block_truncate_page()
2933 if (!buffer_uptodate(bh)) in block_truncate_page()
2938 mark_buffer_dirty(bh); in block_truncate_page()
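block_truncate_page() zeroes the part of the final block that lies beyond the new EOF (reading it first if necessary, as the ll_rw_block() call above shows), so stale data never leaks back when the file grows again. A sketch of the truncate-path wiring, reusing the hypothetical myfs_get_block from the readpage sketch above:

#include <linux/buffer_head.h>

/* Called after i_size has been cut to `newsize`. */
static int myfs_zero_tail(struct inode *inode, loff_t newsize)
{
        return block_truncate_page(inode->i_mapping, newsize,
                                   myfs_get_block);
}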
3000 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
3003 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
3005 bh->b_end_io(bh, !bio->bi_status); in end_bio_bh_io_sync()
3009 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, in submit_bh_wbc() argument
3014 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
3015 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
3016 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
3017 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
3018 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
3023 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) in submit_bh_wbc()
3024 clear_buffer_write_io_error(bh); in submit_bh_wbc()
3028 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); in submit_bh_wbc()
3030 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
3031 bio_set_dev(bio, bh->b_bdev); in submit_bh_wbc()
3034 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
3035 BUG_ON(bio->bi_iter.bi_size != bh->b_size); in submit_bh_wbc()
3038 bio->bi_private = bh; in submit_bh_wbc()
3040 if (buffer_meta(bh)) in submit_bh_wbc()
3042 if (buffer_prio(bh)) in submit_bh_wbc()
3051 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
3058 int submit_bh(int op, int op_flags, struct buffer_head *bh) in submit_bh() argument
3060 return submit_bh_wbc(op, op_flags, bh, 0, NULL); in submit_bh()
3095 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3097 if (!trylock_buffer(bh)) in ll_rw_block()
3100 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3101 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3102 get_bh(bh); in ll_rw_block()
3103 submit_bh(op, op_flags, bh); in ll_rw_block()
3107 if (!buffer_uptodate(bh)) { in ll_rw_block()
3108 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3109 get_bh(bh); in ll_rw_block()
3110 submit_bh(op, op_flags, bh); in ll_rw_block()
3114 unlock_buffer(bh); in ll_rw_block()
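ll_rw_block() is best effort: it skips any buffer it cannot trylock, plus clean buffers on writes and already-uptodate buffers on reads, so callers that need the data must wait and re-check every bh afterwards. A sketch of a batched read, again with the pre-v5.19 signature listed here:

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

/* Kick off reads for `nr` buffers, then wait for all of them. */
static int read_bh_batch(struct buffer_head *bhs[], int nr)
{
        int i, err = 0;

        ll_rw_block(REQ_OP_READ, 0, nr, bhs);   /* async submit */
        for (i = 0; i < nr; i++) {
                wait_on_buffer(bhs[i]);
                if (!buffer_uptodate(bhs[i]))
                        err = -EIO;             /* skipped or failed */
        }
        return err;
}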
3119 void write_dirty_buffer(struct buffer_head *bh, int op_flags) in write_dirty_buffer() argument
3121 lock_buffer(bh); in write_dirty_buffer()
3122 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
3123 unlock_buffer(bh); in write_dirty_buffer()
3126 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3127 get_bh(bh); in write_dirty_buffer()
3128 submit_bh(REQ_OP_WRITE, op_flags, bh); in write_dirty_buffer()
3137 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) in __sync_dirty_buffer() argument
3141 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3142 lock_buffer(bh); in __sync_dirty_buffer()
3143 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
3148 if (!buffer_mapped(bh)) { in __sync_dirty_buffer()
3149 unlock_buffer(bh); in __sync_dirty_buffer()
3153 get_bh(bh); in __sync_dirty_buffer()
3154 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3155 ret = submit_bh(REQ_OP_WRITE, op_flags, bh); in __sync_dirty_buffer()
3156 wait_on_buffer(bh); in __sync_dirty_buffer()
3157 if (!ret && !buffer_uptodate(bh)) in __sync_dirty_buffer()
3160 unlock_buffer(bh); in __sync_dirty_buffer()
3166 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3168 return __sync_dirty_buffer(bh, REQ_SYNC); in sync_dirty_buffer()
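sync_dirty_buffer() is the synchronous flush used for critical metadata such as superblocks: lock, clear the dirty bit, submit with REQ_SYNC, wait, and report failure. A sketch:

#include <linux/buffer_head.h>

/* Update and durably write one metadata block. */
static int commit_block(struct buffer_head *bh)
{
        mark_buffer_dirty(bh);
        return sync_dirty_buffer(bh);   /* 0 on success, -EIO on write error */
}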
3192 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3194 return atomic_read(&bh->b_count) | in buffer_busy()
3195 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3202 struct buffer_head *bh; in drop_buffers() local
3204 bh = head; in drop_buffers()
3206 if (buffer_busy(bh)) in drop_buffers()
3208 bh = bh->b_this_page; in drop_buffers()
3209 } while (bh != head); in drop_buffers()
3212 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3214 if (bh->b_assoc_map) in drop_buffers()
3215 __remove_assoc_queue(bh); in drop_buffers()
3216 bh = next; in drop_buffers()
3217 } while (bh != head); in drop_buffers()
3262 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3265 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3266 free_buffer_head(bh); in try_to_free_buffers()
3267 bh = next; in try_to_free_buffers()
3268 } while (bh != buffers_to_free); in try_to_free_buffers()
3322 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3324 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3325 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3354 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3356 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3357 lock_buffer(bh); in bh_uptodate_or_lock()
3358 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3360 unlock_buffer(bh); in bh_uptodate_or_lock()
3372 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3374 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3376 if (buffer_uptodate(bh)) { in bh_submit_read()
3377 unlock_buffer(bh); in bh_submit_read()
3381 get_bh(bh); in bh_submit_read()
3382 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3383 submit_bh(REQ_OP_READ, 0, bh); in bh_submit_read()
3384 wait_on_buffer(bh); in bh_submit_read()
3385 if (buffer_uptodate(bh)) in bh_submit_read()
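Together these two form the common read-if-stale idiom: bh_uptodate_or_lock() returns 1 when no I/O is needed, otherwise it returns 0 with the buffer locked, which is exactly the state bh_submit_read() expects. A sketch:

#include <linux/buffer_head.h>

/* Ensure bh holds valid data, reading it only when necessary. */
static int ensure_uptodate(struct buffer_head *bh)
{
        if (bh_uptodate_or_lock(bh))
                return 0;               /* already valid, never locked */
        return bh_submit_read(bh);      /* unlocks; 0 or -EIO */
}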