// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/iomap.h>
#include <linux/ktime.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "log.h"
#include "super.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
#include "aops.h"
#include "trace_gfs2.h"

/* This doesn't need to be that large as the maximum number of 64-bit
 * pointers in a 4k block is 512, so __u16 is fine for that. Keeping it
 * small saves stack space.
 */
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
	int mp_fheight; /* find_metapath height */
	int mp_aheight; /* actual height (lookup height) */
};

static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);

/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The page to unstuff the inode data into
 *
 * Returns: errno
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > gfs2_max_stuffed_size(ip))
			dsize = gfs2_max_stuffed_size(ip);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (gfs2_is_jdata(ip)) {
		struct buffer_head *bh;

		if (!page_has_buffers(page))
			create_empty_buffers(page, BIT(inode->i_blkbits),
					     BIT(BH_Uptodate));

		bh = page_buffers(page);
		if (!buffer_mapped(bh))
			map_bh(bh, inode->i_sb, block);

		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	} else {
		set_page_dirty(page);
		gfs2_ordered_add_inode(ip);
	}

	return 0;
}

static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/* Set up the pointer to the new block */

	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
	return error;
}

/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct page *page;
	int error;

	down_write(&ip->i_rw_mutex);
	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
	error = -ENOMEM;
	if (!page)
		goto out;
	error = __gfs2_unstuff_inode(ip, page);
	unlock_page(page);
	put_page(page);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine returns a struct metapath structure that defines a path
 * through the metadata of inode "ip" to get to block "block".
 *
 * Example:
 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
 * filesystem with a blocksize of 4096.
 *
 * find_metapath() would return a struct metapath structure set to:
 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
 *
 * That means that in order to get to the block containing the byte at
 * offset 101342453, we would load the indirect block pointed to by pointer
 * 0 in the dinode. We would then load the indirect block pointed to by
 * pointer 48 in that indirect block. We would then load the data block
 * pointed to by pointer 165 in that indirect block.
 *
 *              ----------------------------------------
 *              | Dinode |                             |
 *              |        |                            4|
 *              |        |0 1 2 3 4 5                 9|
 *              |        |                            6|
 *              ----------------------------------------
 *                       |
 *                       |
 *                       V
 *              ----------------------------------------
 *              | Indirect Block                       |
 *              |                                     5|
 *              |            4 4 4 4 4 5 5            1|
 *              |0           5 6 7 8 9 0 1            2|
 *              ----------------------------------------
 *                       |
 *                       |
 *                       V
 *              ----------------------------------------
 *              | Indirect Block                       |
 *              |                 1 1 1 1 1           5|
 *              |                 6 6 6 6 6           1|
 *              |0                3 4 5 6 7           2|
 *              ----------------------------------------
 *                       |
 *                       |
 *                       V
 *              ----------------------------------------
 *              | Data block containing offset         |
 *              |            101342453                 |
 *              |                                      |
 *              |                                      |
 *              ----------------------------------------
 *
 */

static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	mp->mp_fheight = height;
	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}

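/*
 * Worked example (illustrative, using the 512-pointers-per-block
 * simplification from the diagram above): byte offset 101342453 with a
 * 4096-byte block size is logical block 24741, and repeated do_div()
 * peels off one index per level, lowest level first:
 *
 *	24741 % 512 = 165  ->  mp_list[2]  (quotient 48)
 *	   48 % 512 =  48  ->  mp_list[1]  (quotient 0)
 *	    0 % 512 =   0  ->  mp_list[0]
 *
 * The code actually divides by sdp->sd_inptrs, which is slightly less
 * than 512 because each indirect block begins with a metadata header.
 */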

/*
 * When the tree height grows, the old root moves down below pointer 0
 * of the new root; a path that also starts at pointer 0 shares that
 * branch and only diverges from height 2, while any other path branches
 * directly below the dinode at height 1.
 */
static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)
		return 2;
	return 1;
}

/**
 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 */
static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	if (height == 0)
		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
}

/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */

static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	__be64 *p = metaptr1(height, mp);
	return p + mp->mp_list[height];
}

static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
{
	const struct buffer_head *bh = mp->mp_bh[height];
	return (const __be64 *)(bh->b_data + bh->b_size);
}

static void clone_metapath(struct metapath *clone, struct metapath *mp)
{
	unsigned int hgt;

	*clone = *mp;
	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
		get_bh(clone->mp_bh[hgt]);
}

/*
 * gfs2_metapath_ra - issue read-ahead for a range of indirect pointers
 *
 * Start asynchronous reads for the blocks referenced by [start, end)
 * that are not already cached and up to date; the subsequent synchronous
 * lookups will then find them in the page cache.
 */
static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
{
	const __be64 *t;

	for (t = start; t < end; t++) {
		struct buffer_head *rabh;

		if (!*t)
			continue;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(REQ_OP_READ,
					  REQ_RAHEAD | REQ_META | REQ_PRIO,
					  rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}

static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
			     unsigned int x, unsigned int h)
{
	for (; x < h; x++) {
		__be64 *ptr = metapointer(x, mp);
		u64 dblock = be64_to_cpu(*ptr);
		int ret;

		if (!dblock)
			break;
		ret = gfs2_meta_buffer(ip, GFS2_METATYPE_IN, dblock, &mp->mp_bh[x + 1]);
		if (ret)
			return ret;
	}
	mp->mp_aheight = x + 1;
	return 0;
}

/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error
 */

static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
}

/**
 * fillup_metapath - fill up buffers for the metadata path to a specific height
 * @ip: The inode
 * @mp: The metapath
 * @h: The height to which it should be mapped
 *
 * Similar to lookup_metapath, but does lookups for a range of heights
 *
 * Returns: error or the number of buffers filled
 */

static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
{
	unsigned int x = 0;
	int ret;

	if (h) {
		/* find the first buffer we need to look up. */
		for (x = h - 1; x > 0; x--) {
			if (mp->mp_bh[x])
				break;
		}
	}
	ret = __fillup_metapath(ip, mp, x, h);
	if (ret)
		return ret;
	return mp->mp_aheight - x - 1;
}

static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
{
	sector_t factor = 1, block = 0;
	int hgt;

	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
		if (hgt < mp->mp_aheight)
			block += mp->mp_list[hgt] * factor;
		factor *= sdp->sd_inptrs;
	}
	return block;
}
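
/*
 * metapath_to_block() is the inverse of find_metapath(): in the example
 * above, mp_list = { 0, 48, 165 } maps back to logical block
 * 0 * 512^2 + 48 * 512 + 165 = 24741 (with the 512-pointer
 * simplification; the real multiplier is sd_inptrs).  Heights with no
 * buffer looked up (hgt >= mp_aheight) contribute nothing.
 */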

static void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
		mp->mp_bh[i] = NULL;
	}
}

/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @bh: The metadata block
 * @ptr: Current position in @bh
 * @limit: Max extent length to return
 * @eob: Set to 1 if we hit "end of block"
 *
 * Returns: The length of the extent (minimum of one block)
 */

static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
{
	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		d++;
	} while (be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return ptr - first;
}
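
/*
 * For example, a pointer array holding disk blocks { 100, 101, 102, 200 }
 * with @ptr at the first entry yields an extent length of 3; *eob is only
 * set when the run of consecutive block numbers reaches the end of the
 * buffer.
 */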

enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };

/*
 * gfs2_metadata_walker - walk an indirect block
 * @mp: Metapath to indirect block
 * @ptrs: Number of pointers to look at
 *
 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
 * indirect block to follow.
 */
typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
						   unsigned int ptrs);

/*
 * gfs2_walk_metadata - walk a tree of indirect blocks
 * @inode: The inode
 * @mp: Starting point of walk
 * @max_len: Maximum number of blocks to walk
 * @walker: Called during the walk
 *
 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
 * past the end of metadata, and a negative error code otherwise.
 */

static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
			      u64 max_len, gfs2_metadata_walker walker)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 factor = 1;
	unsigned int hgt;
	int ret;

	/*
	 * The walk starts in the lowest allocated indirect block, which may be
	 * before the position indicated by @mp.  Adjust @max_len accordingly
	 * to avoid a short walk.
	 */
	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
		max_len += mp->mp_list[hgt] * factor;
		mp->mp_list[hgt] = 0;
		factor *= sdp->sd_inptrs;
	}

	for (;;) {
		u16 start = mp->mp_list[hgt];
		enum walker_status status;
		unsigned int ptrs;
		u64 len;

		/* Walk indirect block. */
		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
		len = ptrs * factor;
		if (len > max_len)
			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
		status = walker(mp, ptrs);
		switch (status) {
		case WALK_STOP:
			return 1;
		case WALK_FOLLOW:
			BUG_ON(mp->mp_aheight == mp->mp_fheight);
			ptrs = mp->mp_list[hgt] - start;
			len = ptrs * factor;
			break;
		case WALK_CONTINUE:
			break;
		}
		if (len >= max_len)
			break;
		max_len -= len;
		if (status == WALK_FOLLOW)
			goto fill_up_metapath;

lower_metapath:
		/* Decrease height of metapath. */
		brelse(mp->mp_bh[hgt]);
		mp->mp_bh[hgt] = NULL;
		mp->mp_list[hgt] = 0;
		if (!hgt)
			break;
		hgt--;
		factor *= sdp->sd_inptrs;

		/* Advance in metadata tree. */
		(mp->mp_list[hgt])++;
		if (hgt) {
			if (mp->mp_list[hgt] >= sdp->sd_inptrs)
				goto lower_metapath;
		} else {
			if (mp->mp_list[hgt] >= sdp->sd_diptrs)
				break;
		}

fill_up_metapath:
		/* Increase height of metapath. */
		ret = fillup_metapath(ip, mp, ip->i_height - 1);
		if (ret < 0)
			return ret;
		hgt += ret;
		for (; ret; ret--)
			do_div(factor, sdp->sd_inptrs);
		mp->mp_aheight = hgt + 1;
	}
	return 0;
}

static enum walker_status gfs2_hole_walker(struct metapath *mp,
					   unsigned int ptrs)
{
	const __be64 *start, *ptr, *end;
	unsigned int hgt;

	hgt = mp->mp_aheight - 1;
	start = metapointer(hgt, mp);
	end = start + ptrs;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr) {
			mp->mp_list[hgt] += ptr - start;
			if (mp->mp_aheight == mp->mp_fheight)
				return WALK_STOP;
			return WALK_FOLLOW;
		}
	}
	return WALK_CONTINUE;
}

/**
 * gfs2_hole_size - figure out the size of a hole
 * @inode: The inode
 * @lblock: The logical starting block number
 * @len: How far to look (in blocks)
 * @mp: The metapath at lblock
 * @iomap: The iomap to store the hole size in
 *
 * This function modifies @mp.
 *
 * Returns: errno on error
 */
static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
			  struct metapath *mp, struct iomap *iomap)
{
	struct metapath clone;
	u64 hole_size;
	int ret;

	clone_metapath(&clone, mp);
	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
	if (ret < 0)
		goto out;

	if (ret == 1)
		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
	else
		hole_size = len;
	iomap->length = hole_size << inode->i_blkbits;
	ret = 0;

out:
	release_metapath(&clone);
	return ret;
}

static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
			((i > 1) ? sizeof(struct gfs2_meta_header) :
				   sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}

enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};

/**
 * __gfs2_iomap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @iomap: The iomap structure
 * @mp: The metapath, with proper height information calculated
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * This function is called after __gfs2_iomap_get, which works out the
 * total number of blocks we need via gfs2_alloc_size.
 *
 * We then do the actual allocation asking for an extent at a time (if
 * enough contiguous free blocks are available, there will only be one
 * allocation request per call) and use the state machine to initialise
 * the blocks in order.
 *
 * Right now, this function will allocate at most one indirect block
 * worth of data -- with a default block size of 4K, that's slightly
 * less than 2M.  If this limitation is ever removed to allow huge
 * allocations, we would probably still want to limit the iomap size we
 * return to avoid stalling other tasks during huge writes; the next
 * iomap iteration would then find the blocks already allocated.
 *
 * Returns: errno on error
 */

static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
			      struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	size_t dblks = iomap->length >> inode->i_blkbits;
	const unsigned end_of_metadata = mp->mp_fheight - 1;
	int ret;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(mp->mp_aheight < 1);
	BUG_ON(dibh == NULL);
	BUG_ON(dblks < 1);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	down_write(&ip->i_rw_mutex);

	if (mp->mp_fheight == mp->mp_aheight) {
		/* Bottom indirect block exists */
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		if (mp->mp_fheight == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = mp->mp_fheight - mp->mp_aheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = mp->mp_fheight - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (mp->mp_fheight - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = mp->mp_aheight;
	do {
		n = blks - alloced;
		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (ret)
			goto out;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_remove_revoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
			     i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == mp->mp_fheight - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for (i = branch_start; i < mp->mp_fheight; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
			fallthrough;	/* To branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < mp->mp_fheight)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < mp->mp_fheight && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == mp->mp_fheight)
				state = ALLOC_DATA;
			if (n == 0)
				break;
			fallthrough;	/* To tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			iomap->addr = bn << inode->i_blkbits;
			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			break;
		}
	} while (iomap->addr == IOMAP_NULL_ADDR);

	iomap->type = IOMAP_MAPPED;
	iomap->length = (u64)dblks << inode->i_blkbits;
	ip->i_height = mp->mp_fheight;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, dibh->b_data);
out:
	up_write(&ip->i_rw_mutex);
	return ret;
}

#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE

/**
 * gfs2_alloc_size - Compute the maximum allocation size
 * @inode: The inode
 * @mp: The metapath
 * @size: Requested size in blocks
 *
 * Compute the maximum size of the next allocation at @mp.
 *
 * Returns: size in blocks
 */
static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const __be64 *first, *ptr, *end;

	/*
	 * For writes to stuffed files, this function is called twice via
	 * __gfs2_iomap_get, before and after unstuffing.  The size we return
	 * the first time needs to be large enough to get the reservation and
	 * allocation sizes right.  The size we return the second time must
	 * be exact or else __gfs2_iomap_alloc won't do the right thing.
	 */

	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
		unsigned int maxsize = mp->mp_fheight > 1 ?
			sdp->sd_inptrs : sdp->sd_diptrs;
		maxsize -= mp->mp_list[mp->mp_fheight - 1];
		if (size > maxsize)
			size = maxsize;
		return size;
	}

	first = metapointer(ip->i_height - 1, mp);
	end = metaend(ip->i_height - 1, mp);
	if (end - first > size)
		end = first + size;
	for (ptr = first; ptr < end; ptr++) {
		if (*ptr)
			break;
	}
	return ptr - first;
}

/**
 * __gfs2_iomap_get - Map blocks from an inode to disk blocks
 * @inode: The inode
 * @pos: Starting position in bytes
 * @length: Length to map, in bytes
 * @flags: iomap flags
 * @iomap: The iomap structure
 * @mp: The metapath
 *
 * Returns: errno
 */
static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap,
			    struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t size = i_size_read(inode);
	__be64 *ptr;
	sector_t lblock;
	sector_t lblock_stop;
	int ret;
	int eob;
	u64 len;
	struct buffer_head *dibh = NULL, *bh;
	u8 height;

	if (!length)
		return -EINVAL;

	down_read(&ip->i_rw_mutex);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (ret)
		goto unlock;
	mp->mp_bh[0] = dibh;

	if (gfs2_is_stuffed(ip)) {
		if (flags & IOMAP_WRITE) {
			loff_t max_size = gfs2_max_stuffed_size(ip);

			if (pos + length > max_size)
				goto unstuff;
			iomap->length = max_size;
		} else {
			if (pos >= size) {
				if (flags & IOMAP_REPORT) {
					ret = -ENOENT;
					goto unlock;
				} else {
					iomap->offset = pos;
					iomap->length = length;
					goto hole_found;
				}
			}
			iomap->length = size;
		}
		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
			      sizeof(struct gfs2_dinode);
		iomap->type = IOMAP_INLINE;
		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
		goto out;
	}

unstuff:
	lblock = pos >> inode->i_blkbits;
	iomap->offset = lblock << inode->i_blkbits;
	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
	len = lblock_stop - lblock + 1;
	iomap->length = len << inode->i_blkbits;

	height = ip->i_height;
	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
		height++;
	find_metapath(sdp, lblock, mp, height);
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;

	ret = lookup_metapath(ip, mp);
	if (ret)
		goto unlock;

	if (mp->mp_aheight != ip->i_height)
		goto do_alloc;

	ptr = metapointer(ip->i_height - 1, mp);
	if (*ptr == 0)
		goto do_alloc;

	bh = mp->mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh, ptr, len, &eob);

	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
	iomap->length = len << inode->i_blkbits;
	iomap->type = IOMAP_MAPPED;
	iomap->flags |= IOMAP_F_MERGED;
	if (eob)
		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;

out:
	iomap->bdev = inode->i_sb->s_bdev;
unlock:
	up_read(&ip->i_rw_mutex);
	return ret;

do_alloc:
	if (flags & IOMAP_REPORT) {
		if (pos >= size)
			ret = -ENOENT;
		else if (height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
		else
			iomap->length = size - iomap->offset;
	} else if (flags & IOMAP_WRITE) {
		u64 alloc_size;

		if (flags & IOMAP_DIRECT)
			goto out;	/* (see gfs2_file_direct_write) */

		len = gfs2_alloc_size(inode, mp, len);
		alloc_size = len << inode->i_blkbits;
		if (alloc_size < iomap->length)
			iomap->length = alloc_size;
	} else {
		if (pos < size && height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
	}
hole_found:
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	goto out;
}

static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
				   unsigned len)
{
	unsigned int blockmask = i_blocksize(inode) - 1;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int blocks;

	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
}
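
/*
 * For example, with a 4096-byte block size (blockmask = 4095), a 10-byte
 * write at pos 4094 straddles a block boundary, so
 * ((4094 & 4095) + 10 + 4095) >> 12 = 2 blocks are reserved.
 */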

static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
				 unsigned copied, struct page *page)
{
	struct gfs2_trans *tr = current->journal_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (page && !gfs2_is_stuffed(ip))
		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);

	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(sdp);
}

static const struct iomap_page_ops gfs2_iomap_page_ops = {
	.page_prepare = gfs2_iomap_page_prepare,
	.page_done = gfs2_iomap_page_done,
};

static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
				  loff_t length, unsigned flags,
				  struct iomap *iomap,
				  struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	bool unstuff;
	int ret;

	unstuff = gfs2_is_stuffed(ip) &&
		  pos + length > gfs2_max_stuffed_size(ip);

	if (unstuff || iomap->type == IOMAP_HOLE) {
		unsigned int data_blocks, ind_blocks;
		struct gfs2_alloc_parms ap = {};
		unsigned int rblocks;
		struct gfs2_trans *tr;

		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
				       &ind_blocks);
		ap.target = data_blocks + ind_blocks;
		ret = gfs2_quota_lock_check(ip, &ap);
		if (ret)
			return ret;

		ret = gfs2_inplace_reserve(ip, &ap);
		if (ret)
			goto out_qunlock;

		rblocks = RES_DINODE + ind_blocks;
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks;
		if (ind_blocks || data_blocks)
			rblocks += RES_STATFS + RES_QUOTA;
		if (inode == sdp->sd_rindex)
			rblocks += 2 * RES_STATFS;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);

		ret = gfs2_trans_begin(sdp, rblocks,
				       iomap->length >> inode->i_blkbits);
		if (ret)
			goto out_trans_fail;

		if (unstuff) {
			ret = gfs2_unstuff_dinode(ip);
			if (ret)
				goto out_trans_end;
			release_metapath(mp);
			ret = __gfs2_iomap_get(inode, iomap->offset,
					       iomap->length, flags, iomap, mp);
			if (ret)
				goto out_trans_end;
		}

		if (iomap->type == IOMAP_HOLE) {
			ret = __gfs2_iomap_alloc(inode, iomap, mp);
			if (ret) {
				gfs2_trans_end(sdp);
				gfs2_inplace_release(ip);
				punch_hole(ip, iomap->offset, iomap->length);
				goto out_qunlock;
			}
		}

		tr = current->journal_info;
		if (tr->tr_num_buf_new)
			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);

		gfs2_trans_end(sdp);
	}

	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
		iomap->page_ops = &gfs2_iomap_page_ops;
	return 0;

out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return ret;
}

static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	if (gfs2_is_jdata(ip))
		iomap->flags |= IOMAP_F_BUFFER_HEAD;

	trace_gfs2_iomap_start(ip, pos, length, flags);
	ret = __gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
	if (ret)
		goto out_unlock;

	switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
	case IOMAP_WRITE:
		if (flags & IOMAP_DIRECT) {
			/*
			 * Silently fall back to buffered I/O for stuffed files
			 * or if we've got a hole (see gfs2_file_direct_write).
			 */
			if (iomap->type != IOMAP_MAPPED)
				ret = -ENOTBLK;
			goto out_unlock;
		}
		break;
	case IOMAP_ZERO:
		if (iomap->type == IOMAP_HOLE)
			goto out_unlock;
		break;
	default:
		goto out_unlock;
	}

	ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);

out_unlock:
	release_metapath(&mp);
	trace_gfs2_iomap_end(ip, iomap, ret);
	return ret;
}

static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
	case IOMAP_WRITE:
		if (flags & IOMAP_DIRECT)
			return 0;
		break;
	case IOMAP_ZERO:
		if (iomap->type == IOMAP_HOLE)
			return 0;
		break;
	default:
		return 0;
	}

	if (!gfs2_is_stuffed(ip))
		gfs2_ordered_add_inode(ip);

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	gfs2_inplace_release(ip);

	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);

	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
		/* Deallocate blocks that were just allocated. */
		loff_t blockmask = i_blocksize(inode) - 1;
		loff_t end = (pos + length) & ~blockmask;

		pos = (pos + written + blockmask) & ~blockmask;
		if (pos < end) {
			truncate_pagecache_range(inode, pos, end - 1);
			punch_hole(ip, pos, end - pos);
		}
	}

	if (unlikely(!written))
		return 0;

	if (iomap->flags & IOMAP_F_SIZE_CHANGED)
		mark_inode_dirty(inode);
	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	return 0;
}

const struct iomap_ops gfs2_iomap_ops = {
	.iomap_begin = gfs2_iomap_begin,
	.iomap_end = gfs2_iomap_end,
};

/**
 * gfs2_block_map - Map one or more blocks of an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satisfy the request
 *
 * The size of the requested mapping is defined in bh_map->b_size.
 *
 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
 * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
 * bh_map->b_size to indicate the size of the mapping when @lblock and
 * successive blocks are mapped, up to the requested size.
 *
 * Sets buffer_boundary() if a read of metadata will be required
 * before the next block can be mapped.  Sets buffer_new() if new
 * blocks were allocated.
 *
 * Returns: errno
 */

int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t pos = (loff_t)lblock << inode->i_blkbits;
	loff_t length = bh_map->b_size;
	struct iomap iomap = { };
	int ret;

	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);

	if (!create)
		ret = gfs2_iomap_get(inode, pos, length, &iomap);
	else
		ret = gfs2_iomap_alloc(inode, pos, length, &iomap);
	if (ret)
		goto out;

	if (iomap.length > bh_map->b_size) {
		iomap.length = bh_map->b_size;
		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
	}
	if (iomap.addr != IOMAP_NULL_ADDR)
		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
	bh_map->b_size = iomap.length;
	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
		set_buffer_boundary(bh_map);
	if (iomap.flags & IOMAP_F_NEW)
		set_buffer_new(bh_map);

out:
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	return ret;
}

int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
		    unsigned int *extlen)
{
	unsigned int blkbits = inode->i_blkbits;
	struct iomap iomap = { };
	unsigned int len;
	int ret;

	ret = gfs2_iomap_get(inode, lblock << blkbits, *extlen << blkbits,
			     &iomap);
	if (ret)
		return ret;
	if (iomap.type != IOMAP_MAPPED)
		return -EIO;
	*dblock = iomap.addr >> blkbits;
	len = iomap.length >> blkbits;
	if (len < *extlen)
		*extlen = len;
	return 0;
}
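
/*
 * Typical use (illustrative sketch, not taken from a caller in this
 * file): map up to 32 contiguous blocks starting at lblock:
 *
 *	unsigned int extlen = 32;
 *	u64 dblock;
 *
 *	if (!gfs2_get_extent(inode, lblock, &dblock, &extlen))
 *		// dblock .. dblock + extlen - 1 are contiguous on disk
 */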

int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
		      unsigned int *extlen, bool *new)
{
	unsigned int blkbits = inode->i_blkbits;
	struct iomap iomap = { };
	unsigned int len;
	int ret;

	ret = gfs2_iomap_alloc(inode, lblock << blkbits, *extlen << blkbits,
			       &iomap);
	if (ret)
		return ret;
	if (iomap.type != IOMAP_MAPPED)
		return -EIO;
	*dblock = iomap.addr >> blkbits;
	len = iomap.length >> blkbits;
	if (len < *extlen)
		*extlen = len;
	*new = iomap.flags & IOMAP_F_NEW;
	return 0;
}

/*
 * NOTE: Never call gfs2_block_zero_range with an open transaction because it
 * uses iomap write to perform its actions, which begin their own transactions
 * (iomap_begin, page_prepare, etc.)
 */
static int gfs2_block_zero_range(struct inode *inode, loff_t from,
				 unsigned int length)
{
	BUG_ON(current->journal_info);
	return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
}

#define GFS2_JTRUNC_REVOKES 8192

/**
 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 * @inode: The inode being truncated
 * @oldsize: The original (larger) size
 * @newsize: The new smaller size
 *
 * With jdata files, we have to journal a revoke for each block which is
 * truncated. As a result, we need to split this into separate transactions
 * if the number of pages being truncated gets too large.
 */

static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	u64 chunk;
	int error;

	while (oldsize != newsize) {
		struct gfs2_trans *tr;
		unsigned int offs;

		chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = oldsize & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);

		truncate_pagecache(inode, oldsize - chunk);
		oldsize -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}

	return 0;
}
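
/*
 * For example, with a 4KiB block size, max_chunk is
 * 8192 * 4096 bytes = 32 MiB of pagecache truncated per transaction;
 * the offs adjustment keeps the remaining size page-aligned once a
 * partial page has been truncated.
 */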

static int trunc_start(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = NULL;
	int journaled = gfs2_is_jdata(ip);
	u64 oldsize = inode->i_size;
	int error;

	if (!gfs2_is_stuffed(ip)) {
		unsigned int blocksize = i_blocksize(inode);
		unsigned int offs = newsize & (blocksize - 1);
		if (offs) {
			error = gfs2_block_zero_range(inode, newsize,
						      blocksize - offs);
			if (error)
				return error;
		}
	}
	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip))
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	else
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

out:
	brelse(dibh);
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}

int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
		   struct iomap *iomap)
{
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	ret = __gfs2_iomap_get(inode, pos, length, 0, iomap, &mp);
	release_metapath(&mp);
	return ret;
}

int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
		     struct iomap *iomap)
{
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	ret = __gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
	if (!ret && iomap->type == IOMAP_HOLE)
		ret = __gfs2_iomap_alloc(inode, iomap, &mp);
	release_metapath(&mp);
	return ret;
}

/**
 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
 * @ip: inode
 * @rd_gh: holder of resource group glock
 * @bh: buffer head to sweep
 * @start: starting point in bh
 * @end: end point in bh
 * @meta: true if bh points to metadata (rather than data)
 * @btotal: place to keep count of total blocks freed
 *
 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
 * free, and free them all. However, we do it one rgrp at a time. If this
 * block has references to multiple rgrps, we break it into individual
 * transactions. This allows other processes to use the rgrps while we're
 * focused on a single one, for better concurrency / performance.
 * At every transaction boundary, we rewrite the inode into the journal.
 * That way the bitmaps are kept consistent with the inode and we can recover
 * if we're interrupted by power-outages.
 *
 * Returns: 0, or return code if an error occurred.
 *          *btotal has the total number of blocks freed
 */
static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
			      struct buffer_head *bh, __be64 *start, __be64 *end,
			      bool meta, u32 *btotal)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr;
	__be64 *p;
	int blks_outside_rgrp;
	u64 bn, bstart, isize_blks;
	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
	int ret = 0;
	bool buf_in_tr = false; /* buffer was added to transaction */

more_rgrps:
	rgd = NULL;
	if (gfs2_holder_initialized(rd_gh)) {
		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
		gfs2_assert_withdraw(sdp,
			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
	}
	blks_outside_rgrp = 0;
	bstart = 0;
	blen = 0;

	for (p = start; p < end; p++) {
		if (!*p)
			continue;
		bn = be64_to_cpu(*p);

		if (rgd) {
			if (!rgrp_contains_block(rgd, bn)) {
				blks_outside_rgrp++;
				continue;
			}
		} else {
			rgd = gfs2_blk2rgrpd(sdp, bn, true);
			if (unlikely(!rgd)) {
				ret = -EIO;
				goto out;
			}
			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
						 LM_FLAG_NODE_SCOPE, rd_gh);
			if (ret)
				goto out;

			/* Must be done with the rgrp glock held: */
			if (gfs2_rs_active(&ip->i_res) &&
			    rgd == ip->i_res.rs_rgd)
				gfs2_rs_deltree(&ip->i_res);
		}

		/* The size of our transactions will be unknown until we
		   actually process all the metadata blocks that relate to
		   the rgrp. So we estimate. We know it can't be more than
		   the dinode's i_blocks and we don't want to exceed the
		   journal flush threshold, sd_log_thresh2. */
		if (current->journal_info == NULL) {
			unsigned int jblocks_rqsted, revokes;

			jblocks_rqsted = rgd->rd_length + RES_DINODE +
				RES_INDIRECT;
			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
				jblocks_rqsted +=
					atomic_read(&sdp->sd_log_thresh2);
			else
				jblocks_rqsted += isize_blks;
			revokes = jblocks_rqsted;
			if (meta)
				revokes += end - start;
			else if (ip->i_depth)
				revokes += sdp->sd_inptrs;
			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
			if (ret)
				goto out_unlock;
			down_write(&ip->i_rw_mutex);
		}
		/* check if we will exceed the transaction blocks requested */
		tr = current->journal_info;
		if (tr->tr_num_buf_new + RES_STATFS +
		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
			/* We set blks_outside_rgrp to ensure the loop will
			   be repeated for the same rgrp, but with a new
			   transaction. */
			blks_outside_rgrp++;
			/* This next part is tricky. If the buffer was added
			   to the transaction, we've already set some block
			   pointers to 0, so we better follow through and free
			   them, or we will introduce corruption (so break).
			   This may be impossible, or at least rare, but I
			   decided to cover the case regardless.

			   If the buffer was not added to the transaction
			   (this call), doing so would exceed our transaction
			   size, so we need to end the transaction and start a
			   new one (so goto). */

			if (buf_in_tr)
				break;
			goto out_unlock;
		}

		gfs2_trans_add_meta(ip->i_gl, bh);
		buf_in_tr = true;
		*p = 0;
		if (bstart + blen == bn) {
			blen++;
			continue;
		}
		if (bstart) {
			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
			(*btotal) += blen;
			gfs2_add_inode_blocks(&ip->i_inode, -blen);
		}
		bstart = bn;
		blen = 1;
	}
	if (bstart) {
		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
		(*btotal) += blen;
		gfs2_add_inode_blocks(&ip->i_inode, -blen);
	}
out_unlock:
	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
					    outside the rgrp we just processed,
					    do it all over again. */
		if (current->journal_info) {
			struct buffer_head *dibh;

			ret = gfs2_meta_inode_buffer(ip, &dibh);
			if (ret)
				goto out;

			/* Every transaction boundary, we rewrite the dinode
			   to keep its di_blocks current in case of failure. */
			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
				current_time(&ip->i_inode);
			gfs2_trans_add_meta(ip->i_gl, dibh);
			gfs2_dinode_out(ip, dibh->b_data);
			brelse(dibh);
			up_write(&ip->i_rw_mutex);
			gfs2_trans_end(sdp);
			buf_in_tr = false;
		}
		gfs2_glock_dq_uninit(rd_gh);
		cond_resched();
		goto more_rgrps;
	}
out:
	return ret;
}

static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
{
	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
		return false;
	return true;
}

/**
 * find_nonnull_ptr - find a non-null pointer given a metapath and height
 * @sdp: The superblock
 * @mp: starting metapath
 * @h: desired height to search
 * @end_list: See punch_hole().
 * @end_aligned: See punch_hole().
 *
 * Assumes the metapath is valid (with buffers) out to height h.
 * Returns: true if a non-null pointer was found in the metapath buffer
 *          false if all remaining pointers are NULL in the buffer
 */
static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
			     unsigned int h,
			     __u16 *end_list, unsigned int end_aligned)
{
	struct buffer_head *bh = mp->mp_bh[h];
	__be64 *first, *ptr, *end;

	first = metaptr1(h, mp);
	ptr = first + mp->mp_list[h];
	end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
		bool keep_end = h < end_aligned;
		end = first + end_list[h] + keep_end;
	}

	while (ptr < end) {
		if (*ptr) { /* if we have a non-null pointer */
			mp->mp_list[h] = ptr - first;
			h++;
			if (h < GFS2_MAX_META_HEIGHT)
				mp->mp_list[h] = 0;
			return true;
		}
		ptr++;
	}
	return false;
}

enum dealloc_states {
	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
	DEALLOC_FILL_MP = 2,    /* Fill in the metapath to the given height. */
	DEALLOC_DONE = 3,       /* process complete */
};

static inline void
metapointer_range(struct metapath *mp, int height,
		  __u16 *start_list, unsigned int start_aligned,
		  __u16 *end_list, unsigned int end_aligned,
		  __be64 **start, __be64 **end)
{
	struct buffer_head *bh = mp->mp_bh[height];
	__be64 *first;

	first = metaptr1(height, mp);
	*start = first;
	if (mp_eq_to_hgt(mp, start_list, height)) {
		bool keep_start = height < start_aligned;
		*start = first + start_list[height] + keep_start;
	}
	*end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
		bool keep_end = height < end_aligned;
		*end = first + end_list[height] + keep_end;
	}
}

static inline bool walk_done(struct gfs2_sbd *sdp,
			     struct metapath *mp, int height,
			     __u16 *end_list, unsigned int end_aligned)
{
	__u16 end;

	if (end_list) {
		bool keep_end = height < end_aligned;
		if (!mp_eq_to_hgt(mp, end_list, height))
			return false;
		end = end_list[height] + keep_end;
	} else
		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
	return mp->mp_list[height] >= end;
}

/**
 * punch_hole - deallocate blocks in a file
 * @ip: inode to truncate
 * @offset: the start of the hole
 * @length: the size of the hole (or 0 for truncate)
 *
 * Punch a hole into a file or truncate a file at a given position.  This
 * function operates in whole blocks (@offset and @length are rounded
 * accordingly); partially filled blocks must be cleared otherwise.
 *
 * This function works from the bottom up, and from the right to the left. In
 * other words, it strips off the highest layer (data) before stripping any of
 * the metadata. Doing it this way is best in case the operation is interrupted
 * by power failure, etc.  The dinode is rewritten in every transaction to
 * guarantee integrity.
 */
punch_hole(struct gfs2_inode * ip,u64 offset,u64 length)1703 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1704 {
1705 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1706 u64 maxsize = sdp->sd_heightsize[ip->i_height];
1707 struct metapath mp = {};
1708 struct buffer_head *dibh, *bh;
1709 struct gfs2_holder rd_gh;
1710 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1711 u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1712 __u16 start_list[GFS2_MAX_META_HEIGHT];
1713 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1714 unsigned int start_aligned, end_aligned;
1715 unsigned int strip_h = ip->i_height - 1;
1716 u32 btotal = 0;
1717 int ret, state;
1718 int mp_h; /* metapath buffers are read in to this height */
1719 u64 prev_bnr = 0;
1720 __be64 *start, *end;
1721
1722 if (offset >= maxsize) {
1723 /*
1724 * The starting point lies beyond the allocated meta-data;
1725 * there are no blocks do deallocate.
1726 */
1727 return 0;
1728 }
1729
1730 /*
1731 * The start position of the hole is defined by lblock, start_list, and
1732 * start_aligned. The end position of the hole is defined by lend,
1733 * end_list, and end_aligned.
1734 *
1735 * start_aligned and end_aligned define down to which height the start
1736 * and end positions are aligned to the metadata tree (i.e., the
1737 * position is a multiple of the metadata granularity at the height
1738 * above). This determines at which heights additional meta pointers
1739 * needs to be preserved for the remaining data.
1740 */
1741
1742 if (length) {
1743 u64 end_offset = offset + length;
1744 u64 lend;
1745
1746 /*
1747 * Clip the end at the maximum file size for the given height:
1748 * that's how far the metadata goes; files bigger than that
1749 * will have additional layers of indirection.
1750 */
1751 if (end_offset > maxsize)
1752 end_offset = maxsize;
1753 lend = end_offset >> bsize_shift;
1754
1755 if (lblock >= lend)
1756 return 0;
1757
1758 find_metapath(sdp, lend, &mp, ip->i_height);
1759 end_list = __end_list;
1760 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1761
1762 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1763 if (end_list[mp_h])
1764 break;
1765 }
1766 end_aligned = mp_h;
1767 }
1768
1769 find_metapath(sdp, lblock, &mp, ip->i_height);
1770 memcpy(start_list, mp.mp_list, sizeof(start_list));
1771
1772 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1773 if (start_list[mp_h])
1774 break;
1775 }
1776 start_aligned = mp_h;
1777
1778 ret = gfs2_meta_inode_buffer(ip, &dibh);
1779 if (ret)
1780 return ret;
1781
1782 mp.mp_bh[0] = dibh;
1783 ret = lookup_metapath(ip, &mp);
1784 if (ret)
1785 goto out_metapath;
1786
1787 /* issue read-ahead on metadata */
1788 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1789 metapointer_range(&mp, mp_h, start_list, start_aligned,
1790 end_list, end_aligned, &start, &end);
1791 gfs2_metapath_ra(ip->i_gl, start, end);
1792 }
1793
1794 if (mp.mp_aheight == ip->i_height)
1795 state = DEALLOC_MP_FULL; /* We have a complete metapath */
1796 else
1797 state = DEALLOC_FILL_MP; /* deal with partial metapath */
1798
1799 ret = gfs2_rindex_update(sdp);
1800 if (ret)
1801 goto out_metapath;
1802
1803 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1804 if (ret)
1805 goto out_metapath;
1806 gfs2_holder_mark_uninitialized(&rd_gh);
1807
1808 mp_h = strip_h;
1809
1810 while (state != DEALLOC_DONE) {
1811 switch (state) {
1812 /* Truncate a full metapath at the given strip height.
1813 * Note that strip_h == mp_h in order to be in this state. */
1814 case DEALLOC_MP_FULL:
1815 bh = mp.mp_bh[mp_h];
1816 gfs2_assert_withdraw(sdp, bh);
1817 if (gfs2_assert_withdraw(sdp,
1818 prev_bnr != bh->b_blocknr)) {
1819 fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u,"
1820 "s_h:%u, mp_h:%u\n",
1821 (unsigned long long)ip->i_no_addr,
1822 prev_bnr, ip->i_height, strip_h, mp_h);
1823 }
1824 prev_bnr = bh->b_blocknr;
1825
1826 if (gfs2_metatype_check(sdp, bh,
1827 (mp_h ? GFS2_METATYPE_IN :
1828 GFS2_METATYPE_DI))) {
1829 ret = -EIO;
1830 goto out;
1831 }
1832
1833 /*
1834 * Below, passing end_aligned as 0 gives us the
1835 * metapointer range excluding the end point: the end
1836 * point is the first metapath we must not deallocate!
1837 */
1838
1839 metapointer_range(&mp, mp_h, start_list, start_aligned,
1840 end_list, 0 /* end_aligned */,
1841 &start, &end);
1842 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1843 start, end,
1844 mp_h != ip->i_height - 1,
1845 &btotal);
1846
1847 			/* If we hit an error or have just swept the dinode
1848 			   buffer, exit. */
1849 if (ret || !mp_h) {
1850 state = DEALLOC_DONE;
1851 break;
1852 }
1853 state = DEALLOC_MP_LOWER;
1854 break;
1855
1856 /* lower the metapath strip height */
1857 case DEALLOC_MP_LOWER:
1858 /* We're done with the current buffer, so release it,
1859 unless it's the dinode buffer. Then back up to the
1860 previous pointer. */
1861 if (mp_h) {
1862 brelse(mp.mp_bh[mp_h]);
1863 mp.mp_bh[mp_h] = NULL;
1864 }
1865 /* If we can't get any lower in height, we've stripped
1866 off all we can. Next step is to back up and start
1867 stripping the previous level of metadata. */
1868 if (mp_h == 0) {
1869 strip_h--;
1870 memcpy(mp.mp_list, start_list, sizeof(start_list));
1871 mp_h = strip_h;
1872 state = DEALLOC_FILL_MP;
1873 break;
1874 }
1875 mp.mp_list[mp_h] = 0;
1876 mp_h--; /* search one metadata height down */
1877 mp.mp_list[mp_h]++;
1878 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1879 break;
1880 /* Here we've found a part of the metapath that is not
1881 * allocated. We need to search at that height for the
1882 * next non-null pointer. */
1883 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1884 state = DEALLOC_FILL_MP;
1885 mp_h++;
1886 }
1887 /* No more non-null pointers at this height. Back up
1888 to the previous height and try again. */
1889 break; /* loop around in the same state */
1890
1891 /* Fill the metapath with buffers to the given height. */
1892 case DEALLOC_FILL_MP:
1893 /* Fill the buffers out to the current height. */
1894 ret = fillup_metapath(ip, &mp, mp_h);
1895 if (ret < 0)
1896 goto out;
1897
1898 /* On the first pass, issue read-ahead on metadata. */
1899 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1900 unsigned int height = mp.mp_aheight - 1;
1901
1902 /* No read-ahead for data blocks. */
1903 if (mp.mp_aheight - 1 == strip_h)
1904 height--;
1905
1906 for (; height >= mp.mp_aheight - ret; height--) {
1907 metapointer_range(&mp, height,
1908 start_list, start_aligned,
1909 end_list, end_aligned,
1910 &start, &end);
1911 gfs2_metapath_ra(ip->i_gl, start, end);
1912 }
1913 }
1914
1915 /* If buffers found for the entire strip height */
1916 if (mp.mp_aheight - 1 == strip_h) {
1917 state = DEALLOC_MP_FULL;
1918 break;
1919 }
1920 if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1921 mp_h = mp.mp_aheight - 1;
1922
1923 /* If we find a non-null block pointer, crawl a bit
1924 higher up in the metapath and try again, otherwise
1925 we need to look lower for a new starting point. */
1926 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1927 mp_h++;
1928 else
1929 state = DEALLOC_MP_LOWER;
1930 break;
1931 }
1932 }
1933
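	/*
	 * If any blocks were freed, roll the statfs, quota and dinode
	 * updates into a transaction. sweep_bh_for_rgrps() may have left a
	 * transaction open (current->journal_info set); reuse it if so,
	 * otherwise start a new one here.
	 */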
1934 if (btotal) {
1935 if (current->journal_info == NULL) {
1936 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1937 RES_QUOTA, 0);
1938 if (ret)
1939 goto out;
1940 down_write(&ip->i_rw_mutex);
1941 }
1942 gfs2_statfs_change(sdp, 0, +btotal, 0);
1943 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1944 ip->i_inode.i_gid);
1945 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1946 gfs2_trans_add_meta(ip->i_gl, dibh);
1947 gfs2_dinode_out(ip, dibh->b_data);
1948 up_write(&ip->i_rw_mutex);
1949 gfs2_trans_end(sdp);
1950 }
1951
1952 out:
1953 if (gfs2_holder_initialized(&rd_gh))
1954 gfs2_glock_dq_uninit(&rd_gh);
1955 if (current->journal_info) {
1956 up_write(&ip->i_rw_mutex);
1957 gfs2_trans_end(sdp);
1958 cond_resched();
1959 }
1960 gfs2_quota_unhold(ip);
1961 out_metapath:
1962 release_metapath(&mp);
1963 return ret;
1964 }
1965
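/**
 * trunc_end - finish off a truncate and update the dinode
 * @ip: the inode
 *
 * Resets the metadata height and the allocation goal and clears the
 * inline data area when the file has been truncated to size zero,
 * updates the timestamps, and clears the GFS2_DIF_TRUNC_IN_PROG flag.
 *
 * Returns: errno
 */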
1966 static int trunc_end(struct gfs2_inode *ip)
1967 {
1968 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1969 struct buffer_head *dibh;
1970 int error;
1971
1972 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1973 if (error)
1974 return error;
1975
1976 down_write(&ip->i_rw_mutex);
1977
1978 error = gfs2_meta_inode_buffer(ip, &dibh);
1979 if (error)
1980 goto out;
1981
1982 if (!i_size_read(&ip->i_inode)) {
1983 ip->i_height = 0;
1984 ip->i_goal = ip->i_no_addr;
1985 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1986 gfs2_ordered_del_inode(ip);
1987 }
1988 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1989 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
1990
1991 gfs2_trans_add_meta(ip->i_gl, dibh);
1992 gfs2_dinode_out(ip, dibh->b_data);
1993 brelse(dibh);
1994
1995 out:
1996 up_write(&ip->i_rw_mutex);
1997 gfs2_trans_end(sdp);
1998 return error;
1999 }
2000
2001 /**
2002 * do_shrink - make a file smaller
2003 * @inode: the inode
2004 * @newsize: the size to make the file
2005 *
2006  * Called with an exclusive lock on @inode. @newsize must be
2007  * equal to or smaller than the current inode size.
2008 *
2009 * Returns: errno
2010 */
2011
2012 static int do_shrink(struct inode *inode, u64 newsize)
2013 {
2014 struct gfs2_inode *ip = GFS2_I(inode);
2015 int error;
2016
2017 error = trunc_start(inode, newsize);
2018 if (error < 0)
2019 return error;
2020 if (gfs2_is_stuffed(ip))
2021 return 0;
2022
2023 error = punch_hole(ip, newsize, 0);
2024 if (error == 0)
2025 error = trunc_end(ip);
2026
2027 return error;
2028 }
2029
2030 void gfs2_trim_blocks(struct inode *inode)
2031 {
2032 int ret;
2033
2034 ret = do_shrink(inode, inode->i_size);
2035 WARN_ON(ret != 0);
2036 }
2037
2038 /**
2039 * do_grow - Touch and update inode size
2040 * @inode: The inode
2041 * @size: The new size
2042 *
2043 * This function updates the timestamps on the inode and
2044 * may also increase the size of the inode. This function
2045 * must not be called with @size any smaller than the current
2046 * inode size.
2047 *
2048 * Although it is not strictly required to unstuff files here,
2049 * earlier versions of GFS2 have a bug in the stuffed file reading
2050 * code which will result in a buffer overrun if the size is larger
2051 * than the max stuffed file size. In order to prevent this from
2052 * occurring, such files are unstuffed, but in other cases we can
2053 * just update the inode size directly.
2054 *
2055 * Returns: 0 on success, or -ve on error
2056 */
2057
2058 static int do_grow(struct inode *inode, u64 size)
2059 {
2060 struct gfs2_inode *ip = GFS2_I(inode);
2061 struct gfs2_sbd *sdp = GFS2_SB(inode);
2062 struct gfs2_alloc_parms ap = { .target = 1, };
2063 struct buffer_head *dibh;
2064 int error;
2065 int unstuff = 0;
2066
2067 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2068 error = gfs2_quota_lock_check(ip, &ap);
2069 if (error)
2070 return error;
2071
2072 error = gfs2_inplace_reserve(ip, &ap);
2073 if (error)
2074 goto do_grow_qunlock;
2075 unstuff = 1;
2076 }
2077
2078 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2079 (unstuff &&
2080 gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2081 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2082 0 : RES_QUOTA), 0);
2083 if (error)
2084 goto do_grow_release;
2085
2086 if (unstuff) {
2087 error = gfs2_unstuff_dinode(ip);
2088 if (error)
2089 goto do_end_trans;
2090 }
2091
2092 error = gfs2_meta_inode_buffer(ip, &dibh);
2093 if (error)
2094 goto do_end_trans;
2095
2096 truncate_setsize(inode, size);
2097 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2098 gfs2_trans_add_meta(ip->i_gl, dibh);
2099 gfs2_dinode_out(ip, dibh->b_data);
2100 brelse(dibh);
2101
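	/*
	 * Error unwinding: note that the do_grow_qunlock label sits inside
	 * the if (unstuff) block, so a gfs2_inplace_reserve() failure only
	 * unlocks the quota without releasing a reservation that was never
	 * taken.
	 */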
2102 do_end_trans:
2103 gfs2_trans_end(sdp);
2104 do_grow_release:
2105 if (unstuff) {
2106 gfs2_inplace_release(ip);
2107 do_grow_qunlock:
2108 gfs2_quota_unlock(ip);
2109 }
2110 return error;
2111 }
2112
2113 /**
2114 * gfs2_setattr_size - make a file a given size
2115 * @inode: the inode
2116 * @newsize: the size to make the file
2117 *
2118 * The file size can grow, shrink, or stay the same size. This
2119 * is called holding i_rwsem and an exclusive glock on the inode
2120 * in question.
2121 *
2122 * Returns: errno
2123 */
2124
2125 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2126 {
2127 struct gfs2_inode *ip = GFS2_I(inode);
2128 int ret;
2129
2130 BUG_ON(!S_ISREG(inode->i_mode));
2131
2132 ret = inode_newsize_ok(inode, newsize);
2133 if (ret)
2134 return ret;
2135
2136 inode_dio_wait(inode);
2137
2138 ret = gfs2_qa_get(ip);
2139 if (ret)
2140 goto out;
2141
2142 if (newsize >= inode->i_size) {
2143 ret = do_grow(inode, newsize);
2144 goto out;
2145 }
2146
2147 ret = do_shrink(inode, newsize);
2148 out:
2149 gfs2_rs_delete(ip, NULL);
2150 gfs2_qa_put(ip);
2151 return ret;
2152 }
2153
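/**
 * gfs2_truncatei_resume - finish a truncate that was interrupted
 * @ip: the inode
 *
 * Deallocates everything beyond the current inode size and completes
 * the truncate; used for inodes that still have GFS2_DIF_TRUNC_IN_PROG
 * set from an earlier, interrupted truncate.
 *
 * Returns: errno
 */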
2154 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2155 {
2156 int error;
2157 error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2158 if (!error)
2159 error = trunc_end(ip);
2160 return error;
2161 }
2162
2163 int gfs2_file_dealloc(struct gfs2_inode *ip)
2164 {
2165 return punch_hole(ip, 0, 0);
2166 }
2167
2168 /**
2169 * gfs2_free_journal_extents - Free cached journal bmap info
2170 * @jd: The journal
2171 *
2172 */
2173
2174 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2175 {
2176 struct gfs2_journal_extent *jext;
2177
2178 while(!list_empty(&jd->extent_list)) {
2179 jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2180 list_del(&jext->list);
2181 kfree(jext);
2182 }
2183 }
2184
2185 /**
2186 * gfs2_add_jextent - Add or merge a new extent to extent cache
2187 * @jd: The journal descriptor
2188 * @lblock: The logical block at start of new extent
2189 * @dblock: The physical block at start of new extent
2190 * @blocks: Size of extent in fs blocks
2191 *
2192 * Returns: 0 on success or -ENOMEM
2193 */
2194
2195 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2196 {
2197 struct gfs2_journal_extent *jext;
2198
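	/* If the new extent starts where the last cached extent ends on
	   disk, simply extend that extent instead of allocating a new one. */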
2199 if (!list_empty(&jd->extent_list)) {
2200 jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2201 if ((jext->dblock + jext->blocks) == dblock) {
2202 jext->blocks += blocks;
2203 return 0;
2204 }
2205 }
2206
2207 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2208 if (jext == NULL)
2209 return -ENOMEM;
2210 jext->dblock = dblock;
2211 jext->lblock = lblock;
2212 jext->blocks = blocks;
2213 list_add_tail(&jext->list, &jd->extent_list);
2214 jd->nr_extents++;
2215 return 0;
2216 }
2217
2218 /**
2219 * gfs2_map_journal_extents - Cache journal bmap info
2220 * @sdp: The super block
2221 * @jd: The journal to map
2222 *
2223 * Create a reusable "extent" mapping from all logical
2224 * blocks to all physical blocks for the given journal. This will save
2225 * us time when writing journal blocks. Most journals will have only one
2226  * extent that maps all their logical blocks. That's because mkfs.gfs2
2227 * arranges the journal blocks sequentially to maximize performance.
2228 * So the extent would map the first block for the entire file length.
2229  * However, gfs2_jadd can run while other file activity is happening, so
2230  * journals added later may not be sequential. It is also possible, though
2231  * less likely, that users created their own journals by mounting the
2232  * metafs and laying them out manually. Such journals may have
2233  * several extents.
2234 *
2235 * Returns: 0 on success, or error on failure
2236 */
2237
2238 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2239 {
2240 u64 lblock = 0;
2241 u64 lblock_stop;
2242 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2243 struct buffer_head bh;
2244 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2245 u64 size;
2246 int rc;
2247 ktime_t start, end;
2248
2249 start = ktime_get();
2250 lblock_stop = i_size_read(jd->jd_inode) >> shift;
2251 size = (lblock_stop - lblock) << shift;
2252 jd->nr_extents = 0;
2253 WARN_ON(!list_empty(&jd->extent_list));
2254
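	/*
	 * bh.b_size is used in both directions by gfs2_block_map(): on
	 * entry it limits how much may be mapped, and on return it holds
	 * the size of the extent that was actually mapped.
	 */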
2255 do {
2256 bh.b_state = 0;
2257 bh.b_blocknr = 0;
2258 bh.b_size = size;
2259 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2260 if (rc || !buffer_mapped(&bh))
2261 goto fail;
2262 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2263 if (rc)
2264 goto fail;
2265 size -= bh.b_size;
2266 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2267 } while(size > 0);
2268
2269 end = ktime_get();
2270 	fs_info(sdp, "journal %u mapped with %u extents in %lldms\n", jd->jd_jid,
2271 jd->nr_extents, ktime_ms_delta(end, start));
2272 return 0;
2273
2274 fail:
2275 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2276 rc, jd->jd_jid,
2277 (unsigned long long)(i_size_read(jd->jd_inode) - size),
2278 jd->nr_extents);
2279 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2280 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2281 bh.b_state, (unsigned long long)bh.b_size);
2282 gfs2_free_journal_extents(jd);
2283 return rc;
2284 }
2285
2286 /**
2287 * gfs2_write_alloc_required - figure out if a write will require an allocation
2288 * @ip: the file being written to
2289 * @offset: the offset to write to
2290 * @len: the number of bytes being written
2291 *
2292 * Returns: 1 if an alloc is required, 0 otherwise
2293 */
2294
2295 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2296 unsigned int len)
2297 {
2298 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2299 struct buffer_head bh;
2300 unsigned int shift;
2301 u64 lblock, lblock_stop, size;
2302 u64 end_of_file;
2303
2304 if (!len)
2305 return 0;
2306
2307 if (gfs2_is_stuffed(ip)) {
2308 if (offset + len > gfs2_max_stuffed_size(ip))
2309 return 1;
2310 return 0;
2311 }
2312
2313 shift = sdp->sd_sb.sb_bsize_shift;
2314 BUG_ON(gfs2_is_dir(ip));
2315 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2316 lblock = offset >> shift;
2317 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2318 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2319 return 1;
2320
2321 size = (lblock_stop - lblock) << shift;
2322 do {
2323 bh.b_state = 0;
2324 bh.b_size = size;
2325 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2326 if (!buffer_mapped(&bh))
2327 return 1;
2328 size -= bh.b_size;
2329 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2330 } while(size > 0);
2331
2332 return 0;
2333 }
2334
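/*
 * Zero out part of the inline data of a stuffed inode. The caller must
 * hold an open transaction, since the dinode buffer is journaled here.
 */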
2335 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2336 {
2337 struct gfs2_inode *ip = GFS2_I(inode);
2338 struct buffer_head *dibh;
2339 int error;
2340
2341 if (offset >= inode->i_size)
2342 return 0;
2343 if (offset + length > inode->i_size)
2344 length = inode->i_size - offset;
2345
2346 error = gfs2_meta_inode_buffer(ip, &dibh);
2347 if (error)
2348 return error;
2349 gfs2_trans_add_meta(ip->i_gl, dibh);
2350 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2351 length);
2352 brelse(dibh);
2353 return 0;
2354 }
2355
2356 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2357 loff_t length)
2358 {
2359 struct gfs2_sbd *sdp = GFS2_SB(inode);
2360 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2361 int error;
2362
2363 while (length) {
2364 struct gfs2_trans *tr;
2365 loff_t chunk;
2366 unsigned int offs;
2367
2368 chunk = length;
2369 if (chunk > max_chunk)
2370 chunk = max_chunk;
2371
2372 offs = offset & ~PAGE_MASK;
2373 if (offs && chunk > PAGE_SIZE)
2374 chunk = offs + ((chunk - offs) & PAGE_MASK);
2375
2376 truncate_pagecache_range(inode, offset, chunk);
2377 offset += chunk;
2378 length -= chunk;
2379
2380 tr = current->journal_info;
2381 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2382 continue;
2383
2384 gfs2_trans_end(sdp);
2385 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2386 if (error)
2387 return error;
2388 }
2389 return 0;
2390 }
2391
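/**
 * __gfs2_punch_hole - deallocate blocks in a given file range
 * @file: the file
 * @offset: the start of the range
 * @length: the length of the range
 *
 * Returns: errno
 */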
2392 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2393 {
2394 struct inode *inode = file_inode(file);
2395 struct gfs2_inode *ip = GFS2_I(inode);
2396 struct gfs2_sbd *sdp = GFS2_SB(inode);
2397 unsigned int blocksize = i_blocksize(inode);
2398 loff_t start, end;
2399 int error;
2400
2401 if (!gfs2_is_stuffed(ip)) {
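		/*
		 * Zero the unaligned head and tail of the range within
		 * their blocks; the whole blocks in between are deallocated
		 * by punch_hole() below.
		 */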
2402 unsigned int start_off, end_len;
2403
2404 start_off = offset & (blocksize - 1);
2405 end_len = (offset + length) & (blocksize - 1);
2406 if (start_off) {
2407 unsigned int len = length;
2408 if (length > blocksize - start_off)
2409 len = blocksize - start_off;
2410 error = gfs2_block_zero_range(inode, offset, len);
2411 if (error)
2412 goto out;
2413 if (start_off + length < blocksize)
2414 end_len = 0;
2415 }
2416 if (end_len) {
2417 error = gfs2_block_zero_range(inode,
2418 offset + length - end_len, end_len);
2419 if (error)
2420 goto out;
2421 }
2422 }
2423
2424 start = round_down(offset, blocksize);
2425 end = round_up(offset + length, blocksize) - 1;
2426 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2427 if (error)
2428 return error;
2429
2430 if (gfs2_is_jdata(ip))
2431 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2432 GFS2_JTRUNC_REVOKES);
2433 else
2434 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2435 if (error)
2436 return error;
2437
2438 if (gfs2_is_stuffed(ip)) {
2439 error = stuffed_zero_range(inode, offset, length);
2440 if (error)
2441 goto out;
2442 }
2443
2444 if (gfs2_is_jdata(ip)) {
2445 BUG_ON(!current->journal_info);
2446 		error = gfs2_journaled_truncate_range(inode, offset, length);
2447 } else
2448 truncate_pagecache_range(inode, offset, offset + length - 1);
2449
2450 file_update_time(file);
2451 mark_inode_dirty(inode);
2452
2453 if (current->journal_info)
2454 gfs2_trans_end(sdp);
2455
2456 	if (!error && !gfs2_is_stuffed(ip))
2457 error = punch_hole(ip, offset, length);
2458
2459 out:
2460 if (current->journal_info)
2461 gfs2_trans_end(sdp);
2462 return error;
2463 }
2464
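/*
 * iomap writeback ->map_blocks: reuse the mapping cached in @wpc if
 * @offset still falls inside it; otherwise look up a new mapping that
 * extends as far as possible.
 */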
2465 static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
2466 loff_t offset)
2467 {
2468 int ret;
2469
2470 if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
2471 return -EIO;
2472
2473 if (offset >= wpc->iomap.offset &&
2474 offset < wpc->iomap.offset + wpc->iomap.length)
2475 return 0;
2476
2477 memset(&wpc->iomap, 0, sizeof(wpc->iomap));
2478 ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap);
2479 return ret;
2480 }
2481
2482 const struct iomap_writeback_ops gfs2_writeback_ops = {
2483 .map_blocks = gfs2_map_blocks,
2484 };
2485