// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"

static struct kmem_cache	*xfs_defer_pending_cache;

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish work
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */

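/*
 * Illustrative usage sketch (an assumption-laden example, not a verbatim
 * caller): a typical caller allocates a permanent transaction, makes
 * metadata changes that queue deferred work, then finishes the chain.
 * The tr_write reservation and resblks below are placeholders; real
 * callers size the reservation for their own operation and queue work
 * via xfs_defer_add() or a type-specific wrapper.
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
 *			&tp);
 *	if (error)
 *		return error;
 *
 *	(modify metadata; queue deferred work with xfs_defer_add)
 *
 *	error = xfs_defer_finish(&tp);
 *	if (error) {
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	return xfs_trans_commit(tp);
 */
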
static const struct xfs_defer_op_type *defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
};
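
/*
 * Sketch of the ops vtable shape, for orientation (the "foo" names are
 * hypothetical; only the hooks that this file actually invokes are
 * shown).  A new deferred-op type would supply something like:
 *
 *	const struct xfs_defer_op_type xfs_foo_defer_type = {
 *		.max_items	= XFS_FOO_MAX_FAST_ITEMS,
 *		.create_intent	= xfs_foo_create_intent,
 *		.abort_intent	= xfs_foo_abort_intent,
 *		.create_done	= xfs_foo_create_done,
 *		.finish_item	= xfs_foo_finish_item,
 *		.finish_cleanup	= xfs_foo_finish_cleanup,
 *		.cancel_item	= xfs_foo_cancel_item,
 *	};
 */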

static void
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];

	if (!dfp->dfp_intent)
		dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
				dfp->dfp_count, sort);
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_create_intents(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		xfs_defer_create_intent(tp, dfp, true);
	}
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	struct xfs_defer_pending	*dfp;
	const struct xfs_defer_op_type	*ops;

	trace_xfs_defer_trans_abort(tp, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_pending, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			ops->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}
}

/*
 * Capture resources that the caller said not to release ("held") when the
 * transaction commits.  Caller is responsible for zero-initializing @dres.
 */
static int
xfs_defer_save_resources(
	struct xfs_defer_resources	*dres,
	struct xfs_trans		*tp)
{
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;

	BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					dres->dr_ordered |=
							(1U << dres->dr_bufs);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					ili_item);
			if (ili->ili_lock_flags == 0) {
				if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						XFS_ILOG_CORE);
				dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Attach the held resources to the transaction. */
static void
xfs_defer_restore_resources(
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	/* Rejoin the joined inodes. */
	for (i = 0; i < dres->dr_inos; i++)
		xfs_trans_ijoin(tp, dres->dr_ip[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_trans_bjoin(tp, dres->dr_bp[i]);
		if (dres->dr_ordered & (1U << i))
			xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
		xfs_trans_bhold(tp, dres->dr_bp[i]);
	}
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_defer_resources	dres = { };
	int				error;

	error = xfs_defer_save_resources(&dres, *tpp);
	if (error)
		return error;

	trace_xfs_defer_trans_roll(*tpp, _RET_IP_);

	/*
	 * Roll the transaction.  Rolling always gives a new transaction (even
	 * if committing the old one fails!) to hand back to the caller, so we
	 * join the held resources to the new transaction so that we always
	 * return with the held resources joined to @tpp, no matter what
	 * happened.
	 */
	error = xfs_trans_roll(tpp);

	xfs_defer_restore_resources(*tpp, &dres);

	if (error)
		trace_xfs_defer_trans_roll_error(*tpp, error);
	return error;
}
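
/*
 * Illustrative sketch (an assumption, not a verbatim caller): the
 * save/restore helpers above generalize the classic
 * hold-a-buffer-across-a-roll pattern:
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_defer_trans_roll(&tp);
 *
 * On return, bp is still locked and has been rejoined to the new
 * transaction: xfs_defer_save_resources() noticed XFS_BLI_HOLD on the
 * buffer log item, and xfs_defer_restore_resources() re-joined and
 * re-held the buffer in the new transaction.
 */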

/*
 * Free up any items left in the list.
 */
static void
xfs_defer_cancel_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;
	const struct xfs_defer_op_type	*ops;

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_cancel_list(mp, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			ops->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_cache_free(xfs_defer_pending_cache, dfp);
	}
}

/*
 * Prevent a log intent item from pinning the tail of the log by logging a
 * done item to release the intent item; and then log a new intent item.
 * The caller should provide a fresh transaction and roll it after we're done.
 */
static int
xfs_defer_relog(
	struct xfs_trans		**tpp,
	struct list_head		*dfops)
{
	struct xlog			*log = (*tpp)->t_mountp->m_log;
	struct xfs_defer_pending	*dfp;
	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	list_for_each_entry(dfp, dfops, dfp_list) {
		/*
		 * If the log intent item for this deferred op is not a part of
		 * the current log checkpoint, relog the intent item to keep
		 * the log tail moving forward.  We're ok with this being racy
		 * because an incorrect decision means we'll be a little slower
		 * at pushing the tail.
		 */
		if (dfp->dfp_intent == NULL ||
		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
			continue;

		/*
		 * Figure out where we need the tail to be in order to maintain
		 * the minimum required free space in the log.  Only sample
		 * the log threshold once per call.
		 */
		if (threshold_lsn == NULLCOMMITLSN) {
			threshold_lsn = xlog_grant_push_threshold(log, 0);
			if (threshold_lsn == NULLCOMMITLSN)
				break;
		}
		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
			continue;

		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
		dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
	}

	if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
		return xfs_defer_trans_roll(tpp);
	return 0;
}

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
static int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_btree_cur		*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one.  See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			xfs_defer_create_intent(tp, dfp, false);
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}
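
/*
 * Sketch of the -EAGAIN handshake from the callback's side (a
 * hypothetical "foo" op type; the signature mirrors the ->finish_item
 * call above).  On hitting its reservation limit the callback leaves the
 * work item describing only the unfinished remainder, fixes up the done
 * item's count, and returns -EAGAIN, which makes xfs_defer_finish_one()
 * requeue the item and log a fresh intent:
 *
 *	STATIC int
 *	xfs_foo_finish_item(
 *		struct xfs_trans	*tp,
 *		struct xfs_log_item	*done,
 *		struct list_head	*item,
 *		struct xfs_btree_cur	**state)
 *	{
 *		int			error;
 *
 *		error = xfs_foo_apply_one_step(tp, done, item, state);
 *		if (error == -EAGAIN) {
 *			(update the work item to cover the remainder
 *			 and reset the done item's count to the number
 *			 of items actually finished)
 *		}
 *		return error;
 *	}
 */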

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp;
	int				error = 0;
	LIST_HEAD(dop_pending);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred work
		 * that was created by the caller.  This keeps the number of
		 * pending work items to a minimum, which decreases the amount
		 * of time that any one intent item can stick around in memory,
		 * pinning the log tail.
		 */
		xfs_defer_create_intents(*tp);
		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		error = xfs_defer_trans_roll(tp);
		if (error)
			goto out_shutdown;

		/* Possibly relog intent items to keep the log moving. */
		error = xfs_defer_relog(tp, &dop_pending);
		if (error)
			goto out_shutdown;

		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
				dfp_list);
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}

	/* Reset LOWMODE now that we've finished all the dfops. */
	ASSERT(list_empty(&(*tp)->t_dfops));
	(*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_trans		*tp,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;
	const struct xfs_defer_op_type	*ops;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&tp->t_dfops)) {
		dfp = list_last_entry(&tp->t_dfops,
				struct xfs_defer_pending, dfp_list);
		ops = defer_op_types[dfp->dfp_type];
		if (dfp->dfp_type != type ||
		    (ops->max_items && dfp->dfp_count >= ops->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
				GFP_NOFS | __GFP_NOFAIL);
		dfp->dfp_type = type;
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}
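
/*
 * Illustrative sketch (hypothetical "foo" names): a type-specific helper
 * usually allocates a work item from its cache, fills in the details of
 * the deferred operation, and hands the item's list head to
 * xfs_defer_add():
 *
 *	struct xfs_foo_intent	*fi;
 *
 *	fi = kmem_cache_alloc(xfs_foo_intent_cache,
 *			GFP_NOFS | __GFP_NOFAIL);
 *	fi->fi_startblock = bno;
 *	fi->fi_blockcount = len;
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FOO, &fi->fi_list);
 */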

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state.  This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls.  Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
	stp->t_flags &= ~XFS_TRANS_LOWMODE;
}

/*
 * Prepare a chain of fresh deferred ops work items to be completed later.  Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off.  The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it.  If there are no
 * intent items to capture, this function returns NULL.
 *
 * If the transaction holds any inodes or buffers, the capture structure
 * obtains extra references to them so that they survive until the captured
 * work is continued.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp)
{
	struct xfs_defer_capture	*dfc;
	unsigned short			i;
	int				error;

	if (list_empty(&tp->t_dfops))
		return NULL;

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	xfs_defer_create_intents(tp);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	error = xfs_defer_save_resources(&dfc->dfc_held, tp);
	if (error) {
		/*
		 * Resource capture should never fail, but if it does, we
		 * still have to shut down the log and release things
		 * properly.
		 */
		xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	}

	/*
	 * Grab extra references to the inodes and buffers because callers are
	 * expected to release their held references after we commit the
	 * transaction.
	 */
	for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
		ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
		ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
	}

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_hold(dfc->dfc_held.dr_bp[i]);

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_capture_free(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	unsigned short			i;

	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_relse(dfc->dfc_held.dr_bp[i]);

	for (i = 0; i < dfc->dfc_held.dr_inos; i++)
		xfs_irele(dfc->dfc_held.dr_ip[i]);

	kmem_free(dfc);
}

/*
 * Capture any deferred ops and commit the transaction.  This is the last step
 * needed to finish a log intent item that we recovered from the log.  If any
 * of the deferred ops operate on an inode, the caller must join that inode to
 * the transaction so that the reference can be transferred to the capture
 * structure.  The caller must hold ILOCK_EXCL on each such inode, and must
 * unlock it before calling xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp);
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_capture_free(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}
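
/*
 * Illustrative recovery flow (a sketch under assumptions; details vary
 * by intent type): the recovery handler for a recovered intent item
 * re-creates its deferred work in a new transaction and then captures
 * it for later:
 *
 *	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
 *
 * Once all intents have been recovered, each capture on the list is
 * resumed in a fresh transaction; committing that transaction finishes
 * the attached dfops chain:
 *
 *	xfs_defer_ops_continue(dfc, tp, &dres);
 *	error = xfs_trans_commit(tp);
 *	xfs_defer_resources_rele(&dres);
 */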

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure.  If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock and join the captured inode to the new transaction. */
	if (dfc->dfc_held.dr_inos == 2)
		xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
				dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
	else if (dfc->dfc_held.dr_inos == 1)
		xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);
	xfs_defer_restore_resources(tp, &dfc->dfc_held);
	memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}

/* Release the resources captured and continued during recovery. */
void
xfs_defer_resources_rele(
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	for (i = 0; i < dres->dr_inos; i++) {
		xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
		xfs_irele(dres->dr_ip[i]);
		dres->dr_ip[i] = NULL;
	}

	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_buf_relse(dres->dr_bp[i]);
		dres->dr_bp[i] = NULL;
	}

	dres->dr_inos = 0;
	dres->dr_bufs = 0;
	dres->dr_ordered = 0;
}

static inline int __init
xfs_defer_init_cache(void)
{
	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
			sizeof(struct xfs_defer_pending),
			0, 0, NULL);

	return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
}

static inline void
xfs_defer_destroy_cache(void)
{
	kmem_cache_destroy(xfs_defer_pending_cache);
	xfs_defer_pending_cache = NULL;
}

/* Set up caches for deferred work items. */
int __init
xfs_defer_init_item_caches(void)
{
	int			error;

	error = xfs_defer_init_cache();
	if (error)
		return error;
	error = xfs_rmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_refcount_intent_init_cache();
	if (error)
		goto err;
	error = xfs_bmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_extfree_intent_init_cache();
	if (error)
		goto err;

	return 0;
err:
	xfs_defer_destroy_item_caches();
	return error;
}

/* Destroy all the deferred work item caches, if they've been allocated. */
void
xfs_defer_destroy_item_caches(void)
{
	xfs_extfree_intent_destroy_cache();
	xfs_bmap_intent_destroy_cache();
	xfs_refcount_intent_destroy_cache();
	xfs_rmap_intent_destroy_cache();
	xfs_defer_destroy_cache();
}