// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_buf.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_trans_priv.h"

static struct kmem_cache	*xfs_defer_pending_cache;

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue.  Then it rolls the
 * transaction and picks up processing where it left off.  Note that
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */
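/*
 * Illustrative sketch (not part of the kernel sources) of how a caller
 * typically drives this machinery.  The "xfs_foo" op type and work item
 * are hypothetical stand-ins for a real defer op type such as the
 * extent-free or rmap types; the sketch is compiled out.
 */
#if 0
struct xfs_foo_intent {
	struct list_head	fi_list;	/* work item list entry */
	/* description of the foo work to do would go here */
};

static int
xfs_foo_example(
	struct xfs_trans	**tpp,
	struct xfs_foo_intent	*foo)
{
	/*
	 * Queue the work item.  No log intent item is created yet;
	 * that happens when xfs_defer_finish rolls the transaction.
	 */
	xfs_defer_add(*tpp, &foo->fi_list, &xfs_foo_defer_type);

	/*
	 * Log intent items for all queued work, roll the transaction,
	 * and finish the work items.  On success *tpp is a clean
	 * transaction that the caller can commit.
	 */
	return xfs_defer_finish(tpp);
}
#endif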
STATIC struct xfs_log_item *
xfs_defer_barrier_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return NULL;
}

STATIC void
xfs_defer_barrier_abort_intent(
	struct xfs_log_item		*intent)
{
	/* empty */
}

STATIC struct xfs_log_item *
xfs_defer_barrier_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return NULL;
}

STATIC int
xfs_defer_barrier_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	ASSERT(0);
	return -EFSCORRUPTED;
}

STATIC void
xfs_defer_barrier_cancel_item(
	struct list_head		*item)
{
	ASSERT(0);
}

static const struct xfs_defer_op_type xfs_barrier_defer_type = {
	.max_items	= 1,
	.create_intent	= xfs_defer_barrier_create_intent,
	.abort_intent	= xfs_defer_barrier_abort_intent,
	.create_done	= xfs_defer_barrier_create_done,
	.finish_item	= xfs_defer_barrier_finish_item,
	.cancel_item	= xfs_defer_barrier_cancel_item,
};

/* Create a log intent done item for a log intent item. */
static inline void
xfs_defer_create_done(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	struct xfs_log_item		*lip;

	/* If there is no log intent item, there can be no log done item. */
	if (!dfp->dfp_intent)
		return;

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the log intent item and frees the log done item
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	lip = dfp->dfp_ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	if (!lip)
		return;

	tp->t_flags |= XFS_TRANS_HAS_INTENT_DONE;
	xfs_trans_add_item(tp, lip);
	set_bit(XFS_LI_DIRTY, &lip->li_flags);
	dfp->dfp_done = lip;
}

/*
 * Ensure there's a log intent item associated with this deferred work item if
 * the operation must be restarted on crash.  Returns 1 if there's a log item;
 * 0 if there isn't; or a negative errno.
 */
static int
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	struct xfs_log_item		*lip;

	if (dfp->dfp_intent)
		return 1;

	lip = dfp->dfp_ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count,
			sort);
	if (!lip)
		return 0;
	if (IS_ERR(lip))
		return PTR_ERR(lip);

	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_trans_add_item(tp, lip);
	set_bit(XFS_LI_DIRTY, &lip->li_flags);
	dfp->dfp_intent = lip;
	return 1;
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 *
 * Returns 1 if at least one log item was associated with the deferred work;
 * 0 if there are no log items; or a negative errno.
 */
static int
xfs_defer_create_intents(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;
	int				ret = 0;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		int			ret2;

		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		ret2 = xfs_defer_create_intent(tp, dfp, true);
		if (ret2 < 0)
			return ret2;
		ret |= ret2;
	}
	return ret;
}

static inline void
xfs_defer_pending_abort(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp)
{
	trace_xfs_defer_pending_abort(mp, dfp);

	if (dfp->dfp_intent && !dfp->dfp_done) {
		dfp->dfp_ops->abort_intent(dfp->dfp_intent);
		dfp->dfp_intent = NULL;
	}
}

static inline void
xfs_defer_pending_cancel_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp)
{
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel_list(mp, dfp);

	list_del(&dfp->dfp_list);
	list_for_each_safe(pwi, n, &dfp->dfp_work) {
		list_del(pwi);
		dfp->dfp_count--;
		trace_xfs_defer_cancel_item(mp, dfp, pwi);
		dfp->dfp_ops->cancel_item(pwi);
	}
	ASSERT(dfp->dfp_count == 0);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
}

STATIC void
xfs_defer_pending_abort_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_list, dfp_list)
		xfs_defer_pending_abort(mp, dfp);
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	trace_xfs_defer_trans_abort(tp, _RET_IP_);
	xfs_defer_pending_abort_list(tp->t_mountp, dop_pending);
}

/*
 * Capture resources that the caller said not to release ("held") when the
 * transaction commits.  Caller is responsible for zero-initializing @dres.
 */
static int
xfs_defer_save_resources(
	struct xfs_defer_resources	*dres,
	struct xfs_trans		*tp)
{
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;

	BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					   bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					dres->dr_ordered |=
							(1U << dres->dr_bufs);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					   ili_item);
			if (ili->ili_lock_flags == 0) {
				if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						    XFS_ILOG_CORE);
				dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Attach the held resources to the transaction. */
static void
xfs_defer_restore_resources(
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	/* Rejoin the joined inodes. */
	for (i = 0; i < dres->dr_inos; i++)
		xfs_trans_ijoin(tp, dres->dr_ip[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_trans_bjoin(tp, dres->dr_bp[i]);
		if (dres->dr_ordered & (1U << i))
			xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
		xfs_trans_bhold(tp, dres->dr_bp[i]);
	}
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_defer_resources	dres = { };
	int				error;

	error = xfs_defer_save_resources(&dres, *tpp);
	if (error)
		return error;

	trace_xfs_defer_trans_roll(*tpp, _RET_IP_);

	/*
	 * Roll the transaction.  Rolling always gives a new transaction
	 * (even if committing the old one fails!) to hand back to the
	 * caller, so we join the held resources to the new transaction
	 * so that we always return with the held resources joined to
	 * @tpp, no matter what happened.
	 */
	error = xfs_trans_roll(tpp);

	xfs_defer_restore_resources(*tpp, &dres);

	if (error)
		trace_xfs_defer_trans_roll_error(*tpp, error);
	return error;
}

/*
 * Free up any items left in the list.
 */
static void
xfs_defer_cancel_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, dop_list, dfp_list)
		xfs_defer_pending_cancel_work(mp, dfp);
}

static inline void
xfs_defer_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	struct xfs_log_item		*lip;

	xfs_defer_create_done(tp, dfp);

	lip = dfp->dfp_ops->relog_intent(tp, dfp->dfp_intent, dfp->dfp_done);
	if (lip) {
		xfs_trans_add_item(tp, lip);
		set_bit(XFS_LI_DIRTY, &lip->li_flags);
	}
	dfp->dfp_done = NULL;
	dfp->dfp_intent = lip;
}

/*
 * Prevent a log intent item from pinning the tail of the log by logging a
 * done item to release the intent item; and then log a new intent item.
 * The caller should provide a fresh transaction and roll it after we're done.
 */
static void
xfs_defer_relog(
	struct xfs_trans		**tpp,
	struct list_head		*dfops)
{
	struct xlog			*log = (*tpp)->t_mountp->m_log;
	struct xfs_defer_pending	*dfp;
	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	list_for_each_entry(dfp, dfops, dfp_list) {
		/*
		 * If the log intent item for this deferred op is not a part of
		 * the current log checkpoint, relog the intent item to keep
		 * the log tail moving forward.  We're ok with this being racy
		 * because an incorrect decision means we'll be a little slower
		 * at pushing the tail.
		 */
		if (dfp->dfp_intent == NULL ||
		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
			continue;

		/*
		 * Figure out where we need the tail to be in order to maintain
		 * the minimum required free space in the log.  Only sample
		 * the log threshold once per call.
		 */
		if (threshold_lsn == NULLCOMMITLSN) {
			threshold_lsn = xlog_grant_push_threshold(log, 0);
			if (threshold_lsn == NULLCOMMITLSN)
				break;
		}
		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
			continue;

		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);

		xfs_defer_relog_intent(*tpp, dfp);
	}
}

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = dfp->dfp_ops;
	struct xfs_btree_cur		*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	xfs_defer_create_done(tp, dfp);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		trace_xfs_defer_finish_item(tp->t_mountp, dfp, li);
		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
		if (error == -EAGAIN) {
			int		ret;

			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one.  See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			ret = xfs_defer_create_intent(tp, dfp, false);
			if (ret < 0)
				error = ret;
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}
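
/*
 * Hypothetical sketch of a ->finish_item implementation using the
 * -EAGAIN continuation protocol described at the top of this file.
 * The xfs_foo names do not exist in the kernel; compiled out.
 */
#if 0
static int
xfs_foo_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_foo_intent		*foo;
	int				error;

	foo = container_of(item, struct xfs_foo_intent, fi_list);

	/* Do as much work as the transaction reservation allows. */
	error = xfs_foo_do_work(tp, foo);
	if (error == -EAGAIN) {
		/*
		 * Update foo to describe only the unfinished work, then
		 * return -EAGAIN.  xfs_defer_finish_one requeues the
		 * item and logs a new intent item, and the caller rolls
		 * the transaction before retrying.
		 */
		xfs_foo_trim_to_remaining_work(foo);
		return -EAGAIN;
	}
	return error;
}
#endif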

/* Move all paused deferred work from @tp to @paused_list. */
static void
xfs_defer_isolate_paused(
	struct xfs_trans		*tp,
	struct list_head		*paused_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;

	list_for_each_entry_safe(dfp, pli, &tp->t_dfops, dfp_list) {
		if (!(dfp->dfp_flags & XFS_DEFER_PAUSED))
			continue;

		list_move_tail(&dfp->dfp_list, paused_list);
		trace_xfs_defer_isolate_paused(tp->t_mountp, dfp);
	}
}

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp = NULL;
	int				error = 0;
	LIST_HEAD(dop_pending);
	LIST_HEAD(dop_paused);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred work
		 * that was created by the caller.  This keeps the number of
		 * pending work items to a minimum, which decreases the amount
		 * of time that any one intent item can stick around in memory,
		 * pinning the log tail.
		 */
		int has_intents = xfs_defer_create_intents(*tp);

		xfs_defer_isolate_paused(*tp, &dop_paused);

		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		if (has_intents < 0) {
			error = has_intents;
			goto out_shutdown;
		}
		if (has_intents || dfp) {
			error = xfs_defer_trans_roll(tp);
			if (error)
				goto out_shutdown;

			/* Relog intent items to keep the log moving. */
			xfs_defer_relog(tp, &dop_pending);
			xfs_defer_relog(tp, &dop_paused);

			if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
				error = xfs_defer_trans_roll(tp);
				if (error)
					goto out_shutdown;
			}
		}

		dfp = list_first_entry_or_null(&dop_pending,
				struct xfs_defer_pending, dfp_list);
		if (!dfp)
			break;
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	/* Requeue the paused items in the outgoing transaction. */
	list_splice_tail_init(&dop_paused, &(*tp)->t_dfops);

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	list_splice_tail_init(&dop_paused, &dop_pending);
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
#ifdef DEBUG
	struct xfs_defer_pending *dfp;
#endif
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					   SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}

	/* Reset LOWMODE now that we've finished all the dfops. */
#ifdef DEBUG
	list_for_each_entry(dfp, &(*tp)->t_dfops, dfp_list)
		ASSERT(dfp->dfp_flags & XFS_DEFER_PAUSED);
#endif
	(*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_trans_abort(tp, &tp->t_dfops);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/*
 * Return the last pending work item attached to this transaction if it matches
 * the deferred op type.
 */
static inline struct xfs_defer_pending *
xfs_defer_find_last(
	struct xfs_trans		*tp,
	const struct xfs_defer_op_type	*ops)
{
	struct xfs_defer_pending	*dfp = NULL;

	/* No dfops at all? */
	if (list_empty(&tp->t_dfops))
		return NULL;

	dfp = list_last_entry(&tp->t_dfops, struct xfs_defer_pending,
			dfp_list);

	/* Wrong type? */
	if (dfp->dfp_ops != ops)
		return NULL;
	return dfp;
}

/*
 * Decide if we can add a deferred work item to the last dfops item attached
 * to the transaction.
 */
static inline bool
xfs_defer_can_append(
	struct xfs_defer_pending	*dfp,
	const struct xfs_defer_op_type	*ops)
{
	/* Already logged? */
	if (dfp->dfp_intent)
		return false;

	/* Paused items cannot absorb more work */
	if (dfp->dfp_flags & XFS_DEFER_PAUSED)
		return false;

	/* Already full? */
	if (ops->max_items && dfp->dfp_count >= ops->max_items)
		return false;

	return true;
}

/* Create a new pending item at the end of the transaction list. */
static inline struct xfs_defer_pending *
xfs_defer_alloc(
	struct xfs_trans		*tp,
	const struct xfs_defer_op_type	*ops)
{
	struct xfs_defer_pending	*dfp;

	dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
			GFP_NOFS | __GFP_NOFAIL);
	dfp->dfp_ops = ops;
	INIT_LIST_HEAD(&dfp->dfp_work);
	list_add_tail(&dfp->dfp_list, &tp->t_dfops);

	return dfp;
}

/* Add an item for later deferred processing. */
struct xfs_defer_pending *
xfs_defer_add(
	struct xfs_trans		*tp,
	struct list_head		*li,
	const struct xfs_defer_op_type	*ops)
{
	struct xfs_defer_pending	*dfp = NULL;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	dfp = xfs_defer_find_last(tp, ops);
	if (!dfp || !xfs_defer_can_append(dfp, ops))
		dfp = xfs_defer_alloc(tp, ops);

	xfs_defer_add_item(dfp, li);
	trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
	return dfp;
}

/*
 * Add a defer ops barrier to force two otherwise adjacent deferred work items
 * to be tracked separately and have separate log items.
 */
void
xfs_defer_add_barrier(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/* If the last defer op added was a barrier, we're done. */
	dfp = xfs_defer_find_last(tp, &xfs_barrier_defer_type);
	if (dfp)
		return;

	dfp = xfs_defer_alloc(tp, &xfs_barrier_defer_type);

	trace_xfs_defer_add_item(tp->t_mountp, dfp, NULL);
}
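
/*
 * Illustrative sketch of barrier usage, with hypothetical xfs_foo
 * names; compiled out.  Without the barrier, the second xfs_defer_add
 * call could append foo2 to the same pending item as foo1.
 */
#if 0
	xfs_defer_add(tp, &foo1->fi_list, &xfs_foo_defer_type);
	xfs_defer_add_barrier(tp);
	xfs_defer_add(tp, &foo2->fi_list, &xfs_foo_defer_type);
#endif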

/*
 * Create a pending deferred work item to replay the recovered intent item
 * and add it to the list.
 */
void
xfs_defer_start_recovery(
	struct xfs_log_item		*lip,
	struct list_head		*r_dfops,
	const struct xfs_defer_op_type	*ops)
{
	struct xfs_defer_pending	*dfp;

	dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
			GFP_NOFS | __GFP_NOFAIL);
	dfp->dfp_ops = ops;
	dfp->dfp_intent = lip;
	INIT_LIST_HEAD(&dfp->dfp_work);
	list_add_tail(&dfp->dfp_list, r_dfops);
}

/*
 * Cancel a deferred work item created to recover a log intent item.  @dfp
 * will be freed after this function returns.
 */
void
xfs_defer_cancel_recovery(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp)
{
	xfs_defer_pending_abort(mp, dfp);
	xfs_defer_pending_cancel_work(mp, dfp);
}

/* Replay the deferred work item created from a recovered log intent item. */
int
xfs_defer_finish_recovery(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	const struct xfs_defer_op_type	*ops = dfp->dfp_ops;
	int				error;

	/* dfp is freed by recover_work and must not be accessed afterwards */
	error = ops->recover_work(dfp, capture_list);
	if (error)
		trace_xlog_intent_recovery_failed(mp, ops, error);
	return error;
}

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state. This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls. Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
	stp->t_flags &= ~XFS_TRANS_LOWMODE;
}

/*
 * Prepare a chain of fresh deferred ops work items to be completed later.  Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off.  The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it.  If there are no
 * intent items to capture, this function returns NULL.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp)
{
	struct xfs_defer_capture	*dfc;
	unsigned short			i;
	int				error;

	if (list_empty(&tp->t_dfops))
		return NULL;

	error = xfs_defer_create_intents(tp);
	if (error < 0)
		return ERR_PTR(error);

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	error = xfs_defer_save_resources(&dfc->dfc_held, tp);
	if (error) {
		/*
		 * Resource capture should never fail, but if it does, we
		 * still have to shut down the log and release things
		 * properly.
		 */
		xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	}

	/*
	 * Grab extra references to the inodes and buffers because callers are
	 * expected to release their held references after we commit the
	 * transaction.
	 */
	for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
		ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
		ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
	}

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_hold(dfc->dfc_held.dr_bp[i]);

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_capture_abort(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	unsigned short			i;

	xfs_defer_pending_abort_list(mp, &dfc->dfc_dfops);
	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_relse(dfc->dfc_held.dr_bp[i]);

	for (i = 0; i < dfc->dfc_held.dr_inos; i++)
		xfs_irele(dfc->dfc_held.dr_ip[i]);

	kmem_free(dfc);
}

/*
 * Capture any deferred ops and commit the transaction.  This is the last step
 * needed to finish a log intent item that we recovered from the log.  If any
 * of the deferred ops operate on an inode, that inode must already be joined
 * to the transaction so that its reference can be transferred to the capture
 * structure.  The caller must hold ILOCK_EXCL on each such inode, and must
 * unlock it before calling xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp);
	if (IS_ERR(dfc)) {
		xfs_trans_cancel(tp);
		return PTR_ERR(dfc);
	}
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_capture_abort(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure.  If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned int			i;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock the captured resources to the new transaction. */
	if (dfc->dfc_held.dr_inos == 2)
		xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
				    dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
	else if (dfc->dfc_held.dr_inos == 1)
		xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_lock(dfc->dfc_held.dr_bp[i]);

	/* Join the captured resources to the new transaction. */
	xfs_defer_restore_resources(tp, &dfc->dfc_held);
	memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));
	dres->dr_bufs = 0;

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}
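
/*
 * Rough sketch of the capture/continue pairing used by log recovery,
 * assuming a capture structure @dfc that was placed on the capture list
 * by xfs_defer_ops_capture_and_commit; compiled out.
 */
#if 0
	struct xfs_defer_resources	dres = { };

	/* Attach the captured dfops chain to a fresh transaction... */
	xfs_defer_ops_continue(dfc, tp, &dres);

	/* ...finish the work by committing, then drop our references. */
	error = xfs_trans_commit(tp);
	xfs_defer_resources_rele(&dres);
#endif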

/* Release the resources captured and continued during recovery. */
void
xfs_defer_resources_rele(
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	for (i = 0; i < dres->dr_inos; i++) {
		xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
		xfs_irele(dres->dr_ip[i]);
		dres->dr_ip[i] = NULL;
	}

	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_buf_relse(dres->dr_bp[i]);
		dres->dr_bp[i] = NULL;
	}

	dres->dr_inos = 0;
	dres->dr_bufs = 0;
	dres->dr_ordered = 0;
}

static inline int __init
xfs_defer_init_cache(void)
{
	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
			sizeof(struct xfs_defer_pending),
			0, 0, NULL);

	return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
}

static inline void
xfs_defer_destroy_cache(void)
{
	kmem_cache_destroy(xfs_defer_pending_cache);
	xfs_defer_pending_cache = NULL;
}

/* Set up caches for deferred work items. */
int __init
xfs_defer_init_item_caches(void)
{
	int				error;

	error = xfs_defer_init_cache();
	if (error)
		return error;
	error = xfs_rmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_refcount_intent_init_cache();
	if (error)
		goto err;
	error = xfs_bmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_extfree_intent_init_cache();
	if (error)
		goto err;
	error = xfs_attr_intent_init_cache();
	if (error)
		goto err;
	return 0;
err:
	xfs_defer_destroy_item_caches();
	return error;
}

/* Destroy all the deferred work item caches, if they've been allocated. */
void
xfs_defer_destroy_item_caches(void)
{
	xfs_attr_intent_destroy_cache();
	xfs_extfree_intent_destroy_cache();
	xfs_bmap_intent_destroy_cache();
	xfs_refcount_intent_destroy_cache();
	xfs_rmap_intent_destroy_cache();
	xfs_defer_destroy_cache();
}

/*
 * Mark a deferred work item so that it will be requeued indefinitely without
 * being finished.  Caller must ensure there are no data dependencies on this
 * work item in the meantime.
 */
void
xfs_defer_item_pause(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	ASSERT(!(dfp->dfp_flags & XFS_DEFER_PAUSED));

	dfp->dfp_flags |= XFS_DEFER_PAUSED;

	trace_xfs_defer_item_pause(tp->t_mountp, dfp);
}

/*
 * Release a paused deferred work item so that it will be finished during the
 * next transaction roll.
 */
void
xfs_defer_item_unpause(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	ASSERT(dfp->dfp_flags & XFS_DEFER_PAUSED);

	dfp->dfp_flags &= ~XFS_DEFER_PAUSED;

	trace_xfs_defer_item_unpause(tp->t_mountp, dfp);
}
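
/*
 * Illustrative pause/unpause usage with hypothetical xfs_foo names;
 * compiled out.  While paused, the work item is requeued across
 * transaction rolls instead of being finished.
 */
#if 0
	dfp = xfs_defer_add(tp, &foo->fi_list, &xfs_foo_defer_type);
	xfs_defer_item_pause(tp, dfp);

	/* other work that must happen first goes here */

	xfs_defer_item_unpause(tp, dfp);
#endif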