1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2022-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong <djwong@kernel.org> */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_ag.h" #include "xfs_trace.h" /* * Use a static key here to reduce the overhead of xfs_drain_rele. If the * compiler supports jump labels, the static branch will be replaced by a nop * sled when there are no xfs_drain_wait callers. Online fsck is currently * the only caller, so this is a reasonable tradeoff. * * Note: Patching the kernel code requires taking the cpu hotplug lock. Other * parts of the kernel allocate memory with that lock held, which means that * XFS callers cannot hold any locks that might be used by memory reclaim or * writeback when calling the static_branch_{inc,dec} functions. */ static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate); void xfs_drain_wait_disable(void) { static_branch_dec(&xfs_drain_waiter_gate); } void xfs_drain_wait_enable(void) { static_branch_inc(&xfs_drain_waiter_gate); } void xfs_defer_drain_init( struct xfs_defer_drain *dr) { atomic_set(&dr->dr_count, 0); init_waitqueue_head(&dr->dr_waiters); } void xfs_defer_drain_free(struct xfs_defer_drain *dr) { ASSERT(atomic_read(&dr->dr_count) == 0); } /* Increase the pending intent count. 
*/ static inline void xfs_defer_drain_grab(struct xfs_defer_drain *dr) { atomic_inc(&dr->dr_count); } static inline bool has_waiters(struct wait_queue_head *wq_head) { /* * This memory barrier is paired with the one in set_current_state on * the waiting side. */ smp_mb__after_atomic(); return waitqueue_active(wq_head); } /* Decrease the pending intent count, and wake any waiters, if appropriate. */ static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr) { if (atomic_dec_and_test(&dr->dr_count) && static_branch_unlikely(&xfs_drain_waiter_gate) && has_waiters(&dr->dr_waiters)) wake_up(&dr->dr_waiters); } /* Are there intents pending? */ static inline bool xfs_defer_drain_busy(struct xfs_defer_drain *dr) { return atomic_read(&dr->dr_count) > 0; } /* * Wait for the pending intent count for a drain to hit zero. * * Callers must not hold any locks that would prevent intents from being * finished. */ static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr) { return wait_event_killable(dr->dr_waiters, !xfs_defer_drain_busy(dr)); } /* * Get a passive reference to an AG and declare an intent to update its * metadata. */ struct xfs_perag * xfs_perag_intent_get( struct xfs_mount *mp, xfs_agnumber_t agno) { struct xfs_perag *pag; pag = xfs_perag_get(mp, agno); if (!pag) return NULL; xfs_perag_intent_hold(pag); return pag; } /* * Release our intent to update this AG's metadata, and then release our * passive ref to the AG. */ void xfs_perag_intent_put( struct xfs_perag *pag) { xfs_perag_intent_rele(pag); xfs_perag_put(pag); } /* * Declare an intent to update AG metadata. Other threads that need exclusive * access can decide to back off if they see declared intentions. */ void xfs_perag_intent_hold( struct xfs_perag *pag) { trace_xfs_perag_intent_hold(pag, __return_address); xfs_defer_drain_grab(&pag->pag_intents_drain); } /* Release our intent to update this AG's metadata. 
*/ void xfs_perag_intent_rele( struct xfs_perag *pag) { trace_xfs_perag_intent_rele(pag, __return_address); xfs_defer_drain_rele(&pag->pag_intents_drain); } /* * Wait for the intent update count for this AG to hit zero. * Callers must not hold any AG header buffers. */ int xfs_perag_intent_drain( struct xfs_perag *pag) { trace_xfs_perag_wait_intents(pag, __return_address); return xfs_defer_drain_wait(&pag->pag_intents_drain); } /* Has anyone declared an intent to update this AG? */ bool xfs_perag_intent_busy( struct xfs_perag *pag) { return xfs_defer_drain_busy(&pag->pag_intents_drain); } |