/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

struct btrfs_chunk_map;

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

enum btrfs_block_group_size_class {
	/* Unset */
	BTRFS_BG_SZ_NONE,
	/* 0 < size <= 128K */
	BTRFS_BG_SZ_SMALL,
	/* 128K < size <= 8M */
	BTRFS_BG_SZ_MEDIUM,
	/* 8M < size < BG_LENGTH */
	BTRFS_BG_SZ_LARGE,
};

/*
 * This describes the state of the block_group for async discard.  This is due
 * to the two-pass nature of it, where extent discarding is prioritized over
 * bitmap discarding.  BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but called from
 * find_free_extent(); it also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
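/*
 * Illustrative sketch (assuming only the btrfs_chunk_alloc() prototype
 * declared later in this header): a caller that wants a new data chunk only
 * when one is really needed passes the weakest force level, e.g.
 *
 *	ret = btrfs_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA,
 *				CHUNK_ALLOC_NO_FORCE);
 *
 * find_free_extent() instead uses CHUNK_ALLOC_FORCE_FOR_EXTENT so that, on
 * zoned filesystems, the newly allocated block group is also activated.
 */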
/* Block group flags set at runtime */
enum btrfs_block_group_flags {
	BLOCK_GROUP_FLAG_IREF,
	BLOCK_GROUP_FLAG_REMOVED,
	BLOCK_GROUP_FLAG_TO_COPY,
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
	/* Does the block group need to be added to the free space tree? */
	BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
	/* Indicate that the block group is placed on a sequential zone */
	BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
	/*
	 * Indicate that the block group is in the list of new block groups of
	 * a transaction.
	 */
	BLOCK_GROUP_FLAG_NEW,
};

enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	/* Track progress of caching during allocation. */
	atomic_t progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP	SZ_2M
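/*
 * Illustrative sketch (an assumption about typical usage, not a statement of
 * the allocator's exact logic): a task that needs a block group's free space
 * loaded can start caching without blocking and later wait for enough
 * progress, roughly:
 *
 *	ret = btrfs_cache_block_group(cache, false);
 *	...
 *	if (!btrfs_block_group_done(cache))
 *		btrfs_wait_block_group_cache_progress(cache, num_bytes);
 *
 * Passing wait == true to btrfs_cache_block_group() instead blocks until
 * caching completes.  All three helpers are declared or defined below.
 */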
struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * The last committed used bytes of this block group.  If @used is
	 * still the same as @commit_used, we don't need to update the block
	 * group item of this block group.
	 */
	u64 commit_used;

	/*
	 * If the free space extent count exceeds this number, convert the
	 * block group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * It is only used for delayed data space allocation, because only the
	 * data space allocation and the related metadata update can be done
	 * across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	unsigned long runtime_flags;

	unsigned int ro;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/*
	 * Used for several lists:
	 *
	 * 1) struct btrfs_fs_info::unused_bgs
	 * 2) struct btrfs_fs_info::reclaim_bgs
	 * 3) struct btrfs_transaction::deleted_bgs
	 * 4) struct btrfs_trans_handle::new_bgs
	 */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0.  This is to prevent them from
	 * being reused while some task is still using the block group after
	 * it was deleted - we want to make sure they can only be reused for
	 * new block groups after that task is done with the deleted block
	 * group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against
	 * this block group's range is created (after it's added to its
	 * inode's root's list of ordered extents) or immediately after the
	 * allocation if it's a metadata extent or fallocate extent (for these
	 * cases we don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0).  Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that
	 * step.  This is to prevent races between block group relocation and
	 * nocow writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation.  This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct btrfs_chunk_map *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
	enum btrfs_block_group_size_class size_class;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering
	 * the efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group);
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
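/*
 * Illustrative sketch (assuming, as the get/put pair above suggests, that a
 * successful lookup returns a block group with an elevated reference count;
 * see the implementations in block-group.c): callers are expected to pair a
 * lookup with a put, e.g.
 *
 *	block_group = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (block_group) {
 *		... use block_group ...
 *		btrfs_put_block_group(block_group);
 *	}
 */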
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
int btrfs_add_new_free_space(struct btrfs_block_group *block_group,
			     u64 start, u64 end, u64 *total_added_ret);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 type, u64 chunk_offset,
						 u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans, u64 bytenr,
			     u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, u64 ram_bytes,
			     u64 num_bytes, int delalloc,
			     bool force_wrong_size_class);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class);
bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg);

#endif /* BTRFS_BLOCK_GROUP_H */