Lines Matching +full:wait +full:- +full:free +full:- +full:us
1 // SPDX-License-Identifier: GPL-2.0
7 #include "space-info.h"
10 #include "free-space-cache.h"
11 #include "ordered-data.h"
13 #include "block-group.h"
16 #include "extent-tree.h"
31 * reservations we care about total_bytes - SUM(space_info->bytes_) when
37 * code on the rules for each type, but generally block_rsv->reserved is how
38 * much space is accounted for in space_info->bytes_may_use.
52 * ->reserve
53 * space_info->bytes_may_use += num_bytes
55 * ->extent allocation
57 * space_info->bytes_may_use -= num_bytes
58 * space_info->bytes_reserved += extent_bytes
60 * ->insert reference
62 * space_info->bytes_reserved -= extent_bytes
63 * space_info->bytes_used += extent_bytes
65 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
70 * -> __reserve_bytes
71 * create a reserve_ticket with ->bytes set to our reservation, add it to
72 * the tail of space_info->tickets, kick async flush thread
74 * ->handle_reserve_ticket
75 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
78 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
79 * Flushes various things attempting to free up space.
81 * -> btrfs_try_granting_tickets()
83 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
84 * space_info->total_bytes. This loops through the ->priority_tickets and
85 * then the ->tickets list checking to see if the reservation can be
86 * completed. If it can the space is added to space_info->bytes_may_use and
89 * -> ticket wakeup
90 * Check if ->bytes == 0, if it does we got our reservation and we can carry
97 * space_info->priority_tickets, and we do not use ticket->wait, we simply
98 * call flush_space() ourselves for the states that are safe for us to call
107 * things however hold reservations, and so letting them run allows us to
121 * running delalloc, but usually we need to wait for ordered extents to
135 * zones of an unused block group and let us reuse the space. The reusing
152 * out of a pre-tickets era where we could end up committing the transaction
161 * reserve more space than is currently free in the currently allocated
168 * free space in the allocated metadata chunks.
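/*
 * [Editor's sketch] The reservation and ticket flow described in the comment
 * above, reduced to a self-contained user-space model: a fast path that takes
 * the reservation when it fits, a FIFO ticket queue for the slow path, and a
 * granting pass that runs whenever space is returned.  The structs and
 * helpers below are simplified stand-ins for btrfs_space_info and
 * reserve_ticket; locking, flush states, priority tickets and overcommit are
 * all left out.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ticket {
	uint64_t bytes;		/* what we still need granted */
	struct ticket *next;	/* FIFO: oldest waiter first */
};

struct space {
	uint64_t total_bytes;	/* size of the allocated chunks */
	uint64_t bytes_may_use;	/* outstanding reservations */
	struct ticket *tickets;	/* waiters, granted strictly in order */
};

/* ->reserve: take the space now if it fits and nobody is already waiting. */
bool reserve(struct space *s, struct ticket *t, uint64_t bytes)
{
	t->bytes = bytes;
	t->next = NULL;
	if (!s->tickets && s->bytes_may_use + bytes <= s->total_bytes) {
		s->bytes_may_use += bytes;
		t->bytes = 0;
		return true;			/* fast path, no waiting */
	}
	/*
	 * Slow path: queue the ticket at the tail; the real code then kicks
	 * the async flusher and the caller waits for ->bytes to reach 0.
	 */
	struct ticket **p = &s->tickets;
	while (*p)
		p = &(*p)->next;
	*p = t;
	return false;
}

/* Modeled on btrfs_try_granting_tickets(): grant waiters in order while they fit. */
void try_granting_tickets(struct space *s)
{
	while (s->tickets &&
	       s->bytes_may_use + s->tickets->bytes <= s->total_bytes) {
		struct ticket *t = s->tickets;

		s->bytes_may_use += t->bytes;
		t->bytes = 0;			/* the waiter would now wake up */
		s->tickets = t->next;
	}
}

/* Releasing a reservation is what lets the next tickets make progress. */
void release_reservation(struct space *s, uint64_t bytes)
{
	s->bytes_may_use -= bytes;
	try_granting_tickets(s);
}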
179 return s_info->bytes_used + s_info->bytes_reserved + in btrfs_space_info_used()
180 s_info->bytes_pinned + s_info->bytes_readonly + in btrfs_space_info_used()
181 s_info->bytes_zone_unusable + in btrfs_space_info_used()
182 (may_use_included ? s_info->bytes_may_use : 0); in btrfs_space_info_used()
191 struct list_head *head = &info->space_info; in btrfs_clear_space_info_full()
195 found->full = 0; in btrfs_clear_space_info_full()
212 return fs_info->zone_size; in calc_chunk_size()
222 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G) in calc_chunk_size()
234 WRITE_ONCE(space_info->chunk_size, chunk_size); in btrfs_update_space_info_chunk_size()
246 return -ENOMEM; in create_space_info()
248 space_info->fs_info = info; in create_space_info()
250 INIT_LIST_HEAD(&space_info->block_groups[i]); in create_space_info()
251 init_rwsem(&space_info->groups_sem); in create_space_info()
252 spin_lock_init(&space_info->lock); in create_space_info()
253 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; in create_space_info()
254 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in create_space_info()
255 INIT_LIST_HEAD(&space_info->ro_bgs); in create_space_info()
256 INIT_LIST_HEAD(&space_info->tickets); in create_space_info()
257 INIT_LIST_HEAD(&space_info->priority_tickets); in create_space_info()
258 space_info->clamp = 1; in create_space_info()
262 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; in create_space_info()
268 list_add(&space_info->list, &info->space_info); in create_space_info()
270 info->data_sinfo = space_info; in create_space_info()
283 disk_super = fs_info->super_copy; in btrfs_init_space_info()
285 return -EINVAL; in btrfs_init_space_info()
318 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_add_bg_to_space_info()
320 found = btrfs_find_space_info(info, block_group->flags); in btrfs_add_bg_to_space_info()
322 spin_lock(&found->lock); in btrfs_add_bg_to_space_info()
323 found->total_bytes += block_group->length; in btrfs_add_bg_to_space_info()
324 found->disk_total += block_group->length * factor; in btrfs_add_bg_to_space_info()
325 found->bytes_used += block_group->used; in btrfs_add_bg_to_space_info()
326 found->disk_used += block_group->used * factor; in btrfs_add_bg_to_space_info()
327 found->bytes_readonly += block_group->bytes_super; in btrfs_add_bg_to_space_info()
328 btrfs_space_info_update_bytes_zone_unusable(found, block_group->zone_unusable); in btrfs_add_bg_to_space_info()
329 if (block_group->length > 0) in btrfs_add_bg_to_space_info()
330 found->full = 0; in btrfs_add_bg_to_space_info()
332 spin_unlock(&found->lock); in btrfs_add_bg_to_space_info()
334 block_group->space_info = found; in btrfs_add_bg_to_space_info()
336 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_add_bg_to_space_info()
337 down_write(&found->groups_sem); in btrfs_add_bg_to_space_info()
338 list_add_tail(&block_group->list, &found->block_groups[index]); in btrfs_add_bg_to_space_info()
339 up_write(&found->groups_sem); in btrfs_add_bg_to_space_info()
345 struct list_head *head = &info->space_info; in btrfs_find_space_info()
351 if (found->flags & flags) in btrfs_find_space_info()
363 * Calculate the data_chunk_size, space_info->chunk_size is the in calc_effective_data_chunk_size()
368 * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size) in calc_effective_data_chunk_size()
373 return data_sinfo->chunk_size; in calc_effective_data_chunk_size()
374 data_chunk_size = min(data_sinfo->chunk_size, in calc_effective_data_chunk_size()
375 mult_perc(fs_info->fs_devices->total_rw_bytes, 10)); in calc_effective_data_chunk_size()
388 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) in calc_available_free_space()
393 avail = atomic64_read(&fs_info->free_chunk_space); in calc_available_free_space()
396 * If we have dup, raid1 or raid10 then only half of the free in calc_available_free_space()
412 * available space used by a data allocation, which could put us in a in calc_available_free_space()
417 * overcommit much, and if we're very close to full it'll keep us from in calc_available_free_space()
423 avail -= data_chunk_size; in calc_available_free_space()
426 * If we aren't flushing all things, let us overcommit up to in calc_available_free_space()
427 * half of the space. If we can flush, don't let us overcommit in calc_available_free_space()
437 * Returning non-zone-size-aligned bytes here will result in in calc_available_free_space()
439 * will over-commit too much leading to ENOSPC. Align down to the in calc_available_free_space()
443 avail = ALIGN_DOWN(avail, fs_info->zone_size); in calc_available_free_space()
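/*
 * [Editor's sketch] The overcommit math that the comments in
 * calc_available_free_space() describe, as standalone user-space C.  The
 * profile factor, the data chunk carve-out, the half vs. roughly one-eighth
 * divisor and the zone alignment follow the comments above; the exact
 * constants and helper calls in the real function may differ.
 */
#include <stdbool.h>
#include <stdint.h>

uint64_t model_available_free_space(uint64_t free_chunk_space,
				    unsigned int profile_factor, /* 2 for dup/raid1/raid10, else 1 */
				    uint64_t data_chunk_size,
				    bool can_flush_all,          /* a FLUSH_ALL-style reservation? */
				    uint64_t zone_size)          /* 0 on non-zoned filesystems */
{
	uint64_t avail = free_chunk_space / profile_factor;

	/*
	 * Keep one data chunk's worth out of the pool so a data allocation
	 * cannot immediately eat the space we are about to overcommit.
	 */
	if (avail < data_chunk_size)
		return 0;
	avail -= data_chunk_size;

	/*
	 * Overcommit up to half of the remainder when we cannot flush
	 * everything; be much more conservative when a full flush is still
	 * available (one eighth here, as an assumed stand-in).
	 */
	avail >>= can_flush_all ? 3 : 1;

	/* Zoned filesystems allocate whole zones, so only promise zone-size-aligned bytes. */
	if (zone_size)
		avail -= avail % zone_size;

	return avail;
}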
456 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) in btrfs_can_overcommit()
462 if (used + bytes < space_info->total_bytes + avail) in btrfs_can_overcommit()
470 if (!list_empty(&ticket->list)) { in remove_ticket()
471 list_del_init(&ticket->list); in remove_ticket()
472 ASSERT(space_info->reclaim_size >= ticket->bytes); in remove_ticket()
473 space_info->reclaim_size -= ticket->bytes; in remove_ticket()
478 * This is for space we already have accounted in space_info->bytes_may_use, so
487 lockdep_assert_held(&space_info->lock); in btrfs_try_granting_tickets()
489 head = &space_info->priority_tickets; in btrfs_try_granting_tickets()
498 if ((used + ticket->bytes <= space_info->total_bytes) || in btrfs_try_granting_tickets()
499 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, in btrfs_try_granting_tickets()
501 btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes); in btrfs_try_granting_tickets()
503 ticket->bytes = 0; in btrfs_try_granting_tickets()
504 space_info->tickets_id++; in btrfs_try_granting_tickets()
505 wake_up(&ticket->wait); in btrfs_try_granting_tickets()
511 if (head == &space_info->priority_tickets) { in btrfs_try_granting_tickets()
512 head = &space_info->tickets; in btrfs_try_granting_tickets()
520 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
521 spin_lock(&__rsv->lock); \
523 __rsv->size, __rsv->reserved); \
524 spin_unlock(&__rsv->lock); \
529 switch (space_info->flags) { in space_info_flag_to_str()
556 lockdep_assert_held(&info->lock); in __btrfs_dump_space_info()
558 /* The free space could be negative in case of overcommit */ in __btrfs_dump_space_info()
559 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull", in __btrfs_dump_space_info()
561 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), in __btrfs_dump_space_info()
562 info->full ? "" : "not "); in __btrfs_dump_space_info()
565 info->total_bytes, info->bytes_used, info->bytes_pinned, in __btrfs_dump_space_info()
566 info->bytes_reserved, info->bytes_may_use, in __btrfs_dump_space_info()
567 info->bytes_readonly, info->bytes_zone_unusable); in __btrfs_dump_space_info()
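/*
 * [Editor's note] On "the free space could be negative" above: with metadata
 * overcommit, bytes_may_use can push btrfs_space_info_used() past
 * total_bytes, which is why the dump prints the difference as a signed
 * value.  A tiny standalone illustration with made-up numbers:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_bytes = 1ULL << 30;	/* 1 GiB of metadata chunks */
	uint64_t used = 800ULL << 20;		/* used + reserved + pinned + readonly + ... */
	uint64_t may_use = 400ULL << 20;	/* overcommitted reservations */

	/* Same shape as the dump: total minus used-including-may_use, as a signed value. */
	printf("free = %lld\n", (long long)(total_bytes - (used + may_use)));
	return 0;
}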
578 spin_lock(&info->lock); in btrfs_dump_space_info()
581 spin_unlock(&info->lock); in btrfs_dump_space_info()
586 down_read(&info->groups_sem); in btrfs_dump_space_info()
588 list_for_each_entry(cache, &info->block_groups[index], list) { in btrfs_dump_space_info()
591 spin_lock(&cache->lock); in btrfs_dump_space_info()
592 avail = cache->length - cache->used - cache->pinned - in btrfs_dump_space_info()
593 cache->reserved - cache->bytes_super - cache->zone_unusable; in btrfs_dump_space_info()
596 cache->start, cache->length, cache->used, cache->pinned, in btrfs_dump_space_info()
597 cache->reserved, cache->delalloc_bytes, in btrfs_dump_space_info()
598 cache->bytes_super, cache->zone_unusable, in btrfs_dump_space_info()
599 avail, cache->ro ? "[readonly]" : ""); in btrfs_dump_space_info()
600 spin_unlock(&cache->lock); in btrfs_dump_space_info()
606 up_read(&info->groups_sem); in btrfs_dump_space_info()
639 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in shrink_delalloc()
640 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); in shrink_delalloc()
652 * worth of reservations, however that's not available to us in shrink_delalloc()
664 trans = current->journal_info; in shrink_delalloc()
667 * If we are doing more ordered than delalloc we need to just wait on in shrink_delalloc()
669 * that likely won't give us the space back we need. in shrink_delalloc()
692 * This exists because we do not want to wait for each in shrink_delalloc()
694 * start the IO on everybody, and then come back here and wait in shrink_delalloc()
697 * can decide if we wait for that or not. in shrink_delalloc()
703 async_pages = atomic_read(&fs_info->async_delalloc_pages); in shrink_delalloc()
708 * We don't want to wait forever; if we wrote fewer pages in this in shrink_delalloc()
709 * loop than we have outstanding, only wait for that number of in shrink_delalloc()
710 * pages, otherwise we can wait for all async pages to finish in shrink_delalloc()
714 async_pages -= nr_pages; in shrink_delalloc()
717 wait_event(fs_info->async_submit_wait, in shrink_delalloc()
718 atomic_read(&fs_info->async_delalloc_pages) <= in shrink_delalloc()
731 * If we are flushing for preemption we just want a one-shot of delalloc in shrink_delalloc()
738 spin_lock(&space_info->lock); in shrink_delalloc()
739 if (list_empty(&space_info->tickets) && in shrink_delalloc()
740 list_empty(&space_info->priority_tickets)) { in shrink_delalloc()
741 spin_unlock(&space_info->lock); in shrink_delalloc()
744 spin_unlock(&space_info->lock); in shrink_delalloc()
747 &fs_info->delalloc_bytes); in shrink_delalloc()
749 &fs_info->ordered_bytes); in shrink_delalloc()
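/*
 * [Editor's sketch] The "only wait for our own writes" policy described in
 * the shrink_delalloc() comments above: after kicking writeback on nr_pages
 * of delalloc, wait until the global async page count has dropped by at most
 * that much instead of waiting for it to hit zero.  The helper name here is
 * illustrative.
 */
#include <stdint.h>

uint64_t async_wait_target(uint64_t outstanding_async_pages, uint64_t nr_pages_submitted)
{
	/* Everything in flight is (at most) ours: wait for all of it. */
	if (outstanding_async_pages <= nr_pages_submitted)
		return 0;
	/* Otherwise stop waiting once the backlog has shrunk by our share. */
	return outstanding_async_pages - nr_pages_submitted;
}
/* The caller would then wait for the outstanding count to fall to this target. */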
762 struct btrfs_root *root = fs_info->tree_root; in flush_space()
773 nr = -1; in flush_space()
778 if (ret == -ENOENT) in flush_space()
798 if (ret == -ENOENT) in flush_space()
816 btrfs_get_alloc_profile(fs_info, space_info->flags), in flush_space()
821 if (ret > 0 || ret == -ENOSPC) in flush_space()
826 * If we have pending delayed iputs then we could free up a in flush_space()
834 ASSERT(current->journal_info == NULL); in flush_space()
837 * current one or wait it fully commits in case its commit is in flush_space()
839 * because that does not wait for a transaction to fully commit in flush_space()
848 ret = -ENOSPC; in flush_space()
852 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, in flush_space()
862 u64 to_reclaim = space_info->reclaim_size; in btrfs_calc_reclaim_metadata_size()
864 lockdep_assert_held(&space_info->lock); in btrfs_calc_reclaim_metadata_size()
872 * before, and now we're well over-committed based on our current free in btrfs_calc_reclaim_metadata_size()
876 if (space_info->total_bytes + avail < used) in btrfs_calc_reclaim_metadata_size()
877 to_reclaim += used - (space_info->total_bytes + avail); in btrfs_calc_reclaim_metadata_size()
885 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); in need_preemptive_reclaim()
890 thresh = mult_perc(space_info->total_bytes, 90); in need_preemptive_reclaim()
892 lockdep_assert_held(&space_info->lock); in need_preemptive_reclaim()
894 /* If we're just plain full then async reclaim just slows us down. */ in need_preemptive_reclaim()
895 if ((space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
899 used = space_info->bytes_may_use + space_info->bytes_pinned; in need_preemptive_reclaim()
910 if (used - global_rsv_size <= SZ_128M) in need_preemptive_reclaim()
917 if (space_info->reclaim_size) in need_preemptive_reclaim()
921 * If we have over half of the free space occupied by reservations or in need_preemptive_reclaim()
932 * if our reclaimable space exceeds our clamped free space. in need_preemptive_reclaim()
934 * Our clamping range is 2^1 -> 2^8. Practically speaking that means in need_preemptive_reclaim()
951 used = space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
952 space_info->bytes_readonly + global_rsv_size; in need_preemptive_reclaim()
953 if (used < space_info->total_bytes) in need_preemptive_reclaim()
954 thresh += space_info->total_bytes - used; in need_preemptive_reclaim()
955 thresh >>= space_info->clamp; in need_preemptive_reclaim()
957 used = space_info->bytes_pinned; in need_preemptive_reclaim()
962 * around. Preemptive flushing is only useful in that it can free up in need_preemptive_reclaim()
963 * space before tickets need to wait for things to finish. In the case in need_preemptive_reclaim()
964 * of ordered extents, preemptively waiting on ordered extents gets us in need_preemptive_reclaim()
966 * simply have to slow down writers by forcing them to wait on ordered in need_preemptive_reclaim()
974 * waste time and cause us to slow down. in need_preemptive_reclaim()
982 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; in need_preemptive_reclaim()
983 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); in need_preemptive_reclaim()
985 used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) + in need_preemptive_reclaim()
986 btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv); in need_preemptive_reclaim()
988 used += space_info->bytes_may_use - global_rsv_size; in need_preemptive_reclaim()
991 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); in need_preemptive_reclaim()
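/*
 * [Editor's sketch] A compressed, user-space reading of the heuristic that
 * need_preemptive_reclaim() builds from the fragments above: bail out when
 * the space is essentially full, when almost everything flushable belongs to
 * the global reserve, or when tickets are already queued; otherwise preempt
 * once the reclaimable bytes exceed the remaining free space shifted down by
 * the clamp (2^1 .. 2^8).  Field names and thresholds are simplified
 * stand-ins, and the real function weighs ordered vs. delalloc bytes to
 * decide what counts as reclaimable.
 */
#include <stdbool.h>
#include <stdint.h>

#define SZ_128M (128ULL << 20)

bool model_need_preemptive_reclaim(uint64_t total_bytes, uint64_t bytes_used,
				   uint64_t bytes_reserved, uint64_t bytes_readonly,
				   uint64_t bytes_may_use, uint64_t bytes_pinned,
				   uint64_t global_rsv, uint64_t avail,
				   uint64_t queued_ticket_bytes, unsigned int clamp)
{
	/* Plain full: async reclaim would only slow us down. */
	if (bytes_used + bytes_reserved + global_rsv >= total_bytes / 100 * 90)
		return false;

	/* Most of the flushable space belongs to the global reserve. */
	uint64_t flushable = bytes_may_use + bytes_pinned;
	if (flushable <= global_rsv || flushable - global_rsv <= SZ_128M)
		return false;

	/* Tickets are queued: let the normal async flusher handle it. */
	if (queued_ticket_bytes)
		return false;

	/* Clamped free space: overcommittable space plus unused chunk space. */
	uint64_t used = bytes_used + bytes_reserved + bytes_readonly + global_rsv;
	uint64_t thresh = avail;
	if (used < total_bytes)
		thresh += total_bytes - used;
	thresh >>= clamp;

	/* Reclaimable bytes, here simplified to pinned plus non-global may_use. */
	uint64_t reclaimable = bytes_pinned + (bytes_may_use - global_rsv);

	return reclaimable >= thresh;
}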
998 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in steal_from_global_rsv()
1001 if (!ticket->steal) in steal_from_global_rsv()
1004 if (global_rsv->space_info != space_info) in steal_from_global_rsv()
1007 spin_lock(&global_rsv->lock); in steal_from_global_rsv()
1008 min_bytes = mult_perc(global_rsv->size, 10); in steal_from_global_rsv()
1009 if (global_rsv->reserved < min_bytes + ticket->bytes) { in steal_from_global_rsv()
1010 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
1013 global_rsv->reserved -= ticket->bytes; in steal_from_global_rsv()
1015 ticket->bytes = 0; in steal_from_global_rsv()
1016 wake_up(&ticket->wait); in steal_from_global_rsv()
1017 space_info->tickets_id++; in steal_from_global_rsv()
1018 if (global_rsv->reserved < global_rsv->size) in steal_from_global_rsv()
1019 global_rsv->full = 0; in steal_from_global_rsv()
1020 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
1028 * @fs_info: fs_info for this fs
1029 * @space_info: the space info we were flushing
1045 u64 tickets_id = space_info->tickets_id; in maybe_fail_all_tickets()
1055 while (!list_empty(&space_info->tickets) && in maybe_fail_all_tickets()
1056 tickets_id == space_info->tickets_id) { in maybe_fail_all_tickets()
1057 ticket = list_first_entry(&space_info->tickets, in maybe_fail_all_tickets()
1065 ticket->bytes); in maybe_fail_all_tickets()
1069 ticket->error = -EIO; in maybe_fail_all_tickets()
1071 ticket->error = -ENOSPC; in maybe_fail_all_tickets()
1072 wake_up(&ticket->wait); in maybe_fail_all_tickets()
1083 return (tickets_id != space_info->tickets_id); in maybe_fail_all_tickets()
1087 * This is for normal flushers; we can wait all goddamned day if we want to. We
1108 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1111 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1112 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1115 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
1116 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1121 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1122 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_metadata_space()
1123 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1124 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1129 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_metadata_space()
1132 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
1135 commit_cycles--; in btrfs_async_reclaim_metadata_space()
1164 commit_cycles--; in btrfs_async_reclaim_metadata_space()
1166 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1172 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1177 * This handles pre-flushing of metadata space before we get to the point that
1179 * from the other flush paths because it doesn't rely on tickets to tell us how
1180 * much we need to flush; instead it attempts to keep us below the 80% full
1197 delayed_block_rsv = &fs_info->delayed_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1198 delayed_refs_rsv = &fs_info->delayed_refs_rsv; in btrfs_preempt_reclaim_metadata_space()
1199 global_rsv = &fs_info->global_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1200 trans_rsv = &fs_info->trans_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1202 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1222 if (block_rsv_size < space_info->bytes_may_use) in btrfs_preempt_reclaim_metadata_space()
1223 delalloc_size = space_info->bytes_may_use - block_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1230 block_rsv_size -= global_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1240 } else if (space_info->bytes_pinned > in btrfs_preempt_reclaim_metadata_space()
1243 to_reclaim = space_info->bytes_pinned; in btrfs_preempt_reclaim_metadata_space()
1254 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1258 * down the to_reclaim by 1/4. If it takes us down to 0, in btrfs_preempt_reclaim_metadata_space()
1266 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1270 if (loops == 1 && !space_info->reclaim_size) in btrfs_preempt_reclaim_metadata_space()
1271 space_info->clamp = max(1, space_info->clamp - 1); in btrfs_preempt_reclaim_metadata_space()
1273 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1285 * length to ->bytes_reserved, and subtracts the reserved space from
1286 * ->bytes_may_use.
1329 space_info = fs_info->data_sinfo; in btrfs_async_reclaim_data_space()
1331 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1332 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1333 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1334 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1337 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1338 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1340 while (!space_info->full) { in btrfs_async_reclaim_data_space()
1342 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1343 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1344 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1345 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1352 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1353 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1359 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1360 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1361 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1362 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1366 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_data_space()
1369 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1374 if (space_info->full) { in btrfs_async_reclaim_data_space()
1378 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1388 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1394 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1395 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1400 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); in btrfs_init_async_reclaim_work()
1401 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); in btrfs_init_async_reclaim_work()
1402 INIT_WORK(&fs_info->preempt_reclaim_work, in btrfs_init_async_reclaim_work()
1435 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1441 * to_reclaim but ->bytes == 0. in priority_reclaim_metadata_space()
1443 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1444 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1449 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1453 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1454 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1455 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1464 * success to the caller if we can steal from the global rsv - this is in priority_reclaim_metadata_space()
1466 * modify the fs, making it easier to debug -ENOSPC problems. in priority_reclaim_metadata_space()
1469 ticket->error = BTRFS_FS_ERROR(fs_info); in priority_reclaim_metadata_space()
1472 ticket->error = -ENOSPC; in priority_reclaim_metadata_space()
1482 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1489 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1492 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1493 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1497 while (!space_info->full) { in priority_reclaim_data_space()
1498 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1500 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1501 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1502 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1507 ticket->error = -ENOSPC; in priority_reclaim_data_space()
1510 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1517 DEFINE_WAIT(wait); in wait_reserve_ticket()
1520 spin_lock(&space_info->lock); in wait_reserve_ticket()
1521 while (ticket->bytes > 0 && ticket->error == 0) { in wait_reserve_ticket()
1522 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); in wait_reserve_ticket()
1525 * Delete us from the list. After we unlock the space in wait_reserve_ticket()
1533 ticket->error = -EINTR; in wait_reserve_ticket()
1536 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1540 finish_wait(&ticket->wait, &wait); in wait_reserve_ticket()
1541 spin_lock(&space_info->lock); in wait_reserve_ticket()
1543 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1591 ret = ticket->error; in handle_reserve_ticket()
1592 ASSERT(list_empty(&ticket->list)); in handle_reserve_ticket()
1599 ASSERT(!(ticket->bytes == 0 && ticket->error)); in handle_reserve_ticket()
1600 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, in handle_reserve_ticket()
1601 start_ns, flush, ticket->error); in handle_reserve_ticket()
1618 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); in maybe_clamp_preempt()
1619 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in maybe_clamp_preempt()
1622 * If we're heavy on ordered operations then clamping won't help us. We in maybe_clamp_preempt()
1630 space_info->clamp = min(space_info->clamp + 1, 8); in maybe_clamp_preempt()
1672 int ret = -ENOSPC; in __reserve_bytes()
1677 * If have a transaction handle (current->journal_info != NULL), then in __reserve_bytes()
1682 if (current->journal_info) { in __reserve_bytes()
1690 async_work = &fs_info->async_data_reclaim_work; in __reserve_bytes()
1692 async_work = &fs_info->async_reclaim_work; in __reserve_bytes()
1694 spin_lock(&space_info->lock); in __reserve_bytes()
1703 pending_tickets = !list_empty(&space_info->tickets) || in __reserve_bytes()
1704 !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1706 pending_tickets = !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1709 * Carry on if we have enough space (short-circuit) OR call in __reserve_bytes()
1713 ((used + orig_bytes <= space_info->total_bytes) || in __reserve_bytes()
1726 if (used + orig_bytes <= space_info->total_bytes) { in __reserve_bytes()
1742 space_info->reclaim_size += ticket.bytes; in __reserve_bytes()
1743 init_waitqueue_head(&ticket.wait); in __reserve_bytes()
1751 list_add_tail(&ticket.list, &space_info->tickets); in __reserve_bytes()
1752 if (!space_info->flush) { in __reserve_bytes()
1762 space_info->flush = 1; in __reserve_bytes()
1764 space_info->flags, in __reserve_bytes()
1771 &space_info->priority_tickets); in __reserve_bytes()
1773 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { in __reserve_bytes()
1776 * which means we won't have fs_info->fs_root set, so don't do in __reserve_bytes()
1779 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && in __reserve_bytes()
1780 !work_busy(&fs_info->preempt_reclaim_work) && in __reserve_bytes()
1782 trace_btrfs_trigger_flush(fs_info, space_info->flags, in __reserve_bytes()
1785 &fs_info->preempt_reclaim_work); in __reserve_bytes()
1788 spin_unlock(&space_info->lock); in __reserve_bytes()
1819 if (ret == -ENOSPC) { in btrfs_reserve_metadata_bytes()
1821 space_info->flags, orig_bytes, 1); in btrfs_reserve_metadata_bytes()
1842 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; in btrfs_reserve_data_bytes()
1848 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); in btrfs_reserve_data_bytes()
1851 if (ret == -ENOSPC) { in btrfs_reserve_data_bytes()
1853 data_sinfo->flags, bytes, 1); in btrfs_reserve_data_bytes()
1866 list_for_each_entry(space_info, &fs_info->space_info, list) { in btrfs_dump_space_info_for_trans_abort()
1867 spin_lock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1869 spin_unlock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1885 if (list_empty(&sinfo->ro_bgs)) in btrfs_account_ro_block_groups_free_space()
1888 spin_lock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
1889 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) { in btrfs_account_ro_block_groups_free_space()
1890 spin_lock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1892 if (!block_group->ro) { in btrfs_account_ro_block_groups_free_space()
1893 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1897 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_account_ro_block_groups_free_space()
1898 free_bytes += (block_group->length - in btrfs_account_ro_block_groups_free_space()
1899 block_group->used) * factor; in btrfs_account_ro_block_groups_free_space()
1901 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1903 spin_unlock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
1948 * value in doing trivial "relocations" of re-writing the same block group
1957 * - calculate a target unallocated amount of 5 block group sized chunks
1958 * - ratchet up the intensity of reclaim depending on how far we are from
1966 struct btrfs_fs_info *fs_info = space_info->fs_info; in calc_dynamic_reclaim_threshold()
1967 u64 unalloc = atomic64_read(&fs_info->free_chunk_space); in calc_dynamic_reclaim_threshold()
1969 u64 alloc = space_info->total_bytes; in calc_dynamic_reclaim_threshold()
1971 u64 unused = alloc - used; in calc_dynamic_reclaim_threshold()
1972 u64 want = target > unalloc ? target - unalloc : 0; in calc_dynamic_reclaim_threshold()
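/*
 * [Editor's sketch] One plausible completion of the dynamic threshold
 * calculation sketched by the fragments above: keep a target of five block
 * group sized chunks unallocated, and scale the reclaim threshold with how
 * far the currently unallocated space falls short of that target.  The
 * denominator of the ratio is an assumption on my part; the exact formula in
 * calc_dynamic_reclaim_threshold() may differ.
 */
#include <stdint.h>

int model_dynamic_reclaim_threshold(uint64_t unalloc,      /* free_chunk_space */
				    uint64_t chunk_size,    /* block group size */
				    uint64_t total_bytes,   /* allocated for this space_info */
				    uint64_t used_bytes)
{
	uint64_t target = 5 * chunk_size;
	uint64_t unused = total_bytes - used_bytes;
	uint64_t want = target > unalloc ? target - unalloc : 0;

	/* Nothing worth relocating if the allocated chunks are nearly full. */
	if (unused < chunk_size)
		return 0;

	/*
	 * 0% when we already meet the target, ratcheting up toward 100% as the
	 * unallocated space shrinks (assumed ratio: shortfall over target).
	 */
	return (int)(want * 100 / target);
}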
1985 lockdep_assert_held(&space_info->lock); in btrfs_calc_reclaim_threshold()
1987 if (READ_ONCE(space_info->dynamic_reclaim)) in btrfs_calc_reclaim_threshold()
1989 return READ_ONCE(space_info->bg_reclaim_threshold); in btrfs_calc_reclaim_threshold()
1999 struct btrfs_fs_info *fs_info = space_info->fs_info; in is_reclaim_urgent()
2000 u64 unalloc = atomic64_read(&fs_info->free_chunk_space); in is_reclaim_urgent()
2013 spin_lock(&space_info->lock); in do_reclaim_sweep()
2016 spin_unlock(&space_info->lock); in do_reclaim_sweep()
2018 down_read(&space_info->groups_sem); in do_reclaim_sweep()
2020 list_for_each_entry(bg, &space_info->block_groups[raid], list) { in do_reclaim_sweep()
2025 spin_lock(&bg->lock); in do_reclaim_sweep()
2026 thresh = mult_perc(bg->length, thresh_pct); in do_reclaim_sweep()
2027 if (bg->used < thresh && bg->reclaim_mark) { in do_reclaim_sweep()
2031 bg->reclaim_mark++; in do_reclaim_sweep()
2032 spin_unlock(&bg->lock); in do_reclaim_sweep()
2050 up_read(&space_info->groups_sem); in do_reclaim_sweep()
2055 u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info); in btrfs_space_info_update_reclaimable()
2057 lockdep_assert_held(&space_info->lock); in btrfs_space_info_update_reclaimable()
2058 space_info->reclaimable_bytes += bytes; in btrfs_space_info_update_reclaimable()
2060 if (space_info->reclaimable_bytes >= chunk_sz) in btrfs_space_info_update_reclaimable()
2066 lockdep_assert_held(&space_info->lock); in btrfs_set_periodic_reclaim_ready()
2067 if (!READ_ONCE(space_info->periodic_reclaim)) in btrfs_set_periodic_reclaim_ready()
2069 if (ready != space_info->periodic_reclaim_ready) { in btrfs_set_periodic_reclaim_ready()
2070 space_info->periodic_reclaim_ready = ready; in btrfs_set_periodic_reclaim_ready()
2072 space_info->reclaimable_bytes = 0; in btrfs_set_periodic_reclaim_ready()
2080 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) in btrfs_should_periodic_reclaim()
2082 if (!READ_ONCE(space_info->periodic_reclaim)) in btrfs_should_periodic_reclaim()
2085 spin_lock(&space_info->lock); in btrfs_should_periodic_reclaim()
2086 ret = space_info->periodic_reclaim_ready; in btrfs_should_periodic_reclaim()
2088 spin_unlock(&space_info->lock); in btrfs_should_periodic_reclaim()
2098 list_for_each_entry(space_info, &fs_info->space_info, list) { in btrfs_reclaim_sweep()
2108 struct btrfs_fs_info *fs_info = space_info->fs_info; in btrfs_return_free_space()
2109 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in btrfs_return_free_space()
2111 lockdep_assert_held(&space_info->lock); in btrfs_return_free_space()
2114 if (global_rsv->space_info != space_info) in btrfs_return_free_space()
2117 spin_lock(&global_rsv->lock); in btrfs_return_free_space()
2118 if (!global_rsv->full) { in btrfs_return_free_space()
2119 u64 to_add = min(len, global_rsv->size - global_rsv->reserved); in btrfs_return_free_space()
2121 global_rsv->reserved += to_add; in btrfs_return_free_space()
2123 if (global_rsv->reserved >= global_rsv->size) in btrfs_return_free_space()
2124 global_rsv->full = 1; in btrfs_return_free_space()
2125 len -= to_add; in btrfs_return_free_space()
2127 spin_unlock(&global_rsv->lock); in btrfs_return_free_space()