Lines matching full:ca (whole-identifier search for 'ca', the per-device struct bch_dev pointer)
Each entry reads: <line number in file> <matched source line> in <enclosing function>(), followed by a local/argument marker for how 'ca' is bound at that site.
342 struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL; in bch2_alloc_to_text() local
362 if (ca) in bch2_alloc_to_text()
363 prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca)); in bch2_alloc_to_text()
367 bch2_dev_put(ca); in bch2_alloc_to_text()
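The bch2_alloc_to_text() matches above show the guarded try-get/put lifecycle used throughout this file: the lookup may return NULL, every use is NULL-checked, and the reference is dropped unconditionally on exit. A minimal sketch of the same pattern, assuming the in-kernel bcachefs environment; example_dev_to_text() is a hypothetical wrapper, and the unconditional put at line 367 suggests bch2_dev_put() tolerates NULL:

static void example_dev_to_text(struct bch_fs *c, struct bkey_s_c k,
				struct printbuf *out)
{
	/* may return NULL: the bucket's device can be absent or invalid */
	struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;

	if (ca)
		prt_printf(out, "dev %u\n", ca->dev_idx);

	/* drop the reference; assumed safe even when ca is NULL */
	bch2_dev_put(ca);
}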
593 struct bch_dev *ca = NULL; in bch2_alloc_read() local
605 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
610 if (!ca) { in bch2_alloc_read()
617 for (u64 b = max_t(u64, ca->mi.first_bucket, start); in bch2_alloc_read()
618 b < min_t(u64, ca->mi.nbuckets, end); in bch2_alloc_read()
620 *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK]; in bch2_alloc_read()
626 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
631 if (!ca) { in bch2_alloc_read()
636 if (k.k->p.offset < ca->mi.first_bucket) { in bch2_alloc_read()
637 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket)); in bch2_alloc_read()
641 if (k.k->p.offset >= ca->mi.nbuckets) { in bch2_alloc_read()
647 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen; in bch2_alloc_read()
652 bch2_dev_put(ca); in bch2_alloc_read()
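bch2_alloc_read() walks keys ordered by (device index, bucket), so it uses bch2_dev_iterate() as a single-reference cursor: each call hands back the previously held device and takes a reference on the key's device, returning NULL when that device no longer exists. A sketch of the cursor contract in isolation; example_scan() and its loop driver are hypothetical, only the bch2_dev_iterate()/bch2_dev_put() usage is taken from the listing:

static void example_scan(struct bch_fs *c, struct bpos *pos, unsigned nr)
{
	struct bch_dev *ca = NULL;

	for (unsigned i = 0; i < nr; i++) {
		/* swap the held ref for one on pos[i].inode's device */
		ca = bch2_dev_iterate(c, ca, pos[i].inode);
		if (!ca)
			continue;	/* device no longer exists */

		/* operate only on valid buckets: [first_bucket, nbuckets) */
		if (pos[i].offset >= ca->mi.first_bucket &&
		    pos[i].offset <  ca->mi.nbuckets)
			pr_info("bucket %llu on dev %u\n",
				pos[i].offset, ca->dev_idx);
	}

	bch2_dev_put(ca);	/* NULL-safe drop of the last held ref */
}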
696 struct bch_dev *ca, in bch2_bucket_do_index() argument
727 need_discard_or_freespace_err_on(ca->mi.freespace_initialized && in bch2_bucket_do_index()
774 static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca, in bch2_dev_data_type_accounting_mod() argument
782 .dev_data_type.dev = ca->dev_idx, in bch2_dev_data_type_accounting_mod()
790 int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca, in bch2_alloc_key_to_dev_counters() argument
798 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
799 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?: in bch2_alloc_key_to_dev_counters()
800 bch2_dev_data_type_accounting_mod(trans, ca, old->data_type, in bch2_alloc_key_to_dev_counters()
801 -1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
805 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
808 bch2_bucket_sectors_fragmented(ca, *new) - in bch2_alloc_key_to_dev_counters()
809 bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
817 int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped, in bch2_alloc_key_to_dev_counters()
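The two call shapes in bch2_alloc_key_to_dev_counters() encode one rule: per-device usage is maintained as a delta between the old and new alloc states. When the bucket's data type changes, the new type gains a bucket and the old type loses one; when it stays the same, a single call carries the net deltas. Reassembled from the call sites above; the parameter meanings (bucket delta, sector delta, fragmented-sector delta) are inferred:

if (old->data_type != new->data_type) {
	/* type changed: +1 bucket of the new type, -1 of the old;
	 * ?: chains the calls, stopping at the first error */
	ret =   bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
			1,  new_sectors,
			bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
		bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
			-1, -old_sectors,
			-bch2_bucket_sectors_fragmented(ca, *old), flags);
} else {
	/* same type: one call with net deltas, zero bucket-count change */
	ret =   bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
			0, new_sectors - old_sectors,
			bch2_bucket_sectors_fragmented(ca, *new) -
			bch2_bucket_sectors_fragmented(ca, *old), flags);
}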
838 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); in bch2_trigger_alloc() local
839 if (!ca) in bch2_trigger_alloc()
882 ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?: in bch2_trigger_alloc()
883 bch2_bucket_do_index(trans, ca, new.s_c, new_a, true); in bch2_trigger_alloc()
902 old_lru = alloc_lru_idx_fragmentation(*old_a, ca); in bch2_trigger_alloc()
903 new_lru = alloc_lru_idx_fragmentation(*new_a, ca); in bch2_trigger_alloc()
921 ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx, in bch2_trigger_alloc()
928 ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags); in bch2_trigger_alloc()
987 u8 *gen = bucket_gen(ca, new.k->p.offset); in bch2_trigger_alloc()
1007 bch2_discard_one_bucket_fast(ca, new.k->p.offset); in bch2_trigger_alloc()
1011 should_invalidate_buckets(ca, bch2_dev_usage_read(ca))) in bch2_trigger_alloc()
1012 bch2_dev_do_invalidates(ca); in bch2_trigger_alloc()
1020 struct bucket *g = gc_bucket(ca, new.k->p.offset); in bch2_trigger_alloc()
1032 bch2_dev_put(ca); in bch2_trigger_alloc()
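Among the bch2_trigger_alloc() matches, lines 902-903 show the fragmentation-LRU maintenance step: the index is computed for both the old and new alloc states, and the LRU btree only needs touching when they differ. A sketch of that step; only alloc_lru_idx_fragmentation() is from the listing, while bch2_lru_change() and its argument order are assumptions:

u64 old_lru = alloc_lru_idx_fragmentation(*old_a, ca);
u64 new_lru = alloc_lru_idx_fragmentation(*new_a, ca);

if (old_lru != new_lru)
	/* reposition the bucket in the fragmentation LRU
	 * (helper name and arguments assumed) */
	ret = bch2_lru_change(trans, BCH_LRU_FRAGMENTATION_START,
			      bucket_to_u64(new.k->p), old_lru, new_lru);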
1087 static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket) in next_bucket() argument
1089 if (*ca) { in next_bucket()
1090 if (bucket->offset < (*ca)->mi.first_bucket) in next_bucket()
1091 bucket->offset = (*ca)->mi.first_bucket; in next_bucket()
1093 if (bucket->offset < (*ca)->mi.nbuckets) in next_bucket()
1096 bch2_dev_put(*ca); in next_bucket()
1097 *ca = NULL; in next_bucket()
1103 *ca = __bch2_next_dev_idx(c, bucket->inode, NULL); in next_bucket()
1104 if (*ca) { in next_bucket()
1105 *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket); in next_bucket()
1106 bch2_dev_get(*ca); in next_bucket()
1110 return *ca != NULL; in next_bucket()
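next_bucket() concentrates the reference bookkeeping for cross-device iteration: it clamps the position to the current device's valid bucket range, and when that device is exhausted it drops the caller's reference, nulls the pointer, and acquires the next device. A hypothetical caller, to show the contract; process_bucket() is invented for illustration:

struct bch_dev *ca = NULL;
struct bpos bucket = POS_MIN;

while (next_bucket(c, &ca, &bucket)) {
	/* here ca is referenced and bucket is valid on it */
	process_bucket(ca, bucket.offset);
	bucket.offset++;	/* advance; next_bucket() re-clamps */
}
/* on exit next_bucket() has already dropped the ref and set ca = NULL */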
1114 struct bch_dev **ca, struct bkey *hole) in bch2_get_key_or_real_bucket_hole() argument
1123 *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode); in bch2_get_key_or_real_bucket_hole()
1128 if (!*ca || !bucket_valid(*ca, hole_start.offset)) { in bch2_get_key_or_real_bucket_hole()
1129 if (!next_bucket(c, ca, &hole_start)) in bch2_get_key_or_real_bucket_hole()
1136 if (k.k->p.offset > (*ca)->mi.nbuckets) in bch2_get_key_or_real_bucket_hole()
1137 bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset); in bch2_get_key_or_real_bucket_hole()
1159 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p); in bch2_check_alloc_key() local
1160 if (fsck_err_on(!ca, in bch2_check_alloc_key()
1165 if (!ca) in bch2_check_alloc_key()
1168 if (!ca->mi.freespace_initialized) in bch2_check_alloc_key()
1237 bch2_dev_put(ca); in bch2_check_alloc_key()
1244 struct bch_dev *ca, in bch2_check_alloc_hole_freespace() argument
1253 if (!ca->mi.freespace_initialized) in bch2_check_alloc_hole_freespace()
1505 struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode); in bch2_check_bucket_gens_key() local
1506 if (!ca) { in bch2_check_bucket_gens_key()
1514 if (fsck_err_on(end <= ca->mi.first_bucket || in bch2_check_bucket_gens_key()
1515 start >= ca->mi.nbuckets, in bch2_check_bucket_gens_key()
1523 for (b = start; b < ca->mi.first_bucket; b++) in bch2_check_bucket_gens_key()
1531 for (b = ca->mi.nbuckets; b < end; b++) in bch2_check_bucket_gens_key()
1551 bch2_dev_put(ca); in bch2_check_bucket_gens_key()
1560 struct bch_dev *ca = NULL; in bch2_check_alloc_info() local
1579 k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole); in bch2_check_alloc_info()
1600 ret = bch2_check_alloc_hole_freespace(trans, ca, in bch2_check_alloc_info()
1628 bch2_dev_put(ca); in bch2_check_alloc_info()
1629 ca = NULL; in bch2_check_alloc_info()
1700 struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode); in bch2_check_alloc_to_lru_ref() local
1701 if (!ca) in bch2_check_alloc_to_lru_ref()
1706 u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); in bch2_check_alloc_to_lru_ref()
1744 bch2_dev_put(ca); in bch2_check_alloc_to_lru_ref()
1767 static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress) in discard_in_flight_add() argument
1771 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1772 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_add()
1778 ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) { in discard_in_flight_add()
1783 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1787 static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket) in discard_in_flight_remove() argument
1789 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
1790 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_remove()
1793 darray_remove_item(&ca->discard_buckets_in_flight, i); in discard_in_flight_remove()
1798 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
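discard_in_flight_add() and discard_in_flight_remove() implement a small dedup set: a darray (bcachefs's dynamic-array helper, seen above) of buckets currently being discarded, guarded by a plain mutex. A sketch of the add side, reassembled from the matches; the struct discard_in_flight field layout and the error value are assumptions:

static int example_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
	int ret;

	mutex_lock(&ca->discard_buckets_in_flight_lock);
	/* refuse duplicates: this bucket is already tracked */
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			ret = -EEXIST;	/* illustrative error code */
			goto out;
		}

	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
		.in_progress	= in_progress,
		.bucket		= bucket,
	}));
out:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
	return ret;
}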
1809 struct bch_dev *ca, in bch2_discard_one_bucket() argument
1861 if (discard_in_flight_add(ca, iter.pos.offset, true)) in bch2_discard_one_bucket()
1871 if (ca->mi.discard && !c->opts.nochanges) { in bch2_discard_one_bucket()
1877 blkdev_issue_discard(ca->disk_sb.bdev, in bch2_discard_one_bucket()
1878 k.k->p.offset * ca->mi.bucket_size, in bch2_discard_one_bucket()
1879 ca->mi.bucket_size, in bch2_discard_one_bucket()
1904 discard_in_flight_remove(ca, iter.pos.offset); in bch2_discard_one_bucket()
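Lines 1877-1879 show how a bucket maps onto a block-layer discard: bucket sizes are kept in 512-byte sectors, so the bucket number times ca->mi.bucket_size is the starting sector and one bucket_size is the length. blkdev_issue_discard() is the standard block-layer helper; the GFP_KERNEL argument assumes its modern four-argument form:

if (ca->mi.discard && !c->opts.nochanges)
	ret = blkdev_issue_discard(ca->disk_sb.bdev,
				   k.k->p.offset * ca->mi.bucket_size, /* start sector */
				   ca->mi.bucket_size,                 /* nr of sectors */
				   GFP_KERNEL);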
1914 struct bch_dev *ca = container_of(work, struct bch_dev, discard_work); in bch2_do_discards_work() local
1915 struct bch_fs *c = ca->fs; in bch2_do_discards_work()
1928 POS(ca->dev_idx, 0), in bch2_do_discards_work()
1929 POS(ca->dev_idx, U64_MAX), 0, k, in bch2_do_discards_work()
1930 bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false))); in bch2_do_discards_work()
1932 if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal)) in bch2_do_discards_work()
1938 percpu_ref_put(&ca->io_ref); in bch2_do_discards_work()
1942 void bch2_dev_do_discards(struct bch_dev *ca) in bch2_dev_do_discards() argument
1944 struct bch_fs *c = ca->fs; in bch2_dev_do_discards()
1949 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_discards()
1952 if (queue_work(c->write_ref_wq, &ca->discard_work)) in bch2_dev_do_discards()
1955 percpu_ref_put(&ca->io_ref); in bch2_dev_do_discards()
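bch2_dev_do_discards(), its _fast sibling, and bch2_dev_do_invalidates() below all follow one kick pattern: pin the device's io_ref for WRITE, queue the per-device work item on the filesystem's write-ref workqueue, and drop the ref if the work was already pending. A sketch of just that pattern; the real functions also take a filesystem-level write ref, which doesn't match 'ca' and so doesn't appear in the listing:

static void example_kick(struct bch_dev *ca, struct work_struct *work)
{
	struct bch_fs *c = ca->fs;

	/* pin the device for I/O; bail if it is going away */
	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		return;

	/* queue_work() returns false if already queued: drop our ref,
	 * otherwise the work item is responsible for putting it */
	if (queue_work(c->write_ref_wq, work))
		return;

	percpu_ref_put(&ca->io_ref);
}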
1962 for_each_member_device(c, ca) in bch2_do_discards()
1963 bch2_dev_do_discards(ca); in bch2_do_discards()
1967 struct bch_dev *ca, in bch2_do_discards_fast_one() argument
1974 BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); in bch2_do_discards_fast_one()
1982 ca->dev_idx, bucket)) in bch2_do_discards_fast_one()
1985 ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); in bch2_do_discards_fast_one()
1994 struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work); in bch2_do_discards_fast_work() local
1995 struct bch_fs *c = ca->fs; in bch2_do_discards_fast_work()
2005 mutex_lock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2006 darray_for_each(ca->discard_buckets_in_flight, i) { in bch2_do_discards_fast_work()
2015 mutex_unlock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2021 bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s)); in bch2_do_discards_fast_work()
2024 discard_in_flight_remove(ca, bucket); in bch2_do_discards_fast_work()
2033 percpu_ref_put(&ca->io_ref); in bch2_do_discards_fast_work()
2037 static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket) in bch2_discard_one_bucket_fast() argument
2039 struct bch_fs *c = ca->fs; in bch2_discard_one_bucket_fast()
2041 if (discard_in_flight_add(ca, bucket, false)) in bch2_discard_one_bucket_fast()
2047 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_discard_one_bucket_fast()
2050 if (queue_work(c->write_ref_wq, &ca->discard_fast_work)) in bch2_discard_one_bucket_fast()
2053 percpu_ref_put(&ca->io_ref); in bch2_discard_one_bucket_fast()
2125 struct bch_dev *ca, bool *wrapped) in next_lru_key() argument
2129 k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); in next_lru_key()
2131 bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0)); in next_lru_key()
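next_lru_key() scans a device's LRU entries with a single wrap-around: peek forward up to the device's maximum LRU position, and if nothing is found and we haven't wrapped yet, reset to the device's first LRU position and try once more. Reassembled from the two matches above; the control flow connecting them is inferred:

static struct bkey_s_c example_next_lru_key(struct btree_trans *trans,
					    struct btree_iter *iter,
					    struct bch_dev *ca, bool *wrapped)
{
	struct bkey_s_c k;
again:
	k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
	if (!k.k && !*wrapped) {
		/* end of this device's LRU keyspace: wrap to the start once */
		bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
		*wrapped = true;
		goto again;
	}
	return k;
}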
2141 struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work); in bch2_do_invalidates_work() local
2142 struct bch_fs *c = ca->fs; in bch2_do_invalidates_work()
2151 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)); in bch2_do_invalidates_work()
2156 lru_pos(ca->dev_idx, 0, in bch2_do_invalidates_work()
2163 struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped); in bch2_do_invalidates_work()
2182 percpu_ref_put(&ca->io_ref); in bch2_do_invalidates_work()
2186 void bch2_dev_do_invalidates(struct bch_dev *ca) in bch2_dev_do_invalidates() argument
2188 struct bch_fs *c = ca->fs; in bch2_dev_do_invalidates()
2193 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_invalidates()
2196 if (queue_work(c->write_ref_wq, &ca->invalidate_work)) in bch2_dev_do_invalidates()
2199 percpu_ref_put(&ca->io_ref); in bch2_dev_do_invalidates()
2206 for_each_member_device(c, ca) in bch2_do_invalidates()
2207 bch2_dev_do_invalidates(ca); in bch2_do_invalidates()
2210 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, in bch2_dev_freespace_init() argument
2217 struct bpos end = POS(ca->dev_idx, bucket_end); in bch2_dev_freespace_init()
2223 BUG_ON(bucket_end > ca->mi.nbuckets); in bch2_dev_freespace_init()
2226 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)), in bch2_dev_freespace_init()
2229 * Scan the alloc btree for every bucket on @ca, and add buckets to the in bch2_dev_freespace_init()
2234 bch_info(ca, "%s: currently at %llu/%llu", in bch2_dev_freespace_init()
2235 __func__, iter.pos.offset, ca->mi.nbuckets); in bch2_dev_freespace_init()
2259 ret = bch2_bucket_do_index(trans, ca, k, a, true) ?: in bch2_dev_freespace_init()
2298 bch_err_msg(ca, ret, "initializing free space"); in bch2_dev_freespace_init()
2303 m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); in bch2_dev_freespace_init()
2320 for_each_member_device(c, ca) { in bch2_fs_freespace_init()
2321 if (ca->mi.freespace_initialized) in bch2_fs_freespace_init()
2329 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets); in bch2_fs_freespace_init()
2331 bch2_dev_put(ca); in bch2_fs_freespace_init()
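bch2_fs_freespace_init() makes the pass idempotent: each device records freespace_initialized in its member info, so an interrupted initialization resumes by skipping devices already done. Reassembled from the matches, with error handling condensed; note that for_each_member_device() holds a device reference that must be dropped on early exit, per line 2331:

for_each_member_device(c, ca) {
	if (ca->mi.freespace_initialized)
		continue;

	ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
	if (ret) {
		bch2_dev_put(ca);	/* the loop macro holds a ref */
		break;
	}
}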
2349 int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_remove_alloc() argument
2351 struct bpos start = POS(ca->dev_idx, 0); in bch2_dev_remove_alloc()
2352 struct bpos end = POS(ca->dev_idx, U64_MAX); in bch2_dev_remove_alloc()
2359 ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?: in bch2_dev_remove_alloc()
2372 bch2_dev_usage_remove(c, ca->dev_idx); in bch2_dev_remove_alloc()
2373 bch_err_msg(ca, ret, "removing dev alloc info"); in bch2_dev_remove_alloc()
2423 for_each_online_member(c, ca) { in bch2_recalc_capacity()
2424 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi; in bch2_recalc_capacity()
2431 for_each_rw_member(c, ca) { in bch2_recalc_capacity()
2451 dev_reserve += ca->nr_btree_reserve * 2; in bch2_recalc_capacity()
2452 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */ in bch2_recalc_capacity()
2458 dev_reserve *= ca->mi.bucket_size; in bch2_recalc_capacity()
2460 capacity += bucket_to_sector(ca, ca->mi.nbuckets - in bch2_recalc_capacity()
2461 ca->mi.first_bucket); in bch2_recalc_capacity()
2466 ca->mi.bucket_size); in bch2_recalc_capacity()
2490 for_each_rw_member(c, ca) in bch2_min_rw_member_capacity()
2491 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size); in bch2_min_rw_member_capacity()
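bch2_min_rw_member_capacity() is a plain reduction over the read-write members; since ca->mi.bucket_size is in sectors, the result is the smallest device's usable size in sectors. Reassembled from the matches; the U64_MAX seed for the reduction is inferred:

u64 example_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}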
2495 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_has_open_write_point() argument
2505 ob->dev == ca->dev_idx) in bch2_dev_has_open_write_point()
2514 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_remove() argument
2521 clear_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_remove()
2530 bch2_open_buckets_stop(c, ca, false); in bch2_dev_allocator_remove()
2547 !bch2_dev_has_open_write_point(c, ca)); in bch2_dev_allocator_remove()
2551 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_add() argument
2556 if (ca->mi.data_allowed & (1 << i)) in bch2_dev_allocator_add()
2557 set_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_add()
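bch2_dev_allocator_add() and bch2_dev_allocator_remove() keep the allocator's candidate sets as per-data-type device bitmaps: adding a device sets its bit in c->rw_devs[i] for every data type its data_allowed mask permits, and removal clears all of them. Reassembled from the matches; the loop bound over data types is inferred:

void example_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}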
2562 void bch2_dev_allocator_background_exit(struct bch_dev *ca) in bch2_dev_allocator_background_exit() argument
2564 darray_exit(&ca->discard_buckets_in_flight); in bch2_dev_allocator_background_exit()
2567 void bch2_dev_allocator_background_init(struct bch_dev *ca) in bch2_dev_allocator_background_init() argument
2569 mutex_init(&ca->discard_buckets_in_flight_lock); in bch2_dev_allocator_background_init()
2570 INIT_WORK(&ca->discard_work, bch2_do_discards_work); in bch2_dev_allocator_background_init()
2571 INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work); in bch2_dev_allocator_background_init()
2572 INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work); in bch2_dev_allocator_background_init()