Lines Matching full:b

40 if (!c->btree_roots_known[0].b) in bch2_recalc_btree_reserve()
46 if (r->b) in bch2_recalc_btree_reserve()
47 reserve += min_t(unsigned, 1, r->b->c.level) * 8; in bch2_recalc_btree_reserve()
63 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) in btree_node_to_freedlist() argument
65 BUG_ON(!list_empty(&b->list)); in btree_node_to_freedlist()
67 if (b->c.lock.readers) in btree_node_to_freedlist()
68 list_add(&b->list, &bc->freed_pcpu); in btree_node_to_freedlist()
70 list_add(&b->list, &bc->freed_nonpcpu); in btree_node_to_freedlist()
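
Reassembled from the two matches above (the else branch is inferred from the adjacent source line numbers 68/70), the whole helper is a two-way dispatch: a six lock that was initialized with per-CPU reader counts can only be reused for another per-CPU-reader node, so freed nodes are kept on separate lists:

    static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
    {
            BUG_ON(!list_empty(&b->list));

            /* per-CPU reader locks must be recycled among themselves */
            if (b->c.lock.readers)
                    list_add(&b->list, &bc->freed_pcpu);
            else
                    list_add(&b->list, &bc->freed_nonpcpu);
    }
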
73 static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_to_freelist() argument
75 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_to_freelist()
76 BUG_ON(!b->data); in __bch2_btree_node_to_freelist()
79 list_add(&b->list, &bc->freeable); in __bch2_btree_node_to_freelist()
82 void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) in bch2_btree_node_to_freelist() argument
87 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_to_freelist()
90 six_unlock_write(&b->c.lock); in bch2_btree_node_to_freelist()
91 six_unlock_intent(&b->c.lock); in bch2_btree_node_to_freelist()
94 static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) in __btree_node_data_free() argument
96 BUG_ON(!list_empty(&b->list)); in __btree_node_data_free()
97 BUG_ON(btree_node_hashed(b)); in __btree_node_data_free()
104 if (b->data) in __btree_node_data_free()
105 mm_account_reclaimed_pages(btree_buf_bytes(b) / PAGE_SIZE); in __btree_node_data_free()
106 if (b->aux_data) in __btree_node_data_free()
107 mm_account_reclaimed_pages(btree_aux_data_bytes(b) / PAGE_SIZE); in __btree_node_data_free()
109 EBUG_ON(btree_node_write_in_flight(b)); in __btree_node_data_free()
111 clear_btree_node_just_written(b); in __btree_node_data_free()
113 kvfree(b->data); in __btree_node_data_free()
114 b->data = NULL; in __btree_node_data_free()
116 kvfree(b->aux_data); in __btree_node_data_free()
118 munmap(b->aux_data, btree_aux_data_bytes(b)); in __btree_node_data_free()
120 b->aux_data = NULL; in __btree_node_data_free()
122 btree_node_to_freedlist(bc, b); in __btree_node_data_free()
125 static void btree_node_data_free(struct btree_cache *bc, struct btree *b) in btree_node_data_free() argument
127 BUG_ON(list_empty(&b->list)); in btree_node_data_free()
128 list_del_init(&b->list); in btree_node_data_free()
130 __btree_node_data_free(bc, b); in btree_node_data_free()
136 const struct btree *b = obj; in bch2_btree_cache_cmp_fn() local
139 return b->hash_val == *v ? 0 : 1; in bch2_btree_cache_cmp_fn()
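
rhashtable compare callbacks return 0 for a match and nonzero otherwise, which is what the ternary at line 139 implements. A sketch of the full callback plus the table parameters it plugs into; the params struct is a plausible reconstruction from the field names visible in the matches (b->hash at line 286, b->hash_val at line 285), not a verified copy of bch_btree_cache_params:

    static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                       const void *obj)
    {
            const struct btree *b = obj;
            const u64 *v = arg->key;

            return b->hash_val == *v ? 0 : 1;       /* 0 == keys match */
    }

    static const struct rhashtable_params bch_btree_cache_params = {
            .head_offset    = offsetof(struct btree, hash),
            .key_offset     = offsetof(struct btree, hash_val),
            .key_len        = sizeof(u64),
            .obj_cmpfn      = bch2_btree_cache_cmp_fn,
    };
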
150 static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) in btree_node_data_alloc() argument
152 BUG_ON(b->data || b->aux_data); in btree_node_data_alloc()
156 b->data = kvmalloc(btree_buf_bytes(b), gfp); in btree_node_data_alloc()
157 if (!b->data) in btree_node_data_alloc()
160 b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp); in btree_node_data_alloc()
162 b->aux_data = mmap(NULL, btree_aux_data_bytes(b), in btree_node_data_alloc()
165 if (b->aux_data == MAP_FAILED) in btree_node_data_alloc()
166 b->aux_data = NULL; in btree_node_data_alloc()
168 if (!b->aux_data) { in btree_node_data_alloc()
169 kvfree(b->data); in btree_node_data_alloc()
170 b->data = NULL; in btree_node_data_alloc()
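
Two different allocators for b->aux_data appear above: kvmalloc() at line 160 and an anonymous mmap() with MAP_FAILED handling at lines 162-166. Together with the munmap() at line 118 in __btree_node_data_free(), this strongly suggests a kernel/userspace (bcachefs-tools) build split; a sketch of how the branches plausibly fit together:

    #ifdef __KERNEL__
            b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
    #else
            /* userspace build: back aux_data with anonymous pages */
            b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
                               PROT_READ|PROT_WRITE,
                               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
            if (b->aux_data == MAP_FAILED)
                    b->aux_data = NULL;
    #endif
            if (!b->aux_data) {
                    kvfree(b->data);
                    b->data = NULL;
                    return -ENOMEM; /* exact error code not shown in the matches */
            }
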
179 struct btree *b; in __btree_node_mem_alloc() local
181 b = kzalloc(sizeof(struct btree), gfp); in __btree_node_mem_alloc()
182 if (!b) in __btree_node_mem_alloc()
185 bkey_btree_ptr_init(&b->key); in __btree_node_mem_alloc()
186 INIT_LIST_HEAD(&b->list); in __btree_node_mem_alloc()
187 INIT_LIST_HEAD(&b->write_blocked); in __btree_node_mem_alloc()
188 b->byte_order = ilog2(c->opts.btree_node_size); in __btree_node_mem_alloc()
189 return b; in __btree_node_mem_alloc()
195 struct btree *b; in __bch2_btree_node_mem_alloc() local
197 b = __btree_node_mem_alloc(c, GFP_KERNEL); in __bch2_btree_node_mem_alloc()
198 if (!b) in __bch2_btree_node_mem_alloc()
201 if (btree_node_data_alloc(c, b, GFP_KERNEL)) { in __bch2_btree_node_mem_alloc()
202 kfree(b); in __bch2_btree_node_mem_alloc()
206 bch2_btree_lock_init(&b->c, 0, GFP_KERNEL); in __bch2_btree_node_mem_alloc()
208 __bch2_btree_node_to_freelist(bc, b); in __bch2_btree_node_mem_alloc()
209 return b; in __bch2_btree_node_mem_alloc()
212 static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) in __btree_node_pinned() argument
214 struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p); in __btree_node_pinned()
216 u64 mask = bc->pinned_nodes_mask[!!b->c.level]; in __btree_node_pinned()
218 return ((mask & BIT_ULL(b->c.btree_id)) && in __btree_node_pinned()
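
The return expression at line 218 is cut off after the per-btree mask test; since a struct bbpos is built at line 214 and otherwise unused, the missing half is presumably a range check of pos against the pinned-nodes window. A hedged completion (the pinned_nodes_start/pinned_nodes_end field names are assumptions):

    return ((mask & BIT_ULL(b->c.btree_id)) &&
            bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
            bbpos_cmp(bc->pinned_nodes_end, pos) >= 0);
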
223 void bch2_node_pin(struct bch_fs *c, struct btree *b) in bch2_node_pin() argument
228 if (b != btree_node_root(c, b) && !btree_node_pinned(b)) { in bch2_node_pin()
229 set_btree_node_pinned(b); in bch2_node_pin()
230 list_move(&b->list, &bc->live[1].list); in bch2_node_pin()
240 struct btree *b, *n; in bch2_btree_cache_unpin() local
246 list_for_each_entry_safe(b, n, &bc->live[1].list, list) { in bch2_btree_cache_unpin()
247 clear_btree_node_pinned(b); in bch2_btree_cache_unpin()
248 list_move(&b->list, &bc->live[0].list); in bch2_btree_cache_unpin()
258 void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_remove() argument
262 int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); in __bch2_btree_node_hash_remove()
266 b->hash_val = 0; in __bch2_btree_node_hash_remove()
268 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_remove()
269 --bc->nr_by_btree[b->c.btree_id]; in __bch2_btree_node_hash_remove()
270 --bc->live[btree_node_pinned(b)].nr; in __bch2_btree_node_hash_remove()
271 list_del_init(&b->list); in __bch2_btree_node_hash_remove()
274 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in bch2_btree_node_hash_remove() argument
276 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_hash_remove()
277 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_hash_remove()
280 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_insert() argument
282 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_hash_insert()
283 BUG_ON(b->hash_val); in __bch2_btree_node_hash_insert()
285 b->hash_val = btree_ptr_hash_val(&b->key); in __bch2_btree_node_hash_insert()
286 int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, in __bch2_btree_node_hash_insert()
291 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_insert()
292 bc->nr_by_btree[b->c.btree_id]++; in __bch2_btree_node_hash_insert()
294 bool p = __btree_node_pinned(bc, b); in __bch2_btree_node_hash_insert()
295 mod_bit(BTREE_NODE_pinned, &b->flags, p); in __bch2_btree_node_hash_insert()
297 list_add_tail(&b->list, &bc->live[p].list); in __bch2_btree_node_hash_insert()
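
The insert path mirrors the remove path above it: compute hash_val from the key, insert into the rhashtable (rhashtable_lookup_insert_fast() refuses duplicates rather than inserting twice), then bump the counters that __bch2_btree_node_hash_remove() decrements. The error check between lines 286 and 291 is inferred, and the trailing increment is the assumed counterpart of --bc->live[...].nr at line 270:

    b->hash_val = btree_ptr_hash_val(&b->key);
    int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
                                            bch_btree_cache_params);
    if (ret)
            return ret;             /* counters only change on success */

    if (b->c.btree_id < BTREE_ID_NR)
            bc->nr_by_btree[b->c.btree_id]++;

    bool p = __btree_node_pinned(bc, b);
    mod_bit(BTREE_NODE_pinned, &b->flags, p);

    list_add_tail(&b->list, &bc->live[p].list);
    bc->live[p].nr++;
    return 0;
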
302 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, in bch2_btree_node_hash_insert() argument
305 b->c.level = level; in bch2_btree_node_hash_insert()
306 b->c.btree_id = id; in bch2_btree_node_hash_insert()
309 int ret = __bch2_btree_node_hash_insert(bc, b); in bch2_btree_node_hash_insert()
320 struct btree *b; in bch2_btree_node_update_key_early() local
327 b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true); in bch2_btree_node_update_key_early()
328 if (!IS_ERR_OR_NULL(b)) { in bch2_btree_node_update_key_early()
331 __bch2_btree_node_hash_remove(&c->btree_cache, b); in bch2_btree_node_update_key_early()
333 bkey_copy(&b->key, new); in bch2_btree_node_update_key_early()
334 ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); in bch2_btree_node_update_key_early()
338 six_unlock_read(&b->c.lock); in bch2_btree_node_update_key_early()
357 static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter) in __btree_node_reclaim() argument
364 if (b->flags & ((1U << BTREE_NODE_dirty)| in __btree_node_reclaim()
368 if (btree_node_dirty(b)) in __btree_node_reclaim()
370 else if (btree_node_read_in_flight(b)) in __btree_node_reclaim()
372 else if (btree_node_write_in_flight(b)) in __btree_node_reclaim()
378 bch2_btree_node_wait_on_read(b); in __btree_node_reclaim()
379 bch2_btree_node_wait_on_write(b); in __btree_node_reclaim()
382 if (!six_trylock_intent(&b->c.lock)) { in __btree_node_reclaim()
387 if (!six_trylock_write(&b->c.lock)) { in __btree_node_reclaim()
393 if (b->flags & ((1U << BTREE_NODE_read_in_flight)| in __btree_node_reclaim()
396 if (btree_node_read_in_flight(b)) in __btree_node_reclaim()
398 else if (btree_node_write_in_flight(b)) in __btree_node_reclaim()
402 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
403 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
407 if (btree_node_noevict(b)) { in __btree_node_reclaim()
411 if (btree_node_write_blocked(b)) { in __btree_node_reclaim()
415 if (btree_node_will_make_reachable(b)) { in __btree_node_reclaim()
420 if (btree_node_dirty(b)) { in __btree_node_reclaim()
432 bch2_btree_node_write(c, b, SIX_LOCK_intent, in __btree_node_reclaim()
435 __bch2_btree_node_write(c, b, in __btree_node_reclaim()
438 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
439 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
443 if (b->hash_val && !ret) in __btree_node_reclaim()
444 trace_and_count(c, btree_cache_reap, c, b); in __btree_node_reclaim()
447 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
449 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
454 static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter) in btree_node_reclaim() argument
456 return __btree_node_reclaim(c, b, false, shrinker_counter); in btree_node_reclaim()
459 static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b) in btree_node_write_and_reclaim() argument
461 return __btree_node_reclaim(c, b, true, false); in btree_node_write_and_reclaim()
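
Reclaim never sleeps on a lock: it uses the trylock variants and backs out on contention, since the shrinker can run while other btree locks are held. The flush parameter is what separates the two wrappers: btree_node_reclaim() (flush=false) skips dirty nodes, while btree_node_write_and_reclaim() (flush=true) is willing to write them out first. The non-blocking acquisition, distilled into a hypothetical helper:

    /* not a real function in this file; condensed from lines 382-403 */
    static bool reclaim_trylock(struct btree *b)
    {
            if (!six_trylock_intent(&b->c.lock))
                    return false;

            if (!six_trylock_write(&b->c.lock)) {
                    six_unlock_intent(&b->c.lock);
                    return false;
            }

            return true;
    }
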
470 struct btree *b, *t; in bch2_btree_cache_scan() local
496 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_scan()
509 if (!btree_node_reclaim(c, b, true)) { in bch2_btree_cache_scan()
510 btree_node_data_free(bc, b); in bch2_btree_cache_scan()
511 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
512 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
518 list_for_each_entry_safe(b, t, &list->list, list) { in bch2_btree_cache_scan()
521 if (btree_node_accessed(b)) { in bch2_btree_cache_scan()
522 clear_btree_node_accessed(b); in bch2_btree_cache_scan()
525 } else if (!btree_node_reclaim(c, b, true)) { in bch2_btree_cache_scan()
526 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_cache_scan()
527 __btree_node_data_free(bc, b); in bch2_btree_cache_scan()
532 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
533 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
538 btree_node_dirty(b) && in bch2_btree_cache_scan()
539 !btree_node_will_make_reachable(b) && in bch2_btree_cache_scan()
540 !btree_node_write_blocked(b) && in bch2_btree_cache_scan()
541 six_trylock_read(&b->c.lock)) { in bch2_btree_cache_scan()
542 list_move(&list->list, &b->list); in bch2_btree_cache_scan()
544 __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); in bch2_btree_cache_scan()
545 six_unlock_read(&b->c.lock); in bch2_btree_cache_scan()
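
The scan over the live list is a second-chance (clock) policy: a node with its accessed bit set gets the bit cleared and survives this pass; only nodes still cold on a later pass are actually evicted. Condensed from lines 518-533:

    list_for_each_entry_safe(b, t, &list->list, list) {
            if (btree_node_accessed(b)) {
                    /* hot: clear the bit, give it another trip around */
                    clear_btree_node_accessed(b);
            } else if (!btree_node_reclaim(c, b, true)) {
                    /* cold and reclaimable: unhash and free the buffers;
                     * reclaim succeeded with both locks held, so drop them */
                    __bch2_btree_node_hash_remove(bc, b);
                    __btree_node_data_free(bc, b);
                    six_unlock_write(&b->c.lock);
                    six_unlock_intent(&b->c.lock);
            }
    }
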
581 struct btree *b, *t; in bch2_fs_btree_cache_exit() local
599 if (r->b) in bch2_fs_btree_cache_exit()
600 list_add(&r->b->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
603 list_for_each_entry_safe(b, t, &bc->live[1].list, list) in bch2_fs_btree_cache_exit()
604 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
605 list_for_each_entry_safe(b, t, &bc->live[0].list, list) in bch2_fs_btree_cache_exit()
606 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
608 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_fs_btree_cache_exit()
609 BUG_ON(btree_node_read_in_flight(b) || in bch2_fs_btree_cache_exit()
610 btree_node_write_in_flight(b)); in bch2_fs_btree_cache_exit()
612 btree_node_data_free(bc, b); in bch2_fs_btree_cache_exit()
620 list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) { in bch2_fs_btree_cache_exit()
621 list_del(&b->list); in bch2_fs_btree_cache_exit()
622 six_lock_exit(&b->c.lock); in bch2_fs_btree_cache_exit()
623 kfree(b); in bch2_fs_btree_cache_exit()
753 struct btree *b; in btree_node_cannibalize() local
756 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
757 if (!btree_node_reclaim(c, b, false)) in btree_node_cannibalize()
758 return b; in btree_node_cannibalize()
762 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
763 if (!btree_node_write_and_reclaim(c, b)) in btree_node_cannibalize()
764 return b; in btree_node_cannibalize()
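
Cannibalizing walks the live lists from the cold end (list_for_each_entry_reverse) in two passes: the first takes only nodes reclaimable without I/O, the second falls back to writing dirty nodes out via btree_node_write_and_reclaim(). Reassembled, with the outer loop over both live lists inferred from the bc->live[i] indexing:

    for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
            list_for_each_entry_reverse(b, &bc->live[i].list, list)
                    if (!btree_node_reclaim(c, b, false))
                            return b;

    for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
            list_for_each_entry_reverse(b, &bc->live[i].list, list)
                    if (!btree_node_write_and_reclaim(c, b))
                            return b;
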
782 struct btree *b, *b2; in bch2_btree_node_mem_alloc() local
791 list_for_each_entry(b, freed, list) in bch2_btree_node_mem_alloc()
792 if (!btree_node_reclaim(c, b, false)) { in bch2_btree_node_mem_alloc()
793 list_del_init(&b->list); in bch2_btree_node_mem_alloc()
797 b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN); in bch2_btree_node_mem_alloc()
798 if (b) { in bch2_btree_node_mem_alloc()
799 bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT); in bch2_btree_node_mem_alloc()
803 b = __btree_node_mem_alloc(c, GFP_KERNEL); in bch2_btree_node_mem_alloc()
804 if (!b) in bch2_btree_node_mem_alloc()
806 bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL); in bch2_btree_node_mem_alloc()
810 BUG_ON(!six_trylock_intent(&b->c.lock)); in bch2_btree_node_mem_alloc()
811 BUG_ON(!six_trylock_write(&b->c.lock)); in bch2_btree_node_mem_alloc()
820 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
821 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
835 if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) { in bch2_btree_node_mem_alloc()
837 if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN)) in bch2_btree_node_mem_alloc()
842 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
843 BUG_ON(btree_node_hashed(b)); in bch2_btree_node_mem_alloc()
844 BUG_ON(btree_node_dirty(b)); in bch2_btree_node_mem_alloc()
845 BUG_ON(btree_node_write_in_flight(b)); in bch2_btree_node_mem_alloc()
847 b->flags = 0; in bch2_btree_node_mem_alloc()
848 b->written = 0; in bch2_btree_node_mem_alloc()
849 b->nsets = 0; in bch2_btree_node_mem_alloc()
850 b->sib_u64s[0] = 0; in bch2_btree_node_mem_alloc()
851 b->sib_u64s[1] = 0; in bch2_btree_node_mem_alloc()
852 b->whiteout_u64s = 0; in bch2_btree_node_mem_alloc()
853 bch2_btree_keys_init(b); in bch2_btree_node_mem_alloc()
854 set_btree_node_accessed(b); in bch2_btree_node_mem_alloc()
861 bch2_btree_node_to_freelist(c, b); in bch2_btree_node_mem_alloc()
865 return b; in bch2_btree_node_mem_alloc()
875 if (b) { in bch2_btree_node_mem_alloc()
876 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
877 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
882 b = b2; in bch2_btree_node_mem_alloc()
885 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
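
Both the struct and its data buffers are allocated with the same two-step shape: a GFP_NOWAIT|__GFP_NOWARN attempt first, then a sleeping GFP_KERNEL retry (lines 797-806 and 835-837), presumably with the cache lock dropped before blocking. The pattern:

    b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
    if (b) {
            bch2_btree_lock_init(&b->c,
                                 pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0,
                                 GFP_NOWAIT);
    } else {
            /* assumed: bc->lock is released here before sleeping */
            b = __btree_node_mem_alloc(c, GFP_KERNEL);
            if (!b)
                    goto err;       /* label inferred */
            bch2_btree_lock_init(&b->c,
                                 pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0,
                                 GFP_KERNEL);
    }
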
907 struct btree *b; in bch2_btree_node_fill() local
942 b = bch2_btree_node_mem_alloc(trans, level != 0); in bch2_btree_node_fill()
944 if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) { in bch2_btree_node_fill()
946 return b; in bch2_btree_node_fill()
953 if (IS_ERR(b)) in bch2_btree_node_fill()
954 return b; in bch2_btree_node_fill()
956 bkey_copy(&b->key, k); in bch2_btree_node_fill()
957 if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) { in bch2_btree_node_fill()
961 b->hash_val = 0; in bch2_btree_node_fill()
964 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_fill()
967 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
968 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
972 set_btree_node_read_in_flight(b); in bch2_btree_node_fill()
973 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
976 u32 seq = six_lock_seq(&b->c.lock); in bch2_btree_node_fill()
979 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
982 bch2_btree_node_read(trans, b, sync); in bch2_btree_node_fill()
991 if (!six_relock_type(&b->c.lock, lock_type, seq)) in bch2_btree_node_fill()
992 b = NULL; in bch2_btree_node_fill()
994 bch2_btree_node_read(trans, b, sync); in bch2_btree_node_fill()
996 six_lock_downgrade(&b->c.lock); in bch2_btree_node_fill()
999 return b; in bch2_btree_node_fill()
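
For a synchronous fill, the intent lock is not held across the read I/O. Instead the code records the lock's sequence number, unlocks, reads, and then tries to retake the same lock generation with six_relock_type(); if anyone touched the lock in between, the relock fails and NULL is returned so the caller retries. Condensed from lines 976-992:

    u32 seq = six_lock_seq(&b->c.lock);
    six_unlock_intent(&b->c.lock);

    bch2_btree_node_read(trans, b, sync);

    /* ... */

    if (!six_relock_type(&b->c.lock, lock_type, seq))
            b = NULL;       /* lock generation changed; caller must retry */
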
1002 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) in btree_bad_header() argument
1011 bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); in btree_bad_header()
1013 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); in btree_bad_header()
1016 bch2_btree_id_level_to_text(&buf, BTREE_NODE_ID(b->data), BTREE_NODE_LEVEL(b->data)); in btree_bad_header()
1018 bch2_bpos_to_text(&buf, b->data->min_key); in btree_bad_header()
1021 bch2_bpos_to_text(&buf, b->data->max_key); in btree_bad_header()
1028 static inline void btree_check_header(struct bch_fs *c, struct btree *b) in btree_check_header() argument
1030 if (b->c.btree_id != BTREE_NODE_ID(b->data) || in btree_check_header()
1031 b->c.level != BTREE_NODE_LEVEL(b->data) || in btree_check_header()
1032 !bpos_eq(b->data->max_key, b->key.k.p) || in btree_check_header()
1033 (b->key.k.type == KEY_TYPE_btree_ptr_v2 && in btree_check_header()
1034 !bpos_eq(b->data->min_key, in btree_check_header()
1035 bkey_i_to_btree_ptr_v2(&b->key)->v.min_key))) in btree_check_header()
1036 btree_bad_header(c, b); in btree_check_header()
1046 struct btree *b; in __bch2_btree_node_get() local
1052 b = btree_cache_find(bc, k); in __bch2_btree_node_get()
1053 if (unlikely(!b)) { in __bch2_btree_node_get()
1059 b = bch2_btree_node_fill(trans, path, k, path->btree_id, in __bch2_btree_node_get()
1064 if (!b) in __bch2_btree_node_get()
1067 if (IS_ERR(b)) in __bch2_btree_node_get()
1068 return b; in __bch2_btree_node_get()
1073 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in __bch2_btree_node_get()
1079 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in __bch2_btree_node_get()
1080 b->c.level != level || in __bch2_btree_node_get()
1082 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1091 if (!btree_node_accessed(b)) in __bch2_btree_node_get()
1092 set_btree_node_accessed(b); in __bch2_btree_node_get()
1095 if (unlikely(btree_node_read_in_flight(b))) { in __bch2_btree_node_get()
1096 u32 seq = six_lock_seq(&b->c.lock); in __bch2_btree_node_get()
1098 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1102 bch2_btree_node_wait_on_read(b); in __bch2_btree_node_get()
1112 if (!six_relock_type(&b->c.lock, lock_type, seq)) in __bch2_btree_node_get()
1120 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1125 prefetch(b->aux_data); in __bch2_btree_node_get()
1127 for_each_bset(b, t) { in __bch2_btree_node_get()
1128 void *p = (u64 *) b->aux_data + t->aux_data_offset; in __bch2_btree_node_get()
1135 if (unlikely(btree_node_read_error(b))) { in __bch2_btree_node_get()
1136 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1140 EBUG_ON(b->c.btree_id != path->btree_id); in __bch2_btree_node_get()
1141 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in __bch2_btree_node_get()
1142 btree_check_header(c, b); in __bch2_btree_node_get()
1144 return b; in __bch2_btree_node_get()
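
Before the node is returned, its search structures are warmed: aux_data holds the per-bset lookup tables, and each bset's slice is touched a cacheline at a time. The loop body after line 1128 is truncated in the matches; plausibly it issues a handful of prefetch() calls per bset:

    prefetch(b->aux_data);

    for_each_bset(b, t) {
            void *p = (u64 *) b->aux_data + t->aux_data_offset;

            /* assumed: pull in the first few cachelines of each table */
            prefetch(p + L1_CACHE_BYTES * 0);
            prefetch(p + L1_CACHE_BYTES * 1);
            prefetch(p + L1_CACHE_BYTES * 2);
    }
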
1169 struct btree *b; in bch2_btree_node_get() local
1174 b = btree_node_mem_ptr(k); in bch2_btree_node_get()
1177 * Check b->hash_val _before_ calling btree_node_lock() - this might not in bch2_btree_node_get()
1182 !b || in bch2_btree_node_get()
1183 b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_get()
1189 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in bch2_btree_node_get()
1195 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get()
1196 b->c.level != level || in bch2_btree_node_get()
1198 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1206 if (unlikely(btree_node_read_in_flight(b))) { in bch2_btree_node_get()
1207 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1211 prefetch(b->aux_data); in bch2_btree_node_get()
1213 for_each_bset(b, t) { in bch2_btree_node_get()
1214 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get()
1222 if (!btree_node_accessed(b)) in bch2_btree_node_get()
1223 set_btree_node_accessed(b); in bch2_btree_node_get()
1225 if (unlikely(btree_node_read_error(b))) { in bch2_btree_node_get()
1226 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1230 EBUG_ON(b->c.btree_id != path->btree_id); in bch2_btree_node_get()
1231 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get()
1232 btree_check_header(c, b); in bch2_btree_node_get()
1234 return b; in bch2_btree_node_get()
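
This variant is the fast path: the key caches a pointer to the node (btree_node_mem_ptr()), and per the comment at line 1177 its hash_val is checked before btree_node_lock() so a stale pointer is never locked, then re-checked under the lock because the node can be freed and reused in between. The shape of the path, with the fallback call's argument list assumed from line 1073:

    b = btree_node_mem_ptr(k);      /* pointer cached in the key; may be stale */

    if (unlikely(!b || b->hash_val != btree_ptr_hash_val(k)))
            return __bch2_btree_node_get(trans, path, k, level,
                                         lock_type, trace_ip);

    ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);

    /* identity re-check under the lock (line 1195); unlock and fall back
     * to the slow path if the node was recycled */
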
1245 struct btree *b; in bch2_btree_node_get_noiter() local
1251 b = btree_node_mem_ptr(k); in bch2_btree_node_get_noiter()
1252 if (b) in bch2_btree_node_get_noiter()
1256 b = btree_cache_find(bc, k); in bch2_btree_node_get_noiter()
1257 if (unlikely(!b)) { in bch2_btree_node_get_noiter()
1261 b = bch2_btree_node_fill(trans, NULL, k, btree_id, in bch2_btree_node_get_noiter()
1265 if (!b) in bch2_btree_node_get_noiter()
1268 if (IS_ERR(b) && in bch2_btree_node_get_noiter()
1272 if (IS_ERR(b)) in bch2_btree_node_get_noiter()
1276 ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_); in bch2_btree_node_get_noiter()
1282 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get_noiter()
1283 b->c.btree_id != btree_id || in bch2_btree_node_get_noiter()
1284 b->c.level != level)) { in bch2_btree_node_get_noiter()
1285 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1291 __bch2_btree_node_wait_on_read(b); in bch2_btree_node_get_noiter()
1293 prefetch(b->aux_data); in bch2_btree_node_get_noiter()
1295 for_each_bset(b, t) { in bch2_btree_node_get_noiter()
1296 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get_noiter()
1304 if (!btree_node_accessed(b)) in bch2_btree_node_get_noiter()
1305 set_btree_node_accessed(b); in bch2_btree_node_get_noiter()
1307 if (unlikely(btree_node_read_error(b))) { in bch2_btree_node_get_noiter()
1308 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1309 b = ERR_PTR(-BCH_ERR_btree_node_read_err_cached); in bch2_btree_node_get_noiter()
1313 EBUG_ON(b->c.btree_id != btree_id); in bch2_btree_node_get_noiter()
1314 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get_noiter()
1315 btree_check_header(c, b); in bch2_btree_node_get_noiter()
1318 return b; in bch2_btree_node_get_noiter()
1332 struct btree *b = btree_cache_find(bc, k); in bch2_btree_node_prefetch() local
1333 if (b) in bch2_btree_node_prefetch()
1336 b = bch2_btree_node_fill(trans, path, k, btree_id, in bch2_btree_node_prefetch()
1338 int ret = PTR_ERR_OR_ZERO(b); in bch2_btree_node_prefetch()
1341 if (b) in bch2_btree_node_prefetch()
1342 six_unlock_read(&b->c.lock); in bch2_btree_node_prefetch()
1350 struct btree *b; in bch2_btree_node_evict() local
1352 b = btree_cache_find(bc, k); in bch2_btree_node_evict()
1353 if (!b) in bch2_btree_node_evict()
1356 BUG_ON(b == btree_node_root(trans->c, b)); in bch2_btree_node_evict()
1363 __bch2_btree_node_wait_on_read(b); in bch2_btree_node_evict()
1364 __bch2_btree_node_wait_on_write(b); in bch2_btree_node_evict()
1366 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); in bch2_btree_node_evict()
1367 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); in bch2_btree_node_evict()
1368 if (unlikely(b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_evict()
1371 if (btree_node_dirty(b)) { in bch2_btree_node_evict()
1372 __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); in bch2_btree_node_evict()
1373 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1374 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1378 BUG_ON(btree_node_dirty(b)); in bch2_btree_node_evict()
1381 bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_evict()
1382 btree_node_data_free(bc, b); in bch2_btree_node_evict()
1385 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1386 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1424 void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) in bch2_btree_pos_to_text() argument
1426 __bch2_btree_pos_to_text(out, c, b->c.btree_id, b->c.level, bkey_i_to_s_c(&b->key)); in bch2_btree_pos_to_text()
1429 void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) in bch2_btree_node_to_text() argument
1435 bch2_btree_keys_stats(b, &stats); in bch2_btree_node_to_text()
1437 prt_printf(out, "l %u ", b->c.level); in bch2_btree_node_to_text()
1438 bch2_bpos_to_text(out, b->data->min_key); in bch2_btree_node_to_text()
1440 bch2_bpos_to_text(out, b->data->max_key); in bch2_btree_node_to_text()
1443 bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key)); in bch2_btree_node_to_text()
1448 bch2_bkey_format_to_text(out, &b->format); in bch2_btree_node_to_text()
1458 b->unpack_fn_len, in bch2_btree_node_to_text()
1459 b->nr.live_u64s * sizeof(u64), in bch2_btree_node_to_text()
1460 btree_buf_bytes(b) - sizeof(struct btree_node), in bch2_btree_node_to_text()
1461 b->nr.live_u64s * 100 / btree_max_u64s(c), in bch2_btree_node_to_text()
1462 b->sib_u64s[0], in bch2_btree_node_to_text()
1463 b->sib_u64s[1], in bch2_btree_node_to_text()
1465 b->nr.packed_keys, in bch2_btree_node_to_text()
1466 b->nr.unpacked_keys, in bch2_btree_node_to_text()