Lines Matching +full:cache +full:- +full:block

1 // SPDX-License-Identifier: GPL-2.0-only
6 #include "block-map.h"
13 #include "memory-alloc.h"
16 #include "action-manager.h"
17 #include "admin-state.h"
20 #include "data-vio.h"
22 #include "io-submitter.h"
23 #include "physical-zone.h"
24 #include "recovery-journal.h"
25 #include "slab-depot.h"
26 #include "status-codes.h"
30 #include "wait-queue.h"
33 * DOC: Block map eras
35 * The block map era, or maximum age, is used as follows:
37 * Each block map page, when dirty, records the earliest recovery journal block sequence number of
38 * the changes reflected in that dirty block. Sequence numbers are classified into eras: every
39 @maximum_age sequence numbers, we switch to a new era. Block map pages are assigned to eras
40 according to the sequence number they record.
42 * In the current (newest) era, block map pages are not written unless there is cache pressure. In
43 the next oldest era, each time a new journal block is written 1/@maximum_age of the pages in
44 this era are issued for write. In all older eras, pages are issued for write immediately.
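To make the era arithmetic concrete, here is a minimal sketch (illustrative only, not taken from block-map.c; the helper names and fixed-width types are assumptions) of how a dirty page's recorded journal sequence number maps to an era, and how the gap between that era and the current one decides the write policy described above:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: the era containing a journal sequence number. */
static inline uint64_t era_of(uint64_t sequence_number, uint64_t maximum_age)
{
	return sequence_number / maximum_age;
}

/*
 * Hypothetical policy check: pages in the current era wait for cache
 * pressure, pages one era old are written incrementally, and anything
 * older must be written immediately.
 */
static inline bool must_write_immediately(uint64_t page_sequence,
					  uint64_t current_sequence,
					  uint64_t maximum_age)
{
	return era_of(page_sequence, maximum_age) + 1 <
	       era_of(current_sequence, maximum_age);
}

For example, with maximum_age = 4, a page that recorded sequence number 5 (era 1) must be written immediately once the journal reaches sequence number 12 (era 3), while at sequence number 11 (era 2) it would still fall in the "next oldest era" incremental category.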
121 * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.
128 return info->state == PS_DIRTY; in is_dirty()
133 return (info->state == PS_RESIDENT) || (info->state == PS_DIRTY); in is_present()
138 return (info->state == PS_INCOMING) || (info->state == PS_OUTGOING); in is_in_flight()
143 return info->state == PS_INCOMING; in is_incoming()
148 return info->state == PS_OUTGOING; in is_outgoing()
158 struct vdo_page_cache *cache = info->cache; in get_page_buffer() local
160 return &cache->pages[(info - cache->infos) * VDO_BLOCK_SIZE]; in get_page_buffer()
171 vdo_assert_completion_type(&completion->completion, VDO_PAGE_COMPLETION); in page_completion_from_waiter()
176 * initialize_info() - Initialize all page info structures and put them on the free list.
180 static int initialize_info(struct vdo_page_cache *cache) in initialize_info() argument
184 INIT_LIST_HEAD(&cache->free_list); in initialize_info()
185 for (info = cache->infos; info < cache->infos + cache->page_count; info++) { in initialize_info()
188 info->cache = cache; in initialize_info()
189 info->state = PS_FREE; in initialize_info()
190 info->pbn = NO_PAGE; in initialize_info()
192 result = create_metadata_vio(cache->vdo, VIO_TYPE_BLOCK_MAP, in initialize_info()
194 get_page_buffer(info), &info->vio); in initialize_info()
199 info->vio->completion.callback_thread_id = cache->zone->thread_id; in initialize_info()
201 INIT_LIST_HEAD(&info->state_entry); in initialize_info()
202 list_add_tail(&info->state_entry, &cache->free_list); in initialize_info()
203 INIT_LIST_HEAD(&info->lru_entry); in initialize_info()
210 * allocate_cache_components() - Allocate components of the cache which require their own
217 static int __must_check allocate_cache_components(struct vdo_page_cache *cache) in allocate_cache_components() argument
219 u64 size = cache->page_count * (u64) VDO_BLOCK_SIZE; in allocate_cache_components()
222 result = vdo_allocate(cache->page_count, struct page_info, "page infos", in allocate_cache_components()
223 &cache->infos); in allocate_cache_components()
227 result = vdo_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages); in allocate_cache_components()
231 result = vdo_int_map_create(cache->page_count, &cache->page_map); in allocate_cache_components()
235 return initialize_info(cache); in allocate_cache_components()
239 * assert_on_cache_thread() - Assert that a function has been called on the VDO page cache's
242 static inline void assert_on_cache_thread(struct vdo_page_cache *cache, in assert_on_cache_thread() argument
247 VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), in assert_on_cache_thread()
248 "%s() must only be called on cache thread %d, not thread %d", in assert_on_cache_thread()
249 function_name, cache->zone->thread_id, thread_id); in assert_on_cache_thread()
252 /** assert_io_allowed() - Assert that a page cache may issue I/O. */
253 static inline void assert_io_allowed(struct vdo_page_cache *cache) in assert_io_allowed() argument
255 VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), in assert_io_allowed()
256 "VDO page cache may issue I/O"); in assert_io_allowed()
259 /** report_cache_pressure() - Log and, if enabled, report cache pressure. */
260 static void report_cache_pressure(struct vdo_page_cache *cache) in report_cache_pressure() argument
262 ADD_ONCE(cache->stats.cache_pressure, 1); in report_cache_pressure()
263 if (cache->waiter_count > cache->page_count) { in report_cache_pressure()
264 if ((cache->pressure_report % LOG_INTERVAL) == 0) in report_cache_pressure()
265 vdo_log_info("page cache pressure %u", cache->stats.cache_pressure); in report_cache_pressure()
267 if (++cache->pressure_report >= DISPLAY_INTERVAL) in report_cache_pressure()
268 cache->pressure_report = 0; in report_cache_pressure()
273 * get_page_state_name() - Return the name of a page state.
297 * update_counter() - Update the counter associated with a given state.
303 struct block_map_statistics *stats = &info->cache->stats; in update_counter()
305 switch (info->state) { in update_counter()
307 ADD_ONCE(stats->free_pages, delta); in update_counter()
311 ADD_ONCE(stats->incoming_pages, delta); in update_counter()
315 ADD_ONCE(stats->outgoing_pages, delta); in update_counter()
319 ADD_ONCE(stats->failed_pages, delta); in update_counter()
323 ADD_ONCE(stats->clean_pages, delta); in update_counter()
327 ADD_ONCE(stats->dirty_pages, delta); in update_counter()
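The ADD_ONCE() calls above follow the single-writer convention noted at line 121: these statistics are mutated only on the logical zone thread, so the writer needs no lock and other threads can sample the counters with READ_ONCE(). A minimal sketch of that pattern, assuming the kernel's WRITE_ONCE()/READ_ONCE() helpers (the ADD_ONCE_SKETCH macro and example_stats type here are illustrative, not the driver's definitions):

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/types.h>	/* u64 */

#define ADD_ONCE_SKETCH(value, delta) WRITE_ONCE(value, (value) + (delta))

struct example_stats {
	u64 dirty_pages;	/* mutated only on the owning zone thread */
};

/* Runs only on the owning thread: plain read, marked store. */
static void count_dirty_page(struct example_stats *stats)
{
	ADD_ONCE_SKETCH(stats->dirty_pages, 1);
}

/* May run on any thread: returns a possibly stale snapshot. */
static u64 sample_dirty_pages(struct example_stats *stats)
{
	return READ_ONCE(stats->dirty_pages);
}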
335 /** update_lru() - Update the lru information for an active page. */
338 if (info->cache->lru_list.prev != &info->lru_entry) in update_lru()
339 list_move_tail(&info->lru_entry, &info->cache->lru_list); in update_lru()
343 * set_info_state() - Set the state of a page_info and put it on the right list, adjusting
348 if (new_state == info->state) in set_info_state()
351 update_counter(info, -1); in set_info_state()
352 info->state = new_state; in set_info_state()
355 switch (info->state) { in set_info_state()
358 list_move_tail(&info->state_entry, &info->cache->free_list); in set_info_state()
362 list_move_tail(&info->state_entry, &info->cache->outgoing_list); in set_info_state()
369 list_del_init(&info->state_entry); in set_info_state()
373 /** set_info_pbn() - Set the pbn for an info, updating the map as needed. */
376 struct vdo_page_cache *cache = info->cache; in set_info_pbn() local
379 int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE), in set_info_pbn()
384 if (info->pbn != NO_PAGE) in set_info_pbn()
385 vdo_int_map_remove(cache->page_map, info->pbn); in set_info_pbn()
387 info->pbn = pbn; in set_info_pbn()
390 result = vdo_int_map_put(cache->page_map, pbn, info, true, NULL); in set_info_pbn()
397 /** reset_page_info() - Reset page info to represent an unallocated page. */
402 result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy"); in reset_page_info()
406 result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting), in reset_page_info()
413 list_del_init(&info->lru_entry); in reset_page_info()
418 * find_free_page() - Find a free page.
422 static struct page_info * __must_check find_free_page(struct vdo_page_cache *cache) in find_free_page() argument
426 info = list_first_entry_or_null(&cache->free_list, struct page_info, in find_free_page()
429 list_del_init(&info->state_entry); in find_free_page()
435 * find_page() - Find the page info (if any) associated with a given pbn.
436 * @pbn: The absolute physical block number of the page.
440 static struct page_info * __must_check find_page(struct vdo_page_cache *cache, in find_page() argument
443 if ((cache->last_found != NULL) && (cache->last_found->pbn == pbn)) in find_page()
444 return cache->last_found; in find_page()
446 cache->last_found = vdo_int_map_get(cache->page_map, pbn); in find_page()
447 return cache->last_found; in find_page()
451 * select_lru_page() - Determine which page is least recently used.
453 * Picks the least recently used from among the non-busy entries at the front of each of the lru
460 static struct page_info * __must_check select_lru_page(struct vdo_page_cache *cache) in select_lru_page() argument
464 list_for_each_entry(info, &cache->lru_list, lru_entry) in select_lru_page()
465 if ((info->busy == 0) && !is_in_flight(info)) in select_lru_page()
474 * complete_with_page() - Helper to complete the VDO Page Completion request successfully.
481 bool available = vdo_page_comp->writable ? is_present(info) : is_valid(info); in complete_with_page()
485 "Requested cache page %llu in state %s is not %s", in complete_with_page()
486 (unsigned long long) info->pbn, in complete_with_page()
487 get_page_state_name(info->state), in complete_with_page()
488 vdo_page_comp->writable ? "present" : "valid"); in complete_with_page()
489 vdo_fail_completion(&vdo_page_comp->completion, VDO_BAD_PAGE); in complete_with_page()
493 vdo_page_comp->info = info; in complete_with_page()
494 vdo_page_comp->ready = true; in complete_with_page()
495 vdo_finish_completion(&vdo_page_comp->completion); in complete_with_page()
499 * complete_waiter_with_error() - Complete a page completion with an error code.
509 vdo_fail_completion(&page_completion_from_waiter(waiter)->completion, *result); in complete_waiter_with_error()
513 * complete_waiter_with_page() - Complete a page completion with a page.
525 * distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result.
543 info->busy += num_pages; in distribute_page_over_waitq()
550 * set_persistent_error() - Set a persistent error which all requests will receive in the future.
556 static void set_persistent_error(struct vdo_page_cache *cache, const char *context, in set_persistent_error() argument
560 /* If we're already read-only, there's no need to log. */ in set_persistent_error()
561 struct vdo *vdo = cache->vdo; in set_persistent_error()
564 vdo_log_error_strerror(result, "VDO Page Cache persistent error: %s", in set_persistent_error()
569 assert_on_cache_thread(cache, __func__); in set_persistent_error()
571 vdo_waitq_notify_all_waiters(&cache->free_waiters, in set_persistent_error()
573 cache->waiter_count = 0; in set_persistent_error()
575 for (info = cache->infos; info < cache->infos + cache->page_count; info++) { in set_persistent_error()
576 vdo_waitq_notify_all_waiters(&info->waiting, in set_persistent_error()
582 * validate_completed_page() - Check that a page completion which is being freed to the cache
593 result = VDO_ASSERT(completion->ready, "VDO Page completion not ready"); in validate_completed_page()
597 result = VDO_ASSERT(completion->info != NULL, in validate_completed_page()
602 result = VDO_ASSERT(completion->info->pbn == completion->pbn, in validate_completed_page()
607 result = VDO_ASSERT(is_valid(completion->info), in validate_completed_page()
613 result = VDO_ASSERT(completion->writable, in validate_completed_page()
624 if (vdo_is_state_draining(&zone->state) && in check_for_drain_complete()
625 (zone->active_lookups == 0) && in check_for_drain_complete()
626 !vdo_waitq_has_waiters(&zone->flush_waiters) && in check_for_drain_complete()
627 !is_vio_pool_busy(zone->vio_pool) && in check_for_drain_complete()
628 (zone->page_cache.outstanding_reads == 0) && in check_for_drain_complete()
629 (zone->page_cache.outstanding_writes == 0)) { in check_for_drain_complete()
630 vdo_finish_draining_with_result(&zone->state, in check_for_drain_complete()
631 (vdo_is_read_only(zone->block_map->vdo) ? in check_for_drain_complete()
638 vdo_enter_read_only_mode(zone->block_map->vdo, result); in enter_zone_read_only_mode()
641 * We are in read-only mode, so we won't ever write any page out. in enter_zone_read_only_mode()
644 vdo_waitq_init(&zone->flush_waiters); in enter_zone_read_only_mode()
657 enter_zone_read_only_mode(completion->info->cache->zone, result); in validate_completed_page_or_enter_read_only_mode()
662 * handle_load_error() - Handle page load errors.
667 int result = completion->result; in handle_load_error()
668 struct page_info *info = completion->parent; in handle_load_error()
669 struct vdo_page_cache *cache = info->cache; in handle_load_error() local
671 assert_on_cache_thread(cache, __func__); in handle_load_error()
673 vdo_enter_read_only_mode(cache->zone->block_map->vdo, result); in handle_load_error()
674 ADD_ONCE(cache->stats.failed_reads, 1); in handle_load_error()
676 vdo_waitq_notify_all_waiters(&info->waiting, complete_waiter_with_error, &result); in handle_load_error()
681 * ensure that the above work can't cause the page cache to be freed out from under us. in handle_load_error()
683 cache->outstanding_reads--; in handle_load_error()
684 check_for_drain_complete(cache->zone); in handle_load_error()
688 * page_is_loaded() - Callback used when a page has been loaded.
693 struct page_info *info = completion->parent; in page_is_loaded()
694 struct vdo_page_cache *cache = info->cache; in page_is_loaded() local
695 nonce_t nonce = info->cache->zone->block_map->nonce; in page_is_loaded()
699 assert_on_cache_thread(cache, __func__); in page_is_loaded()
702 validity = vdo_validate_block_map_page(page, nonce, info->pbn); in page_is_loaded()
707 (unsigned long long) info->pbn, in page_is_loaded()
715 vdo_format_block_map_page(page, nonce, info->pbn, false); in page_is_loaded()
717 info->recovery_lock = 0; in page_is_loaded()
719 distribute_page_over_waitq(info, &info->waiting); in page_is_loaded()
723 * ensure that the above work can't cause the page cache to be freed out from under us. in page_is_loaded()
725 cache->outstanding_reads--; in page_is_loaded()
726 check_for_drain_complete(cache->zone); in page_is_loaded()
730 * handle_rebuild_read_error() - Handle a read error during a read-only rebuild.
735 struct page_info *info = completion->parent; in handle_rebuild_read_error()
736 struct vdo_page_cache *cache = info->cache; in handle_rebuild_read_error() local
738 assert_on_cache_thread(cache, __func__); in handle_rebuild_read_error()
741 * We are doing a read-only rebuild, so treat this as a successful read in handle_rebuild_read_error()
745 ADD_ONCE(cache->stats.failed_reads, 1); in handle_rebuild_read_error()
753 struct vio *vio = bio->bi_private; in load_cache_page_endio()
754 struct page_info *info = vio->completion.parent; in load_cache_page_endio()
756 continue_vio_after_io(vio, page_is_loaded, info->cache->zone->thread_id); in load_cache_page_endio()
760 * launch_page_load() - Begin the process of loading a page.
769 struct vdo_page_cache *cache = info->cache; in launch_page_load() local
771 assert_io_allowed(cache); in launch_page_load()
777 result = VDO_ASSERT((info->busy == 0), "Page is not busy before loading."); in launch_page_load()
782 cache->outstanding_reads++; in launch_page_load()
783 ADD_ONCE(cache->stats.pages_loaded, 1); in launch_page_load()
784 callback = (cache->rebuilding ? handle_rebuild_read_error : handle_load_error); in launch_page_load()
785 vdo_submit_metadata_vio(info->vio, pbn, load_cache_page_endio, in launch_page_load()
792 /** handle_flush_error() - Handle errors flushing the layer. */
795 struct page_info *info = completion->parent; in handle_flush_error()
798 set_persistent_error(info->cache, "flush failed", completion->result); in handle_flush_error()
804 struct vio *vio = bio->bi_private; in flush_endio()
805 struct page_info *info = vio->completion.parent; in flush_endio()
807 continue_vio_after_io(vio, write_pages, info->cache->zone->thread_id); in flush_endio()
810 /** save_pages() - Attempt to save the outgoing pages by first flushing the layer. */
811 static void save_pages(struct vdo_page_cache *cache) in save_pages() argument
816 if ((cache->pages_in_flush > 0) || (cache->pages_to_flush == 0)) in save_pages()
819 assert_io_allowed(cache); in save_pages()
821 info = list_first_entry(&cache->outgoing_list, struct page_info, state_entry); in save_pages()
823 cache->pages_in_flush = cache->pages_to_flush; in save_pages()
824 cache->pages_to_flush = 0; in save_pages()
825 ADD_ONCE(cache->stats.flush_count, 1); in save_pages()
827 vio = info->vio; in save_pages()
838 * schedule_page_save() - Add a page to the outgoing list of pages waiting to be saved.
844 if (info->busy > 0) { in schedule_page_save()
845 info->write_status = WRITE_STATUS_DEFERRED; in schedule_page_save()
849 info->cache->pages_to_flush++; in schedule_page_save()
850 info->cache->outstanding_writes++; in schedule_page_save()
855 * launch_page_save() - Add a page to outgoing pages waiting to be saved, and then start saving
861 save_pages(info->cache); in launch_page_save()
865 * completion_needs_page() - Determine whether a given vdo_page_completion (as a waiter) is
877 return (page_completion_from_waiter(waiter)->pbn == *pbn); in completion_needs_page()
881 * allocate_free_page() - Allocate a free page to the first completion in the waiting queue, and
889 struct vdo_page_cache *cache = info->cache; in allocate_free_page() local
891 assert_on_cache_thread(cache, __func__); in allocate_free_page()
893 if (!vdo_waitq_has_waiters(&cache->free_waiters)) { in allocate_free_page()
894 if (cache->stats.cache_pressure > 0) { in allocate_free_page()
895 vdo_log_info("page cache pressure relieved"); in allocate_free_page()
896 WRITE_ONCE(cache->stats.cache_pressure, 0); in allocate_free_page()
904 set_persistent_error(cache, "cannot reset page info", result); in allocate_free_page()
908 oldest_waiter = vdo_waitq_get_first_waiter(&cache->free_waiters); in allocate_free_page()
909 pbn = page_completion_from_waiter(oldest_waiter)->pbn; in allocate_free_page()
915 vdo_waitq_dequeue_matching_waiters(&cache->free_waiters, completion_needs_page, in allocate_free_page()
916 &pbn, &info->waiting); in allocate_free_page()
917 cache->waiter_count -= vdo_waitq_num_waiters(&info->waiting); in allocate_free_page()
921 vdo_waitq_notify_all_waiters(&info->waiting, in allocate_free_page()
927 * discard_a_page() - Begin the process of discarding a page.
931 * cache is not big enough.
936 static void discard_a_page(struct vdo_page_cache *cache) in discard_a_page() argument
938 struct page_info *info = select_lru_page(cache); in discard_a_page()
941 report_cache_pressure(cache); in discard_a_page()
953 cache->discard_count++; in discard_a_page()
954 info->write_status = WRITE_STATUS_DISCARD; in discard_a_page()
959 * discard_page_for_completion() - Helper used to trigger a discard so that the completion can get
964 struct vdo_page_cache *cache = vdo_page_comp->cache; in discard_page_for_completion() local
966 cache->waiter_count++; in discard_page_for_completion()
967 vdo_waitq_enqueue_waiter(&cache->free_waiters, &vdo_page_comp->waiter); in discard_page_for_completion()
968 discard_a_page(cache); in discard_page_for_completion()
972 * discard_page_if_needed() - Helper used to trigger a discard if the cache needs another free
974 * @cache: The page cache.
976 static void discard_page_if_needed(struct vdo_page_cache *cache) in discard_page_if_needed() argument
978 if (cache->waiter_count > cache->discard_count) in discard_page_if_needed()
979 discard_a_page(cache); in discard_page_if_needed()
983 * write_has_finished() - Inform the cache that a write has finished (possibly with an error).
990 bool was_discard = (info->write_status == WRITE_STATUS_DISCARD); in write_has_finished()
992 assert_on_cache_thread(info->cache, __func__); in write_has_finished()
993 info->cache->outstanding_writes--; in write_has_finished()
995 info->write_status = WRITE_STATUS_NORMAL; in write_has_finished()
1000 * handle_page_write_error() - Handler for page write errors.
1005 int result = completion->result; in handle_page_write_error()
1006 struct page_info *info = completion->parent; in handle_page_write_error()
1007 struct vdo_page_cache *cache = info->cache; in handle_page_write_error() local
1011 /* If we're already read-only, write failures are to be expected. */ in handle_page_write_error()
1014 "failed to write block map page %llu", in handle_page_write_error()
1015 (unsigned long long) info->pbn); in handle_page_write_error()
1019 ADD_ONCE(cache->stats.failed_writes, 1); in handle_page_write_error()
1020 set_persistent_error(cache, "cannot write page", result); in handle_page_write_error()
1023 discard_page_if_needed(cache); in handle_page_write_error()
1025 check_for_drain_complete(cache->zone); in handle_page_write_error()
1032 struct vio *vio = bio->bi_private; in write_cache_page_endio()
1033 struct page_info *info = vio->completion.parent; in write_cache_page_endio()
1035 continue_vio_after_io(vio, page_is_written_out, info->cache->zone->thread_id); in write_cache_page_endio()
1039 * page_is_written_out() - Callback used when a page has been written out.
1046 struct page_info *info = completion->parent; in page_is_written_out()
1047 struct vdo_page_cache *cache = info->cache; in page_is_written_out() local
1050 if (!page->header.initialized) { in page_is_written_out()
1051 page->header.initialized = true; in page_is_written_out()
1052 vdo_submit_metadata_vio(info->vio, info->pbn, in page_is_written_out()
1060 vdo_release_recovery_journal_block_reference(cache->zone->block_map->journal, in page_is_written_out()
1061 info->recovery_lock, in page_is_written_out()
1063 cache->zone->zone_number); in page_is_written_out()
1064 info->recovery_lock = 0; in page_is_written_out()
1066 reclaimed = (!was_discard || (info->busy > 0) || vdo_waitq_has_waiters(&info->waiting)); in page_is_written_out()
1070 reclamations = distribute_page_over_waitq(info, &info->waiting); in page_is_written_out()
1071 ADD_ONCE(cache->stats.reclaimed, reclamations); in page_is_written_out()
1074 cache->discard_count--; in page_is_written_out()
1077 discard_page_if_needed(cache); in page_is_written_out()
1081 check_for_drain_complete(cache->zone); in page_is_written_out()
1085 * write_pages() - Write the batch of pages which were covered by the layer flush which just
1093 struct vdo_page_cache *cache = ((struct page_info *) flush_completion->parent)->cache; in write_pages() local
1096 * We need to cache these two values on the stack since it is possible for the last in write_pages()
1097 * page info to cause the page cache to get freed. Hence once we launch the last page, in write_pages()
1098 * it may be unsafe to dereference the cache. in write_pages()
1100 bool has_unflushed_pages = (cache->pages_to_flush > 0); in write_pages()
1101 page_count_t pages_in_flush = cache->pages_in_flush; in write_pages()
1103 cache->pages_in_flush = 0; in write_pages()
1104 while (pages_in_flush-- > 0) { in write_pages()
1106 list_first_entry(&cache->outgoing_list, struct page_info, in write_pages()
1109 list_del_init(&info->state_entry); in write_pages()
1110 if (vdo_is_read_only(info->cache->vdo)) { in write_pages()
1111 struct vdo_completion *completion = &info->vio->completion; in write_pages()
1114 completion->callback = page_is_written_out; in write_pages()
1115 completion->error_handler = handle_page_write_error; in write_pages()
1119 ADD_ONCE(info->cache->stats.pages_saved, 1); in write_pages()
1120 vdo_submit_metadata_vio(info->vio, info->pbn, write_cache_page_endio, in write_pages()
1126 * If there are unflushed pages, the cache can't have been freed, so this call is in write_pages()
1129 save_pages(cache); in write_pages()
1134 * vdo_release_page_completion() - Release a VDO Page Completion.
1144 struct vdo_page_cache *cache; in vdo_release_page_completion() local
1146 if (completion->result == VDO_SUCCESS) { in vdo_release_page_completion()
1150 if (--page_completion->info->busy == 0) in vdo_release_page_completion()
1151 discard_info = page_completion->info; in vdo_release_page_completion()
1154 VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), in vdo_release_page_completion()
1157 page_completion->info = NULL; in vdo_release_page_completion()
1158 cache = page_completion->cache; in vdo_release_page_completion()
1159 assert_on_cache_thread(cache, __func__); in vdo_release_page_completion()
1162 if (discard_info->write_status == WRITE_STATUS_DEFERRED) { in vdo_release_page_completion()
1163 discard_info->write_status = WRITE_STATUS_NORMAL; in vdo_release_page_completion()
1171 discard_page_if_needed(cache); in vdo_release_page_completion()
1176 * load_page_for_completion() - Helper function to load a page as described by a VDO Page
1184 vdo_waitq_enqueue_waiter(&info->waiting, &vdo_page_comp->waiter); in load_page_for_completion()
1185 result = launch_page_load(info, vdo_page_comp->pbn); in load_page_for_completion()
1187 vdo_waitq_notify_all_waiters(&info->waiting, in load_page_for_completion()
1193 * vdo_get_page() - Initialize a page completion and get a block map page.
1195 * @zone: The block map zone of the desired page.
1196 * @pbn: The absolute physical block of the desired page.
1205 * resident in the cache and marked busy. All callers must call vdo_release_page_completion()
1213 struct vdo_page_cache *cache = &zone->page_cache; in vdo_get_page() local
1214 struct vdo_completion *completion = &page_completion->completion; in vdo_get_page()
1217 assert_on_cache_thread(cache, __func__); in vdo_get_page()
1218 VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), in vdo_get_page()
1224 .cache = cache, in vdo_get_page()
1227 vdo_initialize_completion(completion, cache->vdo, VDO_PAGE_COMPLETION); in vdo_get_page()
1229 cache->zone->thread_id, parent); in vdo_get_page()
1230 completion->requeue = requeue; in vdo_get_page()
1232 if (page_completion->writable && vdo_is_read_only(cache->vdo)) { in vdo_get_page()
1237 if (page_completion->writable) in vdo_get_page()
1238 ADD_ONCE(cache->stats.write_count, 1); in vdo_get_page()
1240 ADD_ONCE(cache->stats.read_count, 1); in vdo_get_page()
1242 info = find_page(cache, page_completion->pbn); in vdo_get_page()
1244 /* The page is in the cache already. */ in vdo_get_page()
1245 if ((info->write_status == WRITE_STATUS_DEFERRED) || in vdo_get_page()
1247 (is_outgoing(info) && page_completion->writable)) { in vdo_get_page()
1249 ADD_ONCE(cache->stats.wait_for_page, 1); in vdo_get_page()
1250 vdo_waitq_enqueue_waiter(&info->waiting, &page_completion->waiter); in vdo_get_page()
1256 ADD_ONCE(cache->stats.found_in_cache, 1); in vdo_get_page()
1258 ADD_ONCE(cache->stats.read_outgoing, 1); in vdo_get_page()
1260 info->busy++; in vdo_get_page()
1270 info = find_free_page(cache); in vdo_get_page()
1272 ADD_ONCE(cache->stats.fetch_required, 1); in vdo_get_page()
1278 ADD_ONCE(cache->stats.discard_required, 1); in vdo_get_page()
1283 * vdo_request_page_write() - Request that a VDO page be written out as soon as it is not busy.
1294 info = vdo_page_comp->info; in vdo_request_page_write()
1300 * vdo_get_cached_page() - Get the block map page from a page completion.
1315 *page_ptr = (struct block_map_page *) get_page_buffer(vpc->info); in vdo_get_cached_page()
1321 * vdo_invalidate_page_cache() - Invalidate all entries in the VDO page cache.
1323 * There must not be any dirty pages in the cache.
1327 int vdo_invalidate_page_cache(struct vdo_page_cache *cache) in vdo_invalidate_page_cache() argument
1331 assert_on_cache_thread(cache, __func__); in vdo_invalidate_page_cache()
1334 for (info = cache->infos; info < cache->infos + cache->page_count; info++) { in vdo_invalidate_page_cache()
1335 int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages"); in vdo_invalidate_page_cache()
1341 /* Reset the page map by re-allocating it. */ in vdo_invalidate_page_cache()
1342 vdo_int_map_free(vdo_forget(cache->page_map)); in vdo_invalidate_page_cache()
1343 return vdo_int_map_create(cache->page_count, &cache->page_map); in vdo_invalidate_page_cache()
1347 * get_tree_page_by_index() - Get the tree page for a given height and page index.
1359 for (segment = 0; segment < forest->segments; segment++) { in get_tree_page_by_index()
1360 page_number_t border = forest->boundaries[segment].levels[height - 1]; in get_tree_page_by_index()
1363 struct block_map_tree *tree = &forest->trees[root_index]; in get_tree_page_by_index()
1365 return &(tree->segments[segment].levels[height - 1][page_index - offset]); in get_tree_page_by_index()
1378 return get_tree_page_by_index(zone->block_map->forest, lock->root_index, in get_tree_page()
1379 lock->height, in get_tree_page()
1380 lock->tree_slots[lock->height].page_index); in get_tree_page()
1383 /** vdo_copy_valid_page() - Validate and copy a buffer to a page. */
1408 * in_cyclic_range() - Check whether the given value is between the lower and upper bounds, within
1409 * a cyclic range of values from 0 to (modulus - 1).
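Generations and eras here are small counters that wrap (the callers below pass a modulus of 1 << 8), so the bounds check has to be cyclic rather than a plain comparison. A minimal sketch of such a check under those assumptions (it mirrors the idea and is not claimed to be the driver's exact implementation):

#include <stdbool.h>
#include <stdint.h>

/*
 * Is 'value' within the cyclic interval [lower, upper] in a counter space
 * that wraps at 'modulus'? Assumes all inputs are already less than
 * 'modulus' and that 'modulus' is small enough not to overflow uint16_t.
 */
static bool in_cyclic_range_sketch(uint16_t lower, uint16_t value,
				   uint16_t upper, uint16_t modulus)
{
	if (value < lower)
		value += modulus;	/* unwrap a value past the wrap point */
	if (upper < lower)
		upper += modulus;	/* unwrap an interval that wraps */
	return value <= upper;
}

With modulus = 1 << 8, for instance, generation 250 is reported as lying between 245 and 3, which is exactly the wrap-around case the is_not_older() assertions below have to tolerate.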
1429 * is_not_older() - Check whether a generation is strictly older than some other generation in the
1441 result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) && in is_not_older()
1442 in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)), in is_not_older()
1444 a, b, zone->oldest_generation, zone->generation); in is_not_older()
1450 return in_cyclic_range(b, a, zone->generation, 1 << 8); in is_not_older()
1457 result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0), in release_generation()
1464 zone->dirty_page_counts[generation]--; in release_generation()
1465 while ((zone->dirty_page_counts[zone->oldest_generation] == 0) && in release_generation()
1466 (zone->oldest_generation != zone->generation)) in release_generation()
1467 zone->oldest_generation++; in release_generation()
1475 bool decrement_old = vdo_waiter_is_waiting(&page->waiter); in set_generation()
1476 u8 old_generation = page->generation; in set_generation()
1481 page->generation = new_generation; in set_generation()
1482 new_count = ++zone->dirty_page_counts[new_generation]; in set_generation()
1504 waiter->callback = write_page_callback; in acquire_vio()
1505 acquire_vio_from_pool(zone->vio_pool, waiter); in acquire_vio()
1511 u8 generation = zone->generation + 1; in attempt_increment()
1513 if (zone->oldest_generation == generation) in attempt_increment()
1516 zone->generation = generation; in attempt_increment()
1523 if ((zone->flusher == NULL) && attempt_increment(zone)) { in enqueue_page()
1524 zone->flusher = page; in enqueue_page()
1525 acquire_vio(&page->waiter, zone); in enqueue_page()
1529 vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter); in enqueue_page()
1537 if (page->generation == write_context->generation) { in write_page_if_not_dirtied()
1538 acquire_vio(waiter, write_context->zone); in write_page_if_not_dirtied()
1542 enqueue_page(page, write_context->zone); in write_page_if_not_dirtied()
1547 return_vio_to_pool(zone->vio_pool, vio); in return_to_pool()
1557 struct tree_page *page = completion->parent; in finish_page_write()
1558 struct block_map_zone *zone = pooled->context; in finish_page_write()
1560 vdo_release_recovery_journal_block_reference(zone->block_map->journal, in finish_page_write()
1561 page->writing_recovery_lock, in finish_page_write()
1563 zone->zone_number); in finish_page_write()
1565 dirty = (page->writing_generation != page->generation); in finish_page_write()
1566 release_generation(zone, page->writing_generation); in finish_page_write()
1567 page->writing = false; in finish_page_write()
1569 if (zone->flusher == page) { in finish_page_write()
1572 .generation = page->writing_generation, in finish_page_write()
1575 vdo_waitq_notify_all_waiters(&zone->flush_waiters, in finish_page_write()
1582 zone->flusher = NULL; in finish_page_write()
1587 } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) && in finish_page_write()
1589 zone->flusher = container_of(vdo_waitq_dequeue_waiter(&zone->flush_waiters), in finish_page_write()
1591 write_page(zone->flusher, pooled); in finish_page_write()
1600 int result = completion->result; in handle_write_error()
1603 struct block_map_zone *zone = pooled->context; in handle_write_error()
1616 struct block_map_zone *zone = pooled->context; in write_initialized_page()
1617 struct tree_page *tree_page = completion->parent; in write_initialized_page()
1618 struct block_map_page *page = (struct block_map_page *) vio->data; in write_initialized_page()
1625 page->header.initialized = true; in write_initialized_page()
1627 if (zone->flusher == tree_page) in write_initialized_page()
1637 struct pooled_vio *vio = bio->bi_private; in write_page_endio()
1638 struct block_map_zone *zone = vio->context; in write_page_endio()
1639 struct block_map_page *page = (struct block_map_page *) vio->vio.data; in write_page_endio()
1641 continue_vio_after_io(&vio->vio, in write_page_endio()
1642 (page->header.initialized ? in write_page_endio()
1644 zone->thread_id); in write_page_endio()
1649 struct vdo_completion *completion = &vio->vio.completion; in write_page()
1650 struct block_map_zone *zone = vio->context; in write_page()
1653 if ((zone->flusher != tree_page) && in write_page()
1654 is_not_older(zone, tree_page->generation, zone->generation)) { in write_page()
1656 * This page was re-dirtied after the last flush was issued, hence we need to do in write_page()
1664 completion->parent = tree_page; in write_page()
1665 memcpy(vio->vio.data, tree_page->page_buffer, VDO_BLOCK_SIZE); in write_page()
1666 completion->callback_thread_id = zone->thread_id; in write_page()
1668 tree_page->writing = true; in write_page()
1669 tree_page->writing_generation = tree_page->generation; in write_page()
1670 tree_page->writing_recovery_lock = tree_page->recovery_lock; in write_page()
1673 tree_page->recovery_lock = 0; in write_page()
1681 if (page->header.initialized) { in write_page()
1686 page->header.initialized = true; in write_page()
1687 vdo_submit_metadata_vio(&vio->vio, vdo_get_block_map_page_pbn(page), in write_page()
1697 struct tree_lock *lock = &data_vio->tree_lock; in release_page_lock()
1699 VDO_ASSERT_LOG_ONLY(lock->locked, in release_page_lock()
1700 "release of unlocked block map page %s for key %llu in tree %u", in release_page_lock()
1701 what, (unsigned long long) lock->key, lock->root_index); in release_page_lock()
1703 zone = data_vio->logical.zone->block_map_zone; in release_page_lock()
1704 lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key); in release_page_lock()
1706 "block map page %s mismatch for key %llu in tree %u", in release_page_lock()
1707 what, (unsigned long long) lock->key, lock->root_index); in release_page_lock()
1708 lock->locked = false; in release_page_lock()
1713 data_vio->tree_lock.height = 0; in finish_lookup()
1715 --data_vio->logical.zone->block_map_zone->active_lookups; in finish_lookup()
1718 data_vio->vio.completion.error_handler = handle_data_vio_error; in finish_lookup()
1727 if (!data_vio->write) { in abort_lookup_for_waiter()
1740 enter_zone_read_only_mode(data_vio->logical.zone->block_map_zone, result); in abort_lookup()
1742 if (data_vio->tree_lock.locked) { in abort_lookup()
1744 vdo_waitq_notify_all_waiters(&data_vio->tree_lock.waiters, in abort_lookup()
1762 vdo_is_state_compressed(mapping->state) || in is_invalid_tree_entry()
1763 (vdo_is_mapped_location(mapping) && (mapping->pbn == VDO_ZERO_BLOCK))) in is_invalid_tree_entry()
1770 return !vdo_is_physical_data_block(vdo->depot, mapping->pbn); in is_invalid_tree_entry()
1780 struct tree_lock *lock = &data_vio->tree_lock; in continue_with_loaded_page()
1781 struct block_map_tree_slot slot = lock->tree_slots[lock->height]; in continue_with_loaded_page()
1783 vdo_unpack_block_map_entry(&page->entries[slot.block_map_slot.slot]); in continue_with_loaded_page()
1785 if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) { in continue_with_loaded_page()
1787 "Invalid block map tree PBN: %llu with state %u for page index %u at height %u", in continue_with_loaded_page()
1789 lock->tree_slots[lock->height - 1].page_index, in continue_with_loaded_page()
1790 lock->height - 1); in continue_with_loaded_page()
1797 allocate_block_map_page(data_vio->logical.zone->block_map_zone, in continue_with_loaded_page()
1802 lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn; in continue_with_loaded_page()
1803 if (lock->height == 1) { in continue_with_loaded_page()
1809 load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio); in continue_with_loaded_page()
1816 data_vio->tree_lock.height--; in continue_load_for_waiter()
1828 struct data_vio *data_vio = completion->parent; in finish_block_map_page_load()
1829 struct block_map_zone *zone = pooled->context; in finish_block_map_page_load()
1830 struct tree_lock *tree_lock = &data_vio->tree_lock; in finish_block_map_page_load()
1832 tree_lock->height--; in finish_block_map_page_load()
1833 pbn = tree_lock->tree_slots[tree_lock->height].block_map_slot.pbn; in finish_block_map_page_load()
1835 page = (struct block_map_page *) tree_page->page_buffer; in finish_block_map_page_load()
1836 nonce = zone->block_map->nonce; in finish_block_map_page_load()
1838 if (!vdo_copy_valid_page(vio->data, nonce, pbn, page)) in finish_block_map_page_load()
1840 return_vio_to_pool(zone->vio_pool, pooled); in finish_block_map_page_load()
1844 vdo_waitq_notify_all_waiters(&tree_lock->waiters, continue_load_for_waiter, page); in finish_block_map_page_load()
1850 int result = completion->result; in handle_io_error()
1853 struct data_vio *data_vio = completion->parent; in handle_io_error()
1854 struct block_map_zone *zone = pooled->context; in handle_io_error()
1857 return_vio_to_pool(zone->vio_pool, pooled); in handle_io_error()
1863 struct vio *vio = bio->bi_private; in load_page_endio()
1864 struct data_vio *data_vio = vio->completion.parent; in load_page_endio()
1867 data_vio->logical.zone->thread_id); in load_page_endio()
1874 struct tree_lock *lock = &data_vio->tree_lock; in load_page()
1875 physical_block_number_t pbn = lock->tree_slots[lock->height - 1].block_map_slot.pbn; in load_page()
1877 pooled->vio.completion.parent = data_vio; in load_page()
1878 vdo_submit_metadata_vio(&pooled->vio, pbn, load_page_endio, in load_page()
1884 * acquired, @data_vio->tree_lock.locked will be true.
1890 struct tree_lock *lock = &data_vio->tree_lock; in attempt_page_lock()
1891 height_t height = lock->height; in attempt_page_lock()
1892 struct block_map_tree_slot tree_slot = lock->tree_slots[height]; in attempt_page_lock()
1896 .root_index = lock->root_index, in attempt_page_lock()
1901 lock->key = key.key; in attempt_page_lock()
1903 result = vdo_int_map_put(zone->loading_pages, lock->key, in attempt_page_lock()
1910 data_vio->tree_lock.locked = true; in attempt_page_lock()
1915 vdo_waitq_enqueue_waiter(&lock_holder->waiters, &data_vio->waiter); in attempt_page_lock()
1919 /* Load a block map tree page from disk, for the next level in the data vio tree lock. */
1930 if (data_vio->tree_lock.locked) { in load_block_map_page()
1931 data_vio->waiter.callback = load_page; in load_block_map_page()
1932 acquire_vio_from_pool(zone->vio_pool, &data_vio->waiter); in load_block_map_page()
1941 data_vio->logical.zone->thread_id)) in allocation_failure()
1944 abort_lookup(data_vio, completion->result, "allocation"); in allocation_failure()
1950 struct tree_lock *tree_lock = &data_vio->tree_lock; in continue_allocation_for_waiter()
1953 tree_lock->height--; in continue_allocation_for_waiter()
1954 data_vio->tree_lock.tree_slots[tree_lock->height].block_map_slot.pbn = pbn; in continue_allocation_for_waiter()
1956 if (tree_lock->height == 0) { in continue_allocation_for_waiter()
1961 allocate_block_map_page(data_vio->logical.zone->block_map_zone, data_vio); in continue_allocation_for_waiter()
1964 /** expire_oldest_list() - Expire the oldest list. */
1967 block_count_t i = dirty_lists->offset++; in expire_oldest_list()
1969 dirty_lists->oldest_period++; in expire_oldest_list()
1970 if (!list_empty(&dirty_lists->eras[i][VDO_TREE_PAGE])) { in expire_oldest_list()
1971 list_splice_tail_init(&dirty_lists->eras[i][VDO_TREE_PAGE], in expire_oldest_list()
1972 &dirty_lists->expired[VDO_TREE_PAGE]); in expire_oldest_list()
1975 if (!list_empty(&dirty_lists->eras[i][VDO_CACHE_PAGE])) { in expire_oldest_list()
1976 list_splice_tail_init(&dirty_lists->eras[i][VDO_CACHE_PAGE], in expire_oldest_list()
1977 &dirty_lists->expired[VDO_CACHE_PAGE]); in expire_oldest_list()
1980 if (dirty_lists->offset == dirty_lists->maximum_age) in expire_oldest_list()
1981 dirty_lists->offset = 0; in expire_oldest_list()
1985 /** update_period() - Update the dirty_lists period if necessary. */
1988 while (dirty->next_period <= period) { in update_period()
1989 if ((dirty->next_period - dirty->oldest_period) == dirty->maximum_age) in update_period()
1991 dirty->next_period++; in update_period()
1995 /** write_expired_elements() - Write out the expired list. */
2001 u8 generation = zone->generation; in write_expired_elements()
2003 expired = &zone->dirty_lists->expired[VDO_TREE_PAGE]; in write_expired_elements()
2007 list_del_init(&page->entry); in write_expired_elements()
2009 result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter), in write_expired_elements()
2017 if (!page->writing) in write_expired_elements()
2021 expired = &zone->dirty_lists->expired[VDO_CACHE_PAGE]; in write_expired_elements()
2023 list_del_init(&info->state_entry); in write_expired_elements()
2027 save_pages(&zone->page_cache); in write_expired_elements()
2031 * add_to_dirty_lists() - Add an element to the dirty lists.
2045 struct dirty_lists *dirty_lists = zone->dirty_lists; in add_to_dirty_lists()
2050 if (new_period < dirty_lists->oldest_period) { in add_to_dirty_lists()
2051 list_move_tail(entry, &dirty_lists->expired[type]); in add_to_dirty_lists()
2055 &dirty_lists->eras[new_period % dirty_lists->maximum_age][type]); in add_to_dirty_lists()
2072 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in finish_block_map_allocation()
2073 struct tree_lock *tree_lock = &data_vio->tree_lock; in finish_block_map_allocation()
2074 height_t height = tree_lock->height; in finish_block_map_allocation()
2079 pbn = tree_lock->tree_slots[height - 1].block_map_slot.pbn; in finish_block_map_allocation()
2082 page = (struct block_map_page *) tree_page->page_buffer; in finish_block_map_allocation()
2083 old_lock = tree_page->recovery_lock; in finish_block_map_allocation()
2086 &tree_page->recovery_lock); in finish_block_map_allocation()
2088 if (vdo_waiter_is_waiting(&tree_page->waiter)) { in finish_block_map_allocation()
2090 if (zone->flusher != tree_page) { in finish_block_map_allocation()
2095 set_generation(zone, tree_page, zone->generation); in finish_block_map_allocation()
2100 INIT_LIST_HEAD(&tree_page->entry); in finish_block_map_allocation()
2101 add_to_dirty_lists(zone, &tree_page->entry, VDO_TREE_PAGE, in finish_block_map_allocation()
2102 old_lock, tree_page->recovery_lock); in finish_block_map_allocation()
2105 tree_lock->height--; in finish_block_map_allocation()
2109 vdo_format_block_map_page(tree_page->page_buffer, in finish_block_map_allocation()
2110 zone->block_map->nonce, in finish_block_map_allocation()
2116 vdo_waitq_notify_all_waiters(&tree_lock->waiters, in finish_block_map_allocation()
2118 if (tree_lock->height == 0) { in finish_block_map_allocation()
2137 * Newly allocated block map pages are set to MAXIMUM_REFERENCES after they are journaled,
2138 * to prevent deduplication against the block after we release the write lock on it, but before we
2147 completion->callback = release_block_map_write_lock; in set_block_map_page_reference_count()
2148 vdo_modify_reference_count(completion, &data_vio->increment_updater); in set_block_map_page_reference_count()
2159 vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio); in journal_block_map_allocation()
2165 struct tree_lock *lock = &data_vio->tree_lock; in allocate_block()
2173 pbn = data_vio->allocation.pbn; in allocate_block()
2174 lock->tree_slots[lock->height - 1].block_map_slot.pbn = pbn; in allocate_block()
2175 data_vio->increment_updater = (struct reference_updater) { in allocate_block()
2182 .lock = data_vio->allocation.lock, in allocate_block()
2193 if (!data_vio->write || data_vio->is_discard) { in allocate_block_map_page()
2205 if (!data_vio->tree_lock.locked) in allocate_block_map_page()
2213 * vdo_find_block_map_slot() - Find the block map slot in which the block map entry for a data_vio
2214 * resides and cache that result in the data_vio.
2224 struct tree_lock *lock = &data_vio->tree_lock; in vdo_find_block_map_slot()
2225 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in vdo_find_block_map_slot()
2227 zone->active_lookups++; in vdo_find_block_map_slot()
2228 if (vdo_is_state_draining(&zone->state)) { in vdo_find_block_map_slot()
2233 lock->tree_slots[0].block_map_slot.slot = in vdo_find_block_map_slot()
2234 data_vio->logical.lbn % VDO_BLOCK_MAP_ENTRIES_PER_PAGE; in vdo_find_block_map_slot()
2235 page_index = (lock->tree_slots[0].page_index / zone->block_map->root_count); in vdo_find_block_map_slot()
2244 for (lock->height = 1; lock->height <= VDO_BLOCK_MAP_TREE_HEIGHT; lock->height++) { in vdo_find_block_map_slot()
2247 lock->tree_slots[lock->height] = tree_slot; in vdo_find_block_map_slot()
2248 page = (struct block_map_page *) (get_tree_page(zone, lock)->page_buffer); in vdo_find_block_map_slot()
2251 lock->tree_slots[lock->height].block_map_slot.pbn = pbn; in vdo_find_block_map_slot()
2262 mapping = vdo_unpack_block_map_entry(&page->entries[tree_slot.block_map_slot.slot]); in vdo_find_block_map_slot()
2263 if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) { in vdo_find_block_map_slot()
2265 "Invalid block map tree PBN: %llu with state %u for page index %u at height %u", in vdo_find_block_map_slot()
2267 lock->tree_slots[lock->height - 1].page_index, in vdo_find_block_map_slot()
2268 lock->height - 1); in vdo_find_block_map_slot()
2279 lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn; in vdo_find_block_map_slot()
2280 if (lock->height == 1) { in vdo_find_block_map_slot()
2281 /* This is the ultimate block map page, so we're done */ in vdo_find_block_map_slot()
2291 * Find the PBN of a leaf block map page. This method may only be used after all allocated tree
2300 root_count_t root_index = page_number % map->root_count; in vdo_find_block_map_page_pbn()
2301 page_number_t page_index = page_number / map->root_count; in vdo_find_block_map_page_pbn()
2306 tree_page = get_tree_page_by_index(map->forest, root_index, 1, page_index); in vdo_find_block_map_page_pbn()
2307 page = (struct block_map_page *) tree_page->page_buffer; in vdo_find_block_map_page_pbn()
2308 if (!page->header.initialized) in vdo_find_block_map_page_pbn()
2311 mapping = vdo_unpack_block_map_entry(&page->entries[slot]); in vdo_find_block_map_page_pbn()
2318 * Write a tree page or indicate that it has been re-dirtied if it is already being written. This
2319 * method is used when correcting errors in the tree during read-only rebuild.
2323 bool waiting = vdo_waiter_is_waiting(&page->waiter); in vdo_write_tree_page()
2325 if (waiting && (zone->flusher == page)) in vdo_write_tree_page()
2328 set_generation(zone, page, zone->generation); in vdo_write_tree_page()
2329 if (waiting || page->writing) in vdo_write_tree_page()
2338 size_t index = (old_forest == NULL) ? 0 : old_forest->segments; in make_segment()
2345 forest->segments = index + 1; in make_segment()
2347 result = vdo_allocate(forest->segments, struct boundary, in make_segment()
2348 "forest boundary array", &forest->boundaries); in make_segment()
2352 result = vdo_allocate(forest->segments, struct tree_page *, in make_segment()
2353 "forest page pointers", &forest->pages); in make_segment()
2358 "new forest pages", &forest->pages[index]); in make_segment()
2363 memcpy(forest->boundaries, old_forest->boundaries, in make_segment()
2365 memcpy(forest->pages, old_forest->pages, in make_segment()
2369 memcpy(&(forest->boundaries[index]), new_boundary, sizeof(struct boundary)); in make_segment()
2372 segment_sizes[height] = new_boundary->levels[height]; in make_segment()
2374 segment_sizes[height] -= old_forest->boundaries[index - 1].levels[height]; in make_segment()
2377 page_ptr = forest->pages[index]; in make_segment()
2378 for (root = 0; root < forest->map->root_count; root++) { in make_segment()
2380 struct block_map_tree *tree = &(forest->trees[root]); in make_segment()
2383 int result = vdo_allocate(forest->segments, in make_segment()
2385 "tree root segments", &tree->segments); in make_segment()
2390 memcpy(tree->segments, old_forest->trees[root].segments, in make_segment()
2394 segment = &(tree->segments[index]); in make_segment()
2399 segment->levels[height] = page_ptr; in make_segment()
2400 if (height == (VDO_BLOCK_MAP_TREE_HEIGHT - 1)) { in make_segment()
2403 vdo_format_block_map_page(page_ptr->page_buffer, in make_segment()
2404 forest->map->nonce, in make_segment()
2406 page->entries[0] = in make_segment()
2407 vdo_pack_block_map_entry(forest->map->root_origin + root, in make_segment()
2421 if (forest->pages != NULL) { in deforest()
2424 for (segment = first_page_segment; segment < forest->segments; segment++) in deforest()
2425 vdo_free(forest->pages[segment]); in deforest()
2426 vdo_free(forest->pages); in deforest()
2429 for (root = 0; root < forest->map->root_count; root++) in deforest()
2430 vdo_free(forest->trees[root].segments); in deforest()
2432 vdo_free(forest->boundaries); in deforest()
2437 * make_forest() - Make a collection of trees for a block_map, expanding the existing forest if
2439 * @entries: The number of entries the block map will hold.
2445 struct forest *forest, *old_forest = map->forest; in make_forest()
2451 old_boundary = &(old_forest->boundaries[old_forest->segments - 1]); in make_forest()
2453 new_pages = vdo_compute_new_forest_pages(map->root_count, old_boundary, in make_forest()
2456 map->next_entry_count = entries; in make_forest()
2460 result = vdo_allocate_extended(struct forest, map->root_count, in make_forest()
2466 forest->map = map; in make_forest()
2469 deforest(forest, forest->segments - 1); in make_forest()
2473 map->next_forest = forest; in make_forest()
2474 map->next_entry_count = entries; in make_forest()
2479 * replace_forest() - Replace a block_map's forest with the already-prepared larger forest.
2483 if (map->next_forest != NULL) { in replace_forest()
2484 if (map->forest != NULL) in replace_forest()
2485 deforest(map->forest, map->forest->segments); in replace_forest()
2486 map->forest = vdo_forget(map->next_forest); in replace_forest()
2489 map->entry_count = map->next_entry_count; in replace_forest()
2490 map->next_entry_count = 0; in replace_forest()
2494 * finish_cursor() - Finish the traversal of a single tree. If it was the last cursor, finish the
2499 struct cursors *cursors = cursor->parent; in finish_cursor()
2500 struct vdo_completion *completion = cursors->completion; in finish_cursor()
2502 return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio)); in finish_cursor()
2503 if (--cursors->active_roots > 0) in finish_cursor()
2514 * continue_traversal() - Continue traversing a block map tree.
2520 traverse(completion->parent); in continue_traversal()
2524 * finish_traversal_load() - Continue traversing a block map tree now that a page has been loaded.
2529 struct cursor *cursor = completion->parent; in finish_traversal_load()
2530 height_t height = cursor->height; in finish_traversal_load()
2531 struct cursor_level *level = &cursor->levels[height]; in finish_traversal_load()
2533 &(cursor->tree->segments[0].levels[height][level->page_index]); in finish_traversal_load()
2534 struct block_map_page *page = (struct block_map_page *) tree_page->page_buffer; in finish_traversal_load()
2536 vdo_copy_valid_page(cursor->vio->vio.data, in finish_traversal_load()
2537 cursor->parent->zone->block_map->nonce, in finish_traversal_load()
2538 pbn_from_vio_bio(cursor->vio->vio.bio), page); in finish_traversal_load()
2544 struct vio *vio = bio->bi_private; in traversal_endio()
2545 struct cursor *cursor = vio->completion.parent; in traversal_endio()
2548 cursor->parent->zone->thread_id); in traversal_endio()
2552 * traverse() - Traverse a single block map tree.
2558 for (; cursor->height < VDO_BLOCK_MAP_TREE_HEIGHT; cursor->height++) { in traverse()
2559 height_t height = cursor->height; in traverse()
2560 struct cursor_level *level = &cursor->levels[height]; in traverse()
2562 &(cursor->tree->segments[0].levels[height][level->page_index]); in traverse()
2563 struct block_map_page *page = (struct block_map_page *) tree_page->page_buffer; in traverse()
2565 if (!page->header.initialized) in traverse()
2568 for (; level->slot < VDO_BLOCK_MAP_ENTRIES_PER_PAGE; level->slot++) { in traverse()
2571 (VDO_BLOCK_MAP_ENTRIES_PER_PAGE * level->page_index) + level->slot; in traverse()
2573 vdo_unpack_block_map_entry(&page->entries[level->slot]); in traverse()
2577 page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY; in traverse()
2578 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2586 if (entry_index >= cursor->boundary.levels[height]) { in traverse()
2587 page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY; in traverse()
2588 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2592 if (cursor->height < VDO_BLOCK_MAP_TREE_HEIGHT - 1) { in traverse()
2593 int result = cursor->parent->entry_callback(location.pbn, in traverse()
2594 cursor->parent->completion); in traverse()
2596 page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY; in traverse()
2597 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2602 if (cursor->height == 0) in traverse()
2605 cursor->height--; in traverse()
2606 next_level = &cursor->levels[cursor->height]; in traverse()
2607 next_level->page_index = entry_index; in traverse()
2608 next_level->slot = 0; in traverse()
2609 level->slot++; in traverse()
2610 vdo_submit_metadata_vio(&cursor->vio->vio, location.pbn, in traverse()
2621 * launch_cursor() - Start traversing a single block map tree now that the cursor has a VIO with
2632 cursor->vio = pooled; in launch_cursor()
2633 pooled->vio.completion.parent = cursor; in launch_cursor()
2634 pooled->vio.completion.callback_thread_id = cursor->parent->zone->thread_id; in launch_cursor()
2639 * compute_boundary() - Compute the number of pages used at each level of the given root's tree.
2647 page_count_t leaf_pages = vdo_compute_block_map_page_count(map->entry_count); in compute_boundary()
2653 page_count_t last_tree_root = (leaf_pages - 1) % map->root_count; in compute_boundary()
2654 page_count_t level_pages = leaf_pages / map->root_count; in compute_boundary()
2659 for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT - 1; height++) { in compute_boundary()
2665 boundary.levels[VDO_BLOCK_MAP_TREE_HEIGHT - 1] = 1; in compute_boundary()
2671 * vdo_traverse_forest() - Walk the entire forest of a block map.
2682 result = vdo_allocate_extended(struct cursors, map->root_count, in vdo_traverse_forest()
2689 cursors->zone = &map->zones[0]; in vdo_traverse_forest()
2690 cursors->pool = cursors->zone->vio_pool; in vdo_traverse_forest()
2691 cursors->entry_callback = callback; in vdo_traverse_forest()
2692 cursors->completion = completion; in vdo_traverse_forest()
2693 cursors->active_roots = map->root_count; in vdo_traverse_forest()
2694 for (root = 0; root < map->root_count; root++) { in vdo_traverse_forest()
2695 struct cursor *cursor = &cursors->cursors[root]; in vdo_traverse_forest()
2698 .tree = &map->forest->trees[root], in vdo_traverse_forest()
2699 .height = VDO_BLOCK_MAP_TREE_HEIGHT - 1, in vdo_traverse_forest()
2704 cursor->waiter.callback = launch_cursor; in vdo_traverse_forest()
2705 acquire_vio_from_pool(cursors->pool, &cursor->waiter); in vdo_traverse_forest()
2710 * initialize_block_map_zone() - Initialize the per-zone portions of the block map.
2721 struct vdo *vdo = map->vdo; in initialize_block_map_zone()
2722 struct block_map_zone *zone = &map->zones[zone_number]; in initialize_block_map_zone()
2726 zone->zone_number = zone_number; in initialize_block_map_zone()
2727 zone->thread_id = vdo->thread_config.logical_threads[zone_number]; in initialize_block_map_zone()
2728 zone->block_map = map; in initialize_block_map_zone()
2732 &zone->dirty_lists); in initialize_block_map_zone()
2736 zone->dirty_lists->maximum_age = maximum_age; in initialize_block_map_zone()
2737 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_TREE_PAGE]); in initialize_block_map_zone()
2738 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_CACHE_PAGE]); in initialize_block_map_zone()
2741 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_TREE_PAGE]); in initialize_block_map_zone()
2742 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]); in initialize_block_map_zone()
2745 result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->loading_pages); in initialize_block_map_zone()
2750 zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR, in initialize_block_map_zone()
2751 VIO_PRIORITY_METADATA, zone, &zone->vio_pool); in initialize_block_map_zone()
2755 vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION); in initialize_block_map_zone()
2757 zone->page_cache.zone = zone; in initialize_block_map_zone()
2758 zone->page_cache.vdo = vdo; in initialize_block_map_zone()
2759 zone->page_cache.page_count = cache_size / map->zone_count; in initialize_block_map_zone()
2760 zone->page_cache.stats.free_pages = zone->page_cache.page_count; in initialize_block_map_zone()
2762 result = allocate_cache_components(&zone->page_cache); in initialize_block_map_zone()
2767 INIT_LIST_HEAD(&zone->page_cache.lru_list); in initialize_block_map_zone()
2768 INIT_LIST_HEAD(&zone->page_cache.outgoing_list); in initialize_block_map_zone()
2778 return map->zones[zone_number].thread_id; in get_block_map_zone_thread_id()
2786 map->current_era_point = map->pending_era_point; in prepare_for_era_advance()
2795 struct block_map_zone *zone = &map->zones[zone_number]; in advance_block_map_zone_era()
2797 update_period(zone->dirty_lists, map->current_era_point); in advance_block_map_zone_era()
2804 * vdo_schedule_default_action() on the block map's action manager.
2812 if (map->current_era_point == map->pending_era_point) in schedule_era_advance()
2815 return vdo_schedule_action(map->action_manager, prepare_for_era_advance, in schedule_era_advance()
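
/*
 * Illustrative sketch only: the two-field era handoff used here. The journal
 * side records the newest block number as the pending era point, and the
 * scheduled action copies it into the current era point and tells each zone's
 * dirty lists about the new period -- doing nothing at all when the two points
 * already agree. The sketch_* names are hypothetical stand-ins for the
 * block_map fields and update_period().
 */
#include <stdio.h>
#include <stdbool.h>

struct sketch_map {
	unsigned long long current_era_point;
	unsigned long long pending_era_point;
};

/* Runs on each zone's thread in the kernel; just prints here. */
static void sketch_update_zone_period(unsigned int zone, unsigned long long period)
{
	printf("zone %u: advance dirty lists to period %llu\n", zone, period);
}

/* Returns true if an advance was actually needed (cf. schedule_era_advance()). */
static bool sketch_advance_era(struct sketch_map *map, unsigned int zone_count)
{
	unsigned int zone;

	if (map->current_era_point == map->pending_era_point)
		return false;	/* nothing new to schedule */

	map->current_era_point = map->pending_era_point;
	for (zone = 0; zone < zone_count; zone++)
		sketch_update_zone_period(zone, map->current_era_point);

	return true;
}

int main(void)
{
	struct sketch_map map = { .current_era_point = 10, .pending_era_point = 10 };

	map.pending_era_point = 12;	/* a new recovery journal block was written */
	sketch_advance_era(&map, 2);
	sketch_advance_era(&map, 2);	/* second call is a no-op */
	return 0;
}
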
2821 struct vdo_page_cache *cache = &zone->page_cache; in uninitialize_block_map_zone() local
2823 vdo_free(vdo_forget(zone->dirty_lists)); in uninitialize_block_map_zone()
2824 free_vio_pool(vdo_forget(zone->vio_pool)); in uninitialize_block_map_zone()
2825 vdo_int_map_free(vdo_forget(zone->loading_pages)); in uninitialize_block_map_zone()
2826 if (cache->infos != NULL) { in uninitialize_block_map_zone()
2829 for (info = cache->infos; info < cache->infos + cache->page_count; info++) in uninitialize_block_map_zone()
2830 free_vio(vdo_forget(info->vio)); in uninitialize_block_map_zone()
2833 vdo_int_map_free(vdo_forget(cache->page_map)); in uninitialize_block_map_zone()
2834 vdo_free(vdo_forget(cache->infos)); in uninitialize_block_map_zone()
2835 vdo_free(vdo_forget(cache->pages)); in uninitialize_block_map_zone()
2845 for (zone = 0; zone < map->zone_count; zone++) in vdo_free_block_map()
2846 uninitialize_block_map_zone(&map->zones[zone]); in vdo_free_block_map()
2849 if (map->forest != NULL) in vdo_free_block_map()
2850 deforest(vdo_forget(map->forest), 0); in vdo_free_block_map()
2851 vdo_free(vdo_forget(map->action_manager)); in vdo_free_block_map()
2866 ((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) / in vdo_decode_block_map()
2868 result = VDO_ASSERT(cache_size > 0, "block map cache size is specified"); in vdo_decode_block_map()
2873 vdo->thread_config.logical_zone_count, in vdo_decode_block_map()
2878 map->vdo = vdo; in vdo_decode_block_map()
2879 map->root_origin = state.root_origin; in vdo_decode_block_map()
2880 map->root_count = state.root_count; in vdo_decode_block_map()
2881 map->entry_count = logical_blocks; in vdo_decode_block_map()
2882 map->journal = journal; in vdo_decode_block_map()
2883 map->nonce = nonce; in vdo_decode_block_map()
2885 result = make_forest(map, map->entry_count); in vdo_decode_block_map()
2893 map->zone_count = vdo->thread_config.logical_zone_count; in vdo_decode_block_map()
2894 for (zone = 0; zone < map->zone_count; zone++) { in vdo_decode_block_map()
2902 result = vdo_make_action_manager(map->zone_count, get_block_map_zone_thread_id, in vdo_decode_block_map()
2905 &map->action_manager); in vdo_decode_block_map()
2921 .root_origin = map->root_origin, in vdo_record_block_map()
2922 .root_count = map->root_count, in vdo_record_block_map()
2926 /* The block map needs to know the journal's sequence number to initialize the eras. */
2932 map->current_era_point = vdo_get_recovery_journal_current_sequence_number(journal); in vdo_initialize_block_map_from_journal()
2933 map->pending_era_point = map->current_era_point; in vdo_initialize_block_map_from_journal()
2935 for (z = 0; z < map->zone_count; z++) { in vdo_initialize_block_map_from_journal()
2936 struct dirty_lists *dirty_lists = map->zones[z].dirty_lists; in vdo_initialize_block_map_from_journal()
2938 VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set"); in vdo_initialize_block_map_from_journal()
2939 dirty_lists->oldest_period = map->current_era_point; in vdo_initialize_block_map_from_journal()
2940 dirty_lists->next_period = map->current_era_point + 1; in vdo_initialize_block_map_from_journal()
2941 dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age; in vdo_initialize_block_map_from_journal()
2948 struct block_map *map = vdo_from_data_vio(data_vio)->block_map; in vdo_compute_logical_zone()
2949 struct tree_lock *tree_lock = &data_vio->tree_lock; in vdo_compute_logical_zone()
2950 page_number_t page_number = data_vio->logical.lbn / VDO_BLOCK_MAP_ENTRIES_PER_PAGE; in vdo_compute_logical_zone()
2952 tree_lock->tree_slots[0].page_index = page_number; in vdo_compute_logical_zone()
2953 tree_lock->root_index = page_number % map->root_count; in vdo_compute_logical_zone()
2954 return (tree_lock->root_index % map->zone_count); in vdo_compute_logical_zone()
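
/*
 * Illustrative sketch only: the routing arithmetic vdo_compute_logical_zone()
 * performs. The leaf page number is the LBN divided by the entries per block
 * map page, the root tree is the page number modulo the root count, and the
 * logical zone is the root index modulo the zone count. The constant below is
 * an assumed stand-in for VDO_BLOCK_MAP_ENTRIES_PER_PAGE.
 */
#include <stdio.h>

#define SKETCH_ENTRIES_PER_PAGE 812	/* assumed leaf entries per page */

struct sketch_route {
	unsigned long long page_number;
	unsigned int root_index;
	unsigned int zone;
};

static struct sketch_route sketch_route_lbn(unsigned long long lbn,
					    unsigned int root_count,
					    unsigned int zone_count)
{
	struct sketch_route route;

	route.page_number = lbn / SKETCH_ENTRIES_PER_PAGE;
	route.root_index = route.page_number % root_count;
	route.zone = route.root_index % zone_count;
	return route;
}

int main(void)
{
	struct sketch_route route = sketch_route_lbn(1000000, 60, 4);

	printf("page %llu, root %u, zone %u\n",
	       route.page_number, route.root_index, route.zone);
	return 0;
}
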
2963 map->pending_era_point = recovery_block_number; in vdo_advance_block_map_era()
2964 vdo_schedule_default_action(map->action_manager); in vdo_advance_block_map_era()
2972 VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0), in initiate_drain()
2976 while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period) in initiate_drain()
2977 expire_oldest_list(zone->dirty_lists); in initiate_drain()
2989 struct block_map_zone *zone = &map->zones[zone_number]; in drain_zone()
2991 vdo_start_draining(&zone->state, in drain_zone()
2992 vdo_get_current_manager_operation(map->action_manager), in drain_zone()
2999 vdo_schedule_operation(map->action_manager, operation, NULL, drain_zone, NULL, in vdo_drain_block_map()
3008 struct block_map_zone *zone = &map->zones[zone_number]; in resume_block_map_zone()
3010 vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state)); in resume_block_map_zone()
3015 vdo_schedule_operation(map->action_manager, VDO_ADMIN_STATE_RESUMING, in vdo_resume_block_map()
3023 if (map->next_entry_count == new_logical_blocks) in vdo_prepare_to_grow_block_map()
3026 if (map->next_entry_count > 0) in vdo_prepare_to_grow_block_map()
3029 if (new_logical_blocks < map->entry_count) { in vdo_prepare_to_grow_block_map()
3030 map->next_entry_count = map->entry_count; in vdo_prepare_to_grow_block_map()
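
/*
 * Illustrative sketch only: the decision order the fragments above suggest for
 * vdo_prepare_to_grow_block_map() -- a no-op when already prepared for this
 * size, a refusal while a different preparation is outstanding, and a clamp
 * when asked to "grow" to fewer logical blocks than already exist; only a
 * genuine growth builds a new forest. The return codes and sketch_make_forest()
 * are hypothetical stand-ins, not the VDO status codes.
 */
#include <stdio.h>

enum { SKETCH_OK = 0, SKETCH_BUSY = -1 };

struct sketch_map {
	unsigned long long entry_count;		/* current logical block count */
	unsigned long long next_entry_count;	/* pending growth target, 0 if none */
};

static int sketch_make_forest(struct sketch_map *map, unsigned long long new_blocks)
{
	/* The kernel allocates the expanded tree pages here. */
	map->next_entry_count = new_blocks;
	return SKETCH_OK;
}

static int sketch_prepare_to_grow(struct sketch_map *map, unsigned long long new_blocks)
{
	if (map->next_entry_count == new_blocks)
		return SKETCH_OK;	/* already prepared for exactly this size */

	if (map->next_entry_count > 0)
		return SKETCH_BUSY;	/* a different preparation is outstanding */

	if (new_blocks < map->entry_count) {
		/* Never shrink: remember the current size as the growth target. */
		map->next_entry_count = map->entry_count;
		return SKETCH_OK;
	}

	return sketch_make_forest(map, new_blocks);
}

int main(void)
{
	struct sketch_map map = { .entry_count = 1000, .next_entry_count = 0 };

	printf("grow to 2000: %d\n", sketch_prepare_to_grow(&map, 2000));
	printf("grow to 3000: %d\n", sketch_prepare_to_grow(&map, 3000));	/* busy */
	return 0;
}
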
3047 vdo_schedule_operation(map->action_manager, in vdo_grow_block_map()
3054 struct forest *forest = vdo_forget(map->next_forest); in vdo_abandon_block_map_growth()
3057 deforest(forest, forest->segments - 1); in vdo_abandon_block_map_growth()
3059 map->next_entry_count = 0; in vdo_abandon_block_map_growth()
3065 struct vdo_completion *parent = completion->parent; in finish_processing_page()
3073 finish_processing_page(completion, completion->result); in handle_page_error()
3076 /* Fetch the mapping page for a block map update, and call the provided handler when fetched. */
3080 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in fetch_mapping_page()
3082 if (vdo_is_state_draining(&zone->state)) { in fetch_mapping_page()
3087 vdo_get_page(&data_vio->page_completion, zone, in fetch_mapping_page()
3088 data_vio->tree_lock.tree_slots[0].block_map_slot.pbn, in fetch_mapping_page()
3089 modifiable, &data_vio->vio.completion, in fetch_mapping_page()
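
/*
 * Illustrative sketch only: the guard fetch_mapping_page() applies before
 * asking the page cache for a block map page -- if the zone has begun
 * draining, the request is failed immediately instead of being queued. The
 * state flag and error value here are hypothetical stand-ins for the VDO
 * admin-state machinery and status codes.
 */
#include <stdio.h>
#include <stdbool.h>

enum { SKETCH_OK = 0, SKETCH_SHUTTING_DOWN = -108 };

static int sketch_get_page(unsigned long long pbn, bool modifiable)
{
	printf("requesting %s page at PBN %llu from the cache\n",
	       modifiable ? "writable" : "read-only", pbn);
	return SKETCH_OK;
}

static int sketch_fetch_mapping_page(bool zone_draining, unsigned long long pbn,
				     bool modifiable)
{
	if (zone_draining)
		return SKETCH_SHUTTING_DOWN;	/* refuse new lookups while draining */

	return sketch_get_page(pbn, modifiable);
}

int main(void)
{
	printf("normal:   %d\n", sketch_fetch_mapping_page(false, 4096, true));
	printf("draining: %d\n", sketch_fetch_mapping_page(true, 4096, true));
	return 0;
}
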
3094 * clear_mapped_location() - Clear a data_vio's mapped block location, setting it to be unmapped.
3096 * This indicates the block map entry for the logical block is either unmapped or corrupted.
3100 data_vio->mapped = (struct zoned_pbn) { in clear_mapped_location()
3106 * set_mapped_location() - Decode and validate a block map entry, and set the mapped location of a data_vio.
3122 mapped.pbn, &data_vio->mapped.zone); in set_mapped_location()
3124 data_vio->mapped.pbn = mapped.pbn; in set_mapped_location()
3125 data_vio->mapped.state = mapped.state; in set_mapped_location()
3142 "PBN %llu with state %u read from the block map was invalid", in set_mapped_location()
3146 * A read VIO has no option but to report the bad mapping--reading zeros would be hiding in set_mapped_location()
3149 if (!data_vio->write) in set_mapped_location()
3153 * A write VIO only reads this mapping to decref the old block. Treat this as an unmapped in set_mapped_location()
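
/*
 * Illustrative sketch only: the policy the comments above describe in
 * set_mapped_location(). An entry that fails validation is fatal for a read
 * (returning zeros would hide data loss), but a write only consulted the old
 * mapping in order to release a reference, so it treats the bad entry as
 * unmapped and proceeds. The types and error values here are hypothetical
 * stand-ins for the VDO ones.
 */
#include <stdio.h>
#include <stdbool.h>

enum { SKETCH_OK = 0, SKETCH_BAD_MAPPING = -22 };

struct sketch_mapping {
	unsigned long long pbn;
	bool mapped;
};

static int sketch_set_mapped_location(bool is_write, bool entry_valid,
				      unsigned long long decoded_pbn,
				      struct sketch_mapping *out)
{
	if (entry_valid) {
		out->pbn = decoded_pbn;
		out->mapped = true;
		return SKETCH_OK;
	}

	if (!is_write) {
		/* A read has no option but to report the bad mapping. */
		return SKETCH_BAD_MAPPING;
	}

	/* A write only needed the old mapping to drop a reference; treat it as unmapped. */
	out->pbn = 0;
	out->mapped = false;
	return SKETCH_OK;
}

int main(void)
{
	struct sketch_mapping mapping;

	printf("read of bad entry:    %d\n",
	       sketch_set_mapped_location(false, false, 0, &mapping));
	printf("write over bad entry: %d\n",
	       sketch_set_mapped_location(true, false, 0, &mapping));
	return 0;
}
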
3167 struct data_vio *data_vio = as_data_vio(completion->parent); in get_mapping_from_fetched_page()
3170 if (completion->result != VDO_SUCCESS) { in get_mapping_from_fetched_page()
3171 finish_processing_page(completion, completion->result); in get_mapping_from_fetched_page()
3181 page = (const struct block_map_page *) get_page_buffer(vpc->info); in get_mapping_from_fetched_page()
3182 tree_slot = &data_vio->tree_lock.tree_slots[0]; in get_mapping_from_fetched_page()
3183 entry = &page->entries[tree_slot->block_map_slot.slot]; in get_mapping_from_fetched_page()
3194 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in vdo_update_block_map_page()
3195 struct block_map *block_map = zone->block_map; in vdo_update_block_map_page()
3196 struct recovery_journal *journal = block_map->journal; in vdo_update_block_map_page()
3198 struct tree_lock *tree_lock = &data_vio->tree_lock; in vdo_update_block_map_page()
3201 page->entries[tree_lock->tree_slots[tree_lock->height].block_map_slot.slot] = in vdo_update_block_map_page()
3206 new_locked = data_vio->recovery_sequence_number; in vdo_update_block_map_page()
3211 zone->zone_number); in vdo_update_block_map_page()
3216 zone->zone_number); in vdo_update_block_map_page()
3227 data_vio->recovery_sequence_number = 0; in vdo_update_block_map_page()
3232 struct data_vio *data_vio = as_data_vio(completion->parent); in put_mapping_in_fetched_page()
3238 if (completion->result != VDO_SUCCESS) { in put_mapping_in_fetched_page()
3239 finish_processing_page(completion, completion->result); in put_mapping_in_fetched_page()
3250 info = vpc->info; in put_mapping_in_fetched_page()
3251 old_lock = info->recovery_lock; in put_mapping_in_fetched_page()
3253 data_vio, data_vio->new_mapped.pbn, in put_mapping_in_fetched_page()
3254 data_vio->new_mapped.state, &info->recovery_lock); in put_mapping_in_fetched_page()
3256 add_to_dirty_lists(info->cache->zone, &info->state_entry, in put_mapping_in_fetched_page()
3257 VDO_CACHE_PAGE, old_lock, info->recovery_lock); in put_mapping_in_fetched_page()
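
/*
 * Illustrative sketch only, and a simplified model: how put_mapping_in_fetched_page()
 * re-buckets a cache page after adding a journal entry. Dirty pages live on
 * per-era lists keyed by the earliest recovery-journal sequence number they
 * depend on, so a page that was already dirty with an equal or older lock
 * stays put, while a page that just became dirty is filed under its new lock's
 * era. SKETCH_MAX_AGE and sketch_rebucket() are hypothetical stand-ins for the
 * dirty-list fields and add_to_dirty_lists().
 */
#include <stdio.h>

#define SKETCH_MAX_AGE 5	/* assumed number of dirty-list eras */

/*
 * Returns the era list index the page should move to, or -1 if it should stay
 * where it is.
 */
static int sketch_rebucket(unsigned long long old_lock, unsigned long long new_lock)
{
	if (old_lock == new_lock)
		return -1;

	if (old_lock != 0 && old_lock < new_lock)
		return -1;	/* keep tracking the page by its earliest dependency */

	return (int)(new_lock % SKETCH_MAX_AGE);
}

int main(void)
{
	printf("clean page dirtied at 42: era %d\n", sketch_rebucket(0, 42));
	printf("page locked at 40, touched at 42: era %d\n", sketch_rebucket(40, 42));
	return 0;
}
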
3261 /* Read a stored block mapping into a data_vio. */
3264 if (data_vio->tree_lock.tree_slots[0].block_map_slot.pbn == VDO_ZERO_BLOCK) { in vdo_get_mapped_block()
3266 * We know that the block map page for this LBN has not been allocated, so the in vdo_get_mapped_block()
3267 * block must be unmapped. in vdo_get_mapped_block()
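
/*
 * Illustrative sketch only: the short-circuit vdo_get_mapped_block() takes. If
 * the tree lookup left the leaf page's PBN as the zero block, that block map
 * page was never allocated, so the logical block must be unmapped and no page
 * fetch is needed; otherwise the cached page is fetched and decoded. The
 * helpers below are hypothetical stand-ins.
 */
#include <stdio.h>

#define SKETCH_ZERO_BLOCK 0ULL	/* stand-in for VDO_ZERO_BLOCK */

static void sketch_report_unmapped(void)
{
	printf("logical block is unmapped (its block map page was never written)\n");
}

static void sketch_fetch_and_decode(unsigned long long page_pbn)
{
	printf("fetch block map page at PBN %llu and decode the slot\n", page_pbn);
}

static void sketch_get_mapped_block(unsigned long long page_pbn)
{
	if (page_pbn == SKETCH_ZERO_BLOCK) {
		sketch_report_unmapped();
		return;
	}

	sketch_fetch_and_decode(page_pbn);
}

int main(void)
{
	sketch_get_mapped_block(SKETCH_ZERO_BLOCK);
	sketch_get_mapped_block(123456);
	return 0;
}
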
3277 /* Update a stored block mapping to reflect a data_vio's new mapping. */
3289 for (zone = 0; zone < map->zone_count; zone++) { in vdo_get_block_map_statistics()
3291 &(map->zones[zone].page_cache.stats); in vdo_get_block_map_statistics()
3293 totals.dirty_pages += READ_ONCE(stats->dirty_pages); in vdo_get_block_map_statistics()
3294 totals.clean_pages += READ_ONCE(stats->clean_pages); in vdo_get_block_map_statistics()
3295 totals.free_pages += READ_ONCE(stats->free_pages); in vdo_get_block_map_statistics()
3296 totals.failed_pages += READ_ONCE(stats->failed_pages); in vdo_get_block_map_statistics()
3297 totals.incoming_pages += READ_ONCE(stats->incoming_pages); in vdo_get_block_map_statistics()
3298 totals.outgoing_pages += READ_ONCE(stats->outgoing_pages); in vdo_get_block_map_statistics()
3299 totals.cache_pressure += READ_ONCE(stats->cache_pressure); in vdo_get_block_map_statistics()
3300 totals.read_count += READ_ONCE(stats->read_count); in vdo_get_block_map_statistics()
3301 totals.write_count += READ_ONCE(stats->write_count); in vdo_get_block_map_statistics()
3302 totals.failed_reads += READ_ONCE(stats->failed_reads); in vdo_get_block_map_statistics()
3303 totals.failed_writes += READ_ONCE(stats->failed_writes); in vdo_get_block_map_statistics()
3304 totals.reclaimed += READ_ONCE(stats->reclaimed); in vdo_get_block_map_statistics()
3305 totals.read_outgoing += READ_ONCE(stats->read_outgoing); in vdo_get_block_map_statistics()
3306 totals.found_in_cache += READ_ONCE(stats->found_in_cache); in vdo_get_block_map_statistics()
3307 totals.discard_required += READ_ONCE(stats->discard_required); in vdo_get_block_map_statistics()
3308 totals.wait_for_page += READ_ONCE(stats->wait_for_page); in vdo_get_block_map_statistics()
3309 totals.fetch_required += READ_ONCE(stats->fetch_required); in vdo_get_block_map_statistics()
3310 totals.pages_loaded += READ_ONCE(stats->pages_loaded); in vdo_get_block_map_statistics()
3311 totals.pages_saved += READ_ONCE(stats->pages_saved); in vdo_get_block_map_statistics()
3312 totals.flush_count += READ_ONCE(stats->flush_count); in vdo_get_block_map_statistics()