Lines Matching full:ic

111 #define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])  argument
309 struct dm_integrity_c *ic; member
332 struct dm_integrity_c *ic; member
344 struct dm_integrity_c *ic; member
370 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) in dm_integrity_io_error() argument
373 atomic64_inc(&ic->number_of_mismatches); in dm_integrity_io_error()
374 if (!cmpxchg(&ic->failed, 0, err)) in dm_integrity_io_error()
378 static int dm_integrity_failed(struct dm_integrity_c *ic) in dm_integrity_failed() argument
380 return READ_ONCE(ic->failed); in dm_integrity_failed()
383 static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) in dm_integrity_disable_recalculate() argument
385 if (ic->legacy_recalculate) in dm_integrity_disable_recalculate()
387 if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ? in dm_integrity_disable_recalculate()
388 ic->internal_hash_alg.key || ic->journal_mac_alg.key : in dm_integrity_disable_recalculate()
389 ic->internal_hash_alg.key && !ic->journal_mac_alg.key) in dm_integrity_disable_recalculate()
394 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i, in dm_integrity_commit_id() argument
401 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); in dm_integrity_commit_id()
404 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, in get_area_and_offset() argument
407 if (!ic->meta_dev) { in get_area_and_offset()
408 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; in get_area_and_offset()
417 #define sector_to_block(ic, n) \ argument
419 BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \
420 (n) >>= (ic)->sb->log2_sectors_per_block; \
423 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, in get_metadata_sector_and_offset() argument
429 ms = area << ic->sb->log2_interleave_sectors; in get_metadata_sector_and_offset()
430 if (likely(ic->log2_metadata_run >= 0)) in get_metadata_sector_and_offset()
431 ms += area << ic->log2_metadata_run; in get_metadata_sector_and_offset()
433 ms += area * ic->metadata_run; in get_metadata_sector_and_offset()
434 ms >>= ic->log2_buffer_sectors; in get_metadata_sector_and_offset()
436 sector_to_block(ic, offset); in get_metadata_sector_and_offset()
438 if (likely(ic->log2_tag_size >= 0)) { in get_metadata_sector_and_offset()
439 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); in get_metadata_sector_and_offset()
440 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
442 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); in get_metadata_sector_and_offset()
443 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
449 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) in get_data_sector() argument
453 if (ic->meta_dev) in get_data_sector()
456 result = area << ic->sb->log2_interleave_sectors; in get_data_sector()
457 if (likely(ic->log2_metadata_run >= 0)) in get_data_sector()
458 result += (area + 1) << ic->log2_metadata_run; in get_data_sector()
460 result += (area + 1) * ic->metadata_run; in get_data_sector()
462 result += (sector_t)ic->initial_sectors + offset; in get_data_sector()
463 result += ic->start; in get_data_sector()
468 static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr) in wraparound_section() argument
470 if (unlikely(*sec_ptr >= ic->journal_sections)) in wraparound_section()
471 *sec_ptr -= ic->journal_sections; in wraparound_section()
474 static void sb_set_version(struct dm_integrity_c *ic) in sb_set_version() argument
476 if (ic->sb->flags & cpu_to_le32(SB_FLAG_INLINE)) in sb_set_version()
477 ic->sb->version = SB_VERSION_6; in sb_set_version()
478 else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) in sb_set_version()
479 ic->sb->version = SB_VERSION_5; in sb_set_version()
480 else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) in sb_set_version()
481 ic->sb->version = SB_VERSION_4; in sb_set_version()
482 else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) in sb_set_version()
483 ic->sb->version = SB_VERSION_3; in sb_set_version()
484 else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in sb_set_version()
485 ic->sb->version = SB_VERSION_2; in sb_set_version()
487 ic->sb->version = SB_VERSION_1; in sb_set_version()
490 static int sb_mac(struct dm_integrity_c *ic, bool wr) in sb_mac() argument
492 SHASH_DESC_ON_STACK(desc, ic->journal_mac); in sb_mac()
494 unsigned int mac_size = crypto_shash_digestsize(ic->journal_mac); in sb_mac()
495 __u8 *sb = (__u8 *)ic->sb; in sb_mac()
500 dm_integrity_io_error(ic, "digest is too long", -EINVAL); in sb_mac()
504 desc->tfm = ic->journal_mac; in sb_mac()
509 dm_integrity_io_error(ic, "crypto_shash_digest", r); in sb_mac()
517 dm_integrity_io_error(ic, "crypto_shash_digest", r); in sb_mac()
521 dm_integrity_io_error(ic, "superblock mac", -EILSEQ); in sb_mac()
522 dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0); in sb_mac()
530 static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf) in sync_rw_sb() argument
539 io_req.mem.ptr.addr = ic->sb; in sync_rw_sb()
541 io_req.client = ic->io; in sync_rw_sb()
542 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; in sync_rw_sb()
543 io_loc.sector = ic->start; in sync_rw_sb()
547 sb_set_version(ic); in sync_rw_sb()
548 if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { in sync_rw_sb()
549 r = sb_mac(ic, true); in sync_rw_sb()
560 if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { in sync_rw_sb()
561 r = sb_mac(ic, false); in sync_rw_sb()
575 static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, in block_bitmap_op() argument
581 if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) { in block_bitmap_op()
585 ic->sb->log2_sectors_per_block, in block_bitmap_op()
586 ic->log2_blocks_per_bitmap_bit, in block_bitmap_op()
594 bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in block_bitmap_op()
596 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in block_bitmap_op()
681 static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *s… in block_bitmap_copy() argument
683 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); in block_bitmap_copy()
694 static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t secto… in sector_to_bitmap_block() argument
696 unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in sector_to_bitmap_block()
699 BUG_ON(bitmap_block >= ic->n_bitmap_blocks); in sector_to_bitmap_block()
700 return &ic->bbs[bitmap_block]; in sector_to_bitmap_block()
703 static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offs… in access_journal_check() argument
707 unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors; in access_journal_check()
709 if (unlikely(section >= ic->journal_sections) || in access_journal_check()
712 function, section, offset, ic->journal_sections, limit); in access_journal_check()
718 static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, in page_list_location() argument
723 access_journal_check(ic, section, offset, false, "page_list_location"); in page_list_location()
725 sector = section * ic->journal_section_sectors + offset; in page_list_location()
731 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, in access_page_list() argument
737 page_list_location(ic, section, offset, &pl_index, &pl_offset); in access_page_list()
747 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsig… in access_journal() argument
749 return access_page_list(ic, ic->journal, section, offset, NULL); in access_journal()
752 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, … in access_journal_entry() argument
757 access_journal_check(ic, section, n, true, "access_journal_entry"); in access_journal_entry()
762 js = access_journal(ic, section, rel_sector); in access_journal_entry()
763 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); in access_journal_entry()
766 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, … in access_journal_data() argument
768 n <<= ic->sb->log2_sectors_per_block; in access_journal_data()
772 access_journal_check(ic, section, n, false, "access_journal_data"); in access_journal_data()
774 return access_journal(ic, section, n); in access_journal_data()
777 static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SI… in section_mac() argument
779 SHASH_DESC_ON_STACK(desc, ic->journal_mac); in section_mac()
783 desc->tfm = ic->journal_mac; in section_mac()
787 dm_integrity_io_error(ic, "crypto_shash_init", r); in section_mac()
791 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { in section_mac()
794 r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE); in section_mac()
796 dm_integrity_io_error(ic, "crypto_shash_update", r); in section_mac()
803 dm_integrity_io_error(ic, "crypto_shash_update", r); in section_mac()
808 for (j = 0; j < ic->journal_section_entries; j++) { in section_mac()
809 struct journal_entry *je = access_journal_entry(ic, section, j); in section_mac()
813 dm_integrity_io_error(ic, "crypto_shash_update", r); in section_mac()
818 size = crypto_shash_digestsize(ic->journal_mac); in section_mac()
823 dm_integrity_io_error(ic, "crypto_shash_final", r); in section_mac()
831 dm_integrity_io_error(ic, "digest_size", -EINVAL); in section_mac()
836 dm_integrity_io_error(ic, "crypto_shash_final", r); in section_mac()
847 static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr) in rw_section_mac() argument
852 if (!ic->journal_mac) in rw_section_mac()
855 section_mac(ic, section, result); in rw_section_mac()
858 struct journal_sector *js = access_journal(ic, section, j); in rw_section_mac()
864 dm_integrity_io_error(ic, "journal mac", -EILSEQ); in rw_section_mac()
865 dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0); in rw_section_mac()
880 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, in xor_journal() argument
884 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; in xor_journal()
889 source_pl = ic->journal; in xor_journal()
890 target_pl = ic->journal_io; in xor_journal()
892 source_pl = ic->journal_io; in xor_journal()
893 target_pl = ic->journal; in xor_journal()
896 page_list_location(ic, section, 0, &pl_index, &pl_offset); in xor_journal()
913 rw_section_mac(ic, section, true); in xor_journal()
918 page_list_location(ic, section, 0, &section_index, &dummy); in xor_journal()
924 src_pages[1] = ic->journal_xor[pl_index].page; in xor_journal()
944 complete(&comp->ic->crypto_backoff); in complete_journal_encrypt()
947 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); in complete_journal_encrypt()
967 wait_for_completion(&comp->ic->crypto_backoff); in do_crypt()
968 reinit_completion(&comp->ic->crypto_backoff); in do_crypt()
971 dm_integrity_io_error(comp->ic, "encrypt", r); in do_crypt()
975 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, in crypt_journal() argument
984 source_sg = ic->journal_scatterlist; in crypt_journal()
985 target_sg = ic->journal_io_scatterlist; in crypt_journal()
987 source_sg = ic->journal_io_scatterlist; in crypt_journal()
988 target_sg = ic->journal_scatterlist; in crypt_journal()
997 rw_section_mac(ic, section, true); in crypt_journal()
999 req = ic->sk_requests[section]; in crypt_journal()
1000 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); in crypt_journal()
1019 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, in encrypt_journal() argument
1022 if (ic->journal_xor) in encrypt_journal()
1023 return xor_journal(ic, encrypt, section, n_sections, comp); in encrypt_journal()
1025 return crypt_journal(ic, encrypt, section, n_sections, comp); in encrypt_journal()
1033 dm_integrity_io_error(comp->ic, "writing journal", -EIO); in complete_journal_io()
1037 static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, in rw_journal_sectors() argument
1046 if (unlikely(dm_integrity_failed(ic))) { in rw_journal_sectors()
1057 if (ic->journal_io) in rw_journal_sectors()
1058 io_req.mem.ptr.pl = &ic->journal_io[pl_index]; in rw_journal_sectors()
1060 io_req.mem.ptr.pl = &ic->journal[pl_index]; in rw_journal_sectors()
1068 io_req.client = ic->io; in rw_journal_sectors()
1069 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; in rw_journal_sectors()
1070 io_loc.sector = ic->start + SB_SECTORS + sector; in rw_journal_sectors()
1075 dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ? in rw_journal_sectors()
1084 static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf, in rw_journal() argument
1090 sector = section * ic->journal_section_sectors; in rw_journal()
1091 n_sectors = n_sections * ic->journal_section_sectors; in rw_journal()
1093 rw_journal_sectors(ic, opf, sector, n_sectors, comp); in rw_journal()
1096 static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit… in write_journal() argument
1103 io_comp.ic = ic; in write_journal()
1106 if (commit_start + commit_sections <= ic->journal_sections) { in write_journal()
1108 if (ic->journal_io) { in write_journal()
1109 crypt_comp_1.ic = ic; in write_journal()
1112 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); in write_journal()
1116 rw_section_mac(ic, commit_start + i, true); in write_journal()
1118 rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start, in write_journal()
1124 to_end = ic->journal_sections - commit_start; in write_journal()
1125 if (ic->journal_io) { in write_journal()
1126 crypt_comp_1.ic = ic; in write_journal()
1129 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); in write_journal()
1131 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, in write_journal()
1135 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); in write_journal()
1138 crypt_comp_2.ic = ic; in write_journal()
1141 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); in write_journal()
1143 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
1148 rw_section_mac(ic, commit_start + i, true); in write_journal()
1149 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
1151 rw_section_mac(ic, i, true); in write_journal()
1153 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp); in write_journal()
1159 static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, in copy_from_journal() argument
1167 BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1)); in copy_from_journal()
1169 if (unlikely(dm_integrity_failed(ic))) { in copy_from_journal()
1174 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; in copy_from_journal()
1181 io_req.mem.ptr.pl = &ic->journal[pl_index]; in copy_from_journal()
1185 io_req.client = ic->io; in copy_from_journal()
1186 io_loc.bdev = ic->dev->bdev; in copy_from_journal()
1203 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool che… in add_new_range() argument
1205 struct rb_node **n = &ic->in_progress.rb_node; in add_new_range()
1208 …BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block -… in add_new_range()
1213 list_for_each_entry(range, &ic->wait_list, wait_entry) { in add_new_range()
1234 rb_insert_color(&new_range->node, &ic->in_progress); in add_new_range()
1239 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) in remove_range_unlocked() argument
1241 rb_erase(&range->node, &ic->in_progress); in remove_range_unlocked()
1242 while (unlikely(!list_empty(&ic->wait_list))) { in remove_range_unlocked()
1244 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); in remove_range_unlocked()
1249 if (!add_new_range(ic, last_range, false)) { in remove_range_unlocked()
1251 list_add(&last_range->wait_entry, &ic->wait_list); in remove_range_unlocked()
1259 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) in remove_range() argument
1263 spin_lock_irqsave(&ic->endio_wait.lock, flags); in remove_range()
1264 remove_range_unlocked(ic, range); in remove_range()
1265 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in remove_range()
1268 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) in wait_and_add_new_range() argument
1271 list_add_tail(&new_range->wait_entry, &ic->wait_list); in wait_and_add_new_range()
1275 spin_unlock_irq(&ic->endio_wait.lock); in wait_and_add_new_range()
1277 spin_lock_irq(&ic->endio_wait.lock); in wait_and_add_new_range()
1281 static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) in add_new_range_and_wait() argument
1283 if (unlikely(!add_new_range(ic, new_range, true))) in add_new_range_and_wait()
1284 wait_and_add_new_range(ic, new_range); in add_new_range_and_wait()
1293 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) in add_journal_node() argument
1301 link = &ic->journal_tree_root.rb_node; in add_journal_node()
1316 rb_insert_color(&node->node, &ic->journal_tree_root); in add_journal_node()
1319 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) in remove_journal_node() argument
1322 rb_erase(&node->node, &ic->journal_tree_root); in remove_journal_node()
1328 static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_se… in find_journal_node() argument
1330 struct rb_node *n = ic->journal_tree_root.rb_node; in find_journal_node()
1338 found = j - ic->journal_tree; in find_journal_node()
1350 static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector) in test_journal_node() argument
1355 if (unlikely(pos >= ic->journal_entries)) in test_journal_node()
1357 node = &ic->journal_tree[pos]; in test_journal_node()
1371 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) in find_newer_committed_node() argument
1388 next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries; in find_newer_committed_node()
1389 if (next_section >= ic->committed_section && in find_newer_committed_node()
1390 next_section < ic->committed_section + ic->n_committed_sections) in find_newer_committed_node()
1392 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) in find_newer_committed_node()
1402 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_bl… in dm_integrity_rw_tag() argument
1407 unsigned char mismatch_filler = !ic->discard; in dm_integrity_rw_tag()
1415 r = dm_integrity_failed(ic); in dm_integrity_rw_tag()
1419 data = dm_bufio_read(ic->bufio, *metadata_block, &b); in dm_integrity_rw_tag()
1423 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); in dm_integrity_rw_tag()
1435 if (likely(is_power_of_2(ic->tag_size))) { in dm_integrity_rw_tag()
1452 if (unlikely(hash_offset == ic->tag_size)) { in dm_integrity_rw_tag()
1459 mismatch_filler = !ic->discard; in dm_integrity_rw_tag()
1468 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { in dm_integrity_rw_tag()
1473 if (unlikely(!is_power_of_2(ic->tag_size))) in dm_integrity_rw_tag()
1474 hash_offset = (hash_offset + to_copy) % ic->tag_size; in dm_integrity_rw_tag()
1485 struct dm_integrity_c *ic; member
1494 dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO); in flush_notify()
1498 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) in dm_integrity_flush_buffers() argument
1503 if (!ic->meta_dev) in dm_integrity_flush_buffers()
1511 fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio); in dm_integrity_flush_buffers()
1512 fr.io_reg.bdev = ic->dev->bdev; in dm_integrity_flush_buffers()
1515 fr.ic = ic; in dm_integrity_flush_buffers()
1521 r = dm_bufio_write_dirty_buffers(ic->bufio); in dm_integrity_flush_buffers()
1523 dm_integrity_io_error(ic, "writing tags", r); in dm_integrity_flush_buffers()
1529 static void sleep_on_endio_wait(struct dm_integrity_c *ic) in sleep_on_endio_wait() argument
1533 __add_wait_queue(&ic->endio_wait, &wait); in sleep_on_endio_wait()
1535 spin_unlock_irq(&ic->endio_wait.lock); in sleep_on_endio_wait()
1537 spin_lock_irq(&ic->endio_wait.lock); in sleep_on_endio_wait()
1538 __remove_wait_queue(&ic->endio_wait, &wait); in sleep_on_endio_wait()
1543 struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer); in autocommit_fn() local
1545 if (likely(!dm_integrity_failed(ic))) in autocommit_fn()
1546 queue_work(ic->commit_wq, &ic->commit_work); in autocommit_fn()
1549 static void schedule_autocommit(struct dm_integrity_c *ic) in schedule_autocommit() argument
1551 if (!timer_pending(&ic->autocommit_timer)) in schedule_autocommit()
1552 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); in schedule_autocommit()
1555 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in submit_flush_bio() argument
1560 spin_lock_irqsave(&ic->endio_wait.lock, flags); in submit_flush_bio()
1562 bio_list_add(&ic->flush_bio_list, bio); in submit_flush_bio()
1563 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in submit_flush_bio()
1565 queue_work(ic->commit_wq, &ic->commit_work); in submit_flush_bio()
1568 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) in do_endio() argument
1572 r = dm_integrity_failed(ic); in do_endio()
1575 if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) { in do_endio()
1578 spin_lock_irqsave(&ic->endio_wait.lock, flags); in do_endio()
1579 bio_list_add(&ic->synchronous_bios, bio); in do_endio()
1580 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); in do_endio()
1581 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in do_endio()
1587 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in do_endio_flush() argument
1591 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) in do_endio_flush()
1592 submit_flush_bio(ic, dio); in do_endio_flush()
1594 do_endio(ic, bio); in do_endio_flush()
1600 struct dm_integrity_c *ic = dio->ic; in dec_in_flight() local
1603 remove_range(ic, &dio->range); in dec_in_flight()
1606 schedule_autocommit(ic); in dec_in_flight()
1615 queue_work(ic->offload_wq, &dio->work); in dec_in_flight()
1618 do_endio_flush(ic, dio); in dec_in_flight()
1636 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, in integrity_sector_checksum() argument
1640 SHASH_DESC_ON_STACK(req, ic->internal_hash); in integrity_sector_checksum()
1644 req->tfm = ic->internal_hash; in integrity_sector_checksum()
1648 dm_integrity_io_error(ic, "crypto_shash_init", r); in integrity_sector_checksum()
1652 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { in integrity_sector_checksum()
1653 r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE); in integrity_sector_checksum()
1655 dm_integrity_io_error(ic, "crypto_shash_update", r); in integrity_sector_checksum()
1662 dm_integrity_io_error(ic, "crypto_shash_update", r); in integrity_sector_checksum()
1666 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); in integrity_sector_checksum()
1668 dm_integrity_io_error(ic, "crypto_shash_update", r); in integrity_sector_checksum()
1674 dm_integrity_io_error(ic, "crypto_shash_final", r); in integrity_sector_checksum()
1678 digest_size = crypto_shash_digestsize(ic->internal_hash); in integrity_sector_checksum()
1679 if (unlikely(digest_size < ic->tag_size)) in integrity_sector_checksum()
1680 memset(result + digest_size, 0, ic->tag_size - digest_size); in integrity_sector_checksum()
1686 get_random_bytes(result, ic->tag_size); in integrity_sector_checksum()
1692 struct dm_integrity_c *ic = dio->ic; in integrity_recheck() local
1698 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in integrity_recheck()
1699 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, in integrity_recheck()
1701 sector = get_data_sector(ic, area, offset); in integrity_recheck()
1704 page = mempool_alloc(&ic->recheck_pool, GFP_NOIO); in integrity_recheck()
1720 io_req.client = ic->io; in integrity_recheck()
1721 io_loc.bdev = ic->dev->bdev; in integrity_recheck()
1723 io_loc.count = ic->sectors_per_block; in integrity_recheck()
1739 integrity_sector_checksum(ic, logical_sector, buffer, checksum); in integrity_recheck()
1740 r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block, in integrity_recheck()
1741 &dio->metadata_offset, ic->tag_size, TAG_CMP); in integrity_recheck()
1746 atomic64_inc(&ic->number_of_mismatches); in integrity_recheck()
1756 memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT); in integrity_recheck()
1759 pos += ic->sectors_per_block << SECTOR_SHIFT; in integrity_recheck()
1760 sector += ic->sectors_per_block; in integrity_recheck()
1761 logical_sector += ic->sectors_per_block; in integrity_recheck()
1765 mempool_free(page, &ic->recheck_pool); in integrity_recheck()
1771 struct dm_integrity_c *ic = dio->ic; in integrity_metadata() local
1775 if (ic->internal_hash) { in integrity_metadata()
1778 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash); in integrity_metadata()
1781 unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; in integrity_metadata()
1786 if (unlikely(ic->mode == 'R')) in integrity_metadata()
1790 …checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size +… in integrity_metadata()
1806 unsigned int max_blocks = max_size / ic->tag_size; in integrity_metadata()
1811 unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); in integrity_metadata()
1814 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1815 this_step_blocks * ic->tag_size, TAG_WRITE); in integrity_metadata()
1822 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); in integrity_metadata()
1843 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr); in integrity_metadata()
1844 checksums_ptr += ic->tag_size; in integrity_metadata()
1845 sectors_to_process -= ic->sectors_per_block; in integrity_metadata()
1846 pos += ic->sectors_per_block << SECTOR_SHIFT; in integrity_metadata()
1847 sector += ic->sectors_per_block; in integrity_metadata()
1851 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1883 sector_to_block(ic, data_to_process); in integrity_metadata()
1884 data_to_process *= ic->tag_size; in integrity_metadata()
1893 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1911 static inline bool dm_integrity_check_limits(struct dm_integrity_c *ic, sector_t logical_sector, st… in dm_integrity_check_limits() argument
1913 if (unlikely(logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { in dm_integrity_check_limits()
1916 ic->provided_data_sectors); in dm_integrity_check_limits()
1919 if (unlikely((logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) { in dm_integrity_check_limits()
1921 ic->sectors_per_block, in dm_integrity_check_limits()
1925 if (ic->sectors_per_block > 1 && likely(bio_op(bio) != REQ_OP_DISCARD)) { in dm_integrity_check_limits()
1930 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { in dm_integrity_check_limits()
1932 bv.bv_offset, bv.bv_len, ic->sectors_per_block); in dm_integrity_check_limits()
1942 struct dm_integrity_c *ic = ti->private; in dm_integrity_map() local
1948 dio->ic = ic; in dm_integrity_map()
1952 if (ic->mode == 'I') { in dm_integrity_map()
1953 bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector); in dm_integrity_map()
1976 submit_flush_bio(ic, dio); in dm_integrity_map()
1989 if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio))) in dm_integrity_map()
1993 if (!ic->internal_hash) { in dm_integrity_map()
1995 unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; in dm_integrity_map()
1997 if (ic->log2_tag_size >= 0) in dm_integrity_map()
1998 wanted_tag_size <<= ic->log2_tag_size; in dm_integrity_map()
2000 wanted_tag_size *= ic->tag_size; in dm_integrity_map()
2014 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ)) in dm_integrity_map()
2017 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in dm_integrity_map()
2018 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in dm_integrity_map()
2019 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); in dm_integrity_map()
2028 struct dm_integrity_c *ic = dio->ic; in __journal_read_write() local
2048 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); in __journal_read_write()
2059 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); in __journal_read_write()
2064 js = access_journal_data(ic, journal_section, journal_entry); in __journal_read_write()
2072 } while (++s < ic->sectors_per_block); in __journal_read_write()
2074 if (ic->internal_hash) { in __journal_read_write()
2077 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); in __journal_read_write()
2078 if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { in __journal_read_write()
2088 if (!ic->internal_hash) { in __journal_read_write()
2090 unsigned int tag_todo = ic->tag_size; in __journal_read_write()
2091 char *tag_ptr = journal_entry_tag(ic, je); in __journal_read_write()
2117 js = access_journal_data(ic, journal_section, journal_entry); in __journal_read_write()
2118 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
2123 } while (++s < ic->sectors_per_block); in __journal_read_write()
2125 if (ic->internal_hash) { in __journal_read_write()
2126 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash); in __journal_read_write()
2128 if (unlikely(digest_size > ic->tag_size)) { in __journal_read_write()
2131 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); in __journal_read_write()
2132 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); in __journal_read_write()
2134 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je)); in __journal_read_write()
2139 logical_sector += ic->sectors_per_block; in __journal_read_write()
2142 if (unlikely(journal_entry == ic->journal_section_entries)) { in __journal_read_write()
2145 wraparound_section(ic, &journal_section); in __journal_read_write()
2148 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; in __journal_read_write()
2149 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
2158 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) in __journal_read_write()
2159 wake_up(&ic->copy_to_journal_wait); in __journal_read_write()
2160 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) in __journal_read_write()
2161 queue_work(ic->commit_wq, &ic->commit_work); in __journal_read_write()
2163 schedule_autocommit(ic); in __journal_read_write()
2165 remove_range(ic, &dio->range); in __journal_read_write()
2171 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in __journal_read_write()
2172 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in __journal_read_write()
2181 struct dm_integrity_c *ic = dio->ic; in dm_integrity_map_continue() local
2188 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ; in dm_integrity_map_continue()
2190 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D') in dm_integrity_map_continue()
2195 queue_work(ic->offload_wq, &dio->work); in dm_integrity_map_continue()
2200 spin_lock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2202 if (unlikely(dm_integrity_failed(ic))) { in dm_integrity_map_continue()
2203 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2204 do_endio(ic, bio); in dm_integrity_map_continue()
2209 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) { in dm_integrity_map_continue()
2215 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block); in dm_integrity_map_continue()
2219 sleep_on_endio_wait(ic); in dm_integrity_map_continue()
2222 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; in dm_integrity_map_continue()
2223 ic->free_sectors -= range_sectors; in dm_integrity_map_continue()
2224 journal_section = ic->free_section; in dm_integrity_map_continue()
2225 journal_entry = ic->free_section_entry; in dm_integrity_map_continue()
2227 next_entry = ic->free_section_entry + range_sectors; in dm_integrity_map_continue()
2228 ic->free_section_entry = next_entry % ic->journal_section_entries; in dm_integrity_map_continue()
2229 ic->free_section += next_entry / ic->journal_section_entries; in dm_integrity_map_continue()
2230 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; in dm_integrity_map_continue()
2231 wraparound_section(ic, &ic->free_section); in dm_integrity_map_continue()
2233 pos = journal_section * ic->journal_section_entries + journal_entry; in dm_integrity_map_continue()
2240 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); in dm_integrity_map_continue()
2242 if (unlikely(pos >= ic->journal_entries)) in dm_integrity_map_continue()
2245 je = access_journal_entry(ic, ws, we); in dm_integrity_map_continue()
2249 if (unlikely(we == ic->journal_section_entries)) { in dm_integrity_map_continue()
2252 wraparound_section(ic, &ws); in dm_integrity_map_continue()
2254 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); in dm_integrity_map_continue()
2256 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2261 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2269 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { in dm_integrity_map_continue()
2270 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) in dm_integrity_map_continue()
2277 if (unlikely(!add_new_range(ic, &dio->range, true))) { in dm_integrity_map_continue()
2285 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2287 queue_work(ic->wait_wq, &dio->work); in dm_integrity_map_continue()
2291 dio->range.n_sectors = ic->sectors_per_block; in dm_integrity_map_continue()
2292 wait_and_add_new_range(ic, &dio->range); in dm_integrity_map_continue()
2302 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2304 remove_range_unlocked(ic, &dio->range); in dm_integrity_map_continue()
2309 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) { in dm_integrity_map_continue()
2313 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2316 remove_range_unlocked(ic, &dio->range); in dm_integrity_map_continue()
2317 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2318 queue_work(ic->commit_wq, &ic->commit_work); in dm_integrity_map_continue()
2319 flush_workqueue(ic->commit_wq); in dm_integrity_map_continue()
2320 queue_work(ic->writer_wq, &ic->writer_work); in dm_integrity_map_continue()
2321 flush_workqueue(ic->writer_wq); in dm_integrity_map_continue()
2326 recalc_sector = le64_to_cpu(ic->sb->recalc_sector); in dm_integrity_map_continue()
2327 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2330 journal_section = journal_read_pos / ic->journal_section_entries; in dm_integrity_map_continue()
2331 journal_entry = journal_read_pos % ic->journal_section_entries; in dm_integrity_map_continue()
2335 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) { in dm_integrity_map_continue()
2336 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in dm_integrity_map_continue()
2340 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector); in dm_integrity_map_continue()
2344 queue_work(ic->writer_wq, &bbs->work); in dm_integrity_map_continue()
2358 bio_set_dev(bio, ic->dev->bdev); in dm_integrity_map_continue()
2364 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { in dm_integrity_map_continue()
2366 dm_integrity_flush_buffers(ic, false); in dm_integrity_map_continue()
2380 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && in dm_integrity_map_continue()
2383 if (ic->mode == 'B') { in dm_integrity_map_continue()
2384 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, in dm_integrity_map_continue()
2396 queue_work(ic->metadata_wq, &dio->work); in dm_integrity_map_continue()
2405 do_endio_flush(ic, dio); in dm_integrity_map_continue()
2410 struct dm_integrity_c *ic = dio->ic; in dm_integrity_map_inline() local
2422 bio_set_dev(bio, ic->dev->bdev); in dm_integrity_map_inline()
2429 dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block); in dm_integrity_map_inline()
2430 digest_size = crypto_shash_digestsize(ic->internal_hash); in dm_integrity_map_inline()
2431 extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; in dm_integrity_map_inline()
2437 unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block; in dm_integrity_map_inline()
2452 if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) in dm_integrity_map_inline()
2461 recalc_sector = le64_to_cpu(smp_load_acquire(&ic->sb->recalc_sector)); in dm_integrity_map_inline()
2465 spin_lock_irq(&ic->endio_wait.lock); in dm_integrity_map_inline()
2466 recalc_sector = le64_to_cpu(ic->sb->recalc_sector); in dm_integrity_map_inline()
2469 if (unlikely(!add_new_range(ic, &dio->range, true))) { in dm_integrity_map_inline()
2471 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_inline()
2473 queue_work(ic->wait_wq, &dio->work); in dm_integrity_map_inline()
2476 wait_and_add_new_range(ic, &dio->range); in dm_integrity_map_inline()
2480 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_inline()
2484 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO)); in dm_integrity_map_inline()
2490 if (unlikely(!dm_integrity_check_limits(ic, bio->bi_iter.bi_sector, bio))) { in dm_integrity_map_inline()
2494 bio->bi_iter.bi_sector += ic->start + SB_SECTORS; in dm_integrity_map_inline()
2508 if (ic->tag_size < ic->tuple_size) in dm_integrity_map_inline()
2509 memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size); in dm_integrity_map_inline()
2510 …integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, dio->integrity_payload + po… in dm_integrity_map_inline()
2512 pos += ic->tuple_size; in dm_integrity_map_inline()
2513 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_map_inline()
2530 struct dm_integrity_c *ic = dio->ic; in dm_integrity_free_payload() local
2532 mempool_free(virt_to_page(dio->integrity_payload), &ic->recheck_pool); in dm_integrity_free_payload()
2543 struct dm_integrity_c *ic = dio->ic; in dm_integrity_inline_recheck() local
2547 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO)); in dm_integrity_inline_recheck()
2559 outgoing_bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recheck_bios); in dm_integrity_inline_recheck()
2561 …r = bio_add_page(outgoing_bio, virt_to_page(outgoing_data), ic->sectors_per_block << SECTOR_SHIFT,… in dm_integrity_inline_recheck()
2562 if (unlikely(r != (ic->sectors_per_block << SECTOR_SHIFT))) { in dm_integrity_inline_recheck()
2577 r = bio_integrity_add_page(outgoing_bio, virt_to_page(dio->integrity_payload), ic->tuple_size, 0); in dm_integrity_inline_recheck()
2578 if (unlikely(r != ic->tuple_size)) { in dm_integrity_inline_recheck()
2585 outgoing_bio->bi_iter.bi_sector = dio->bio_details.bi_iter.bi_sector + ic->start + SB_SECTORS; in dm_integrity_inline_recheck()
2596 integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest); in dm_integrity_inline_recheck()
2597 …mneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)… in dm_integrity_inline_recheck()
2599 ic->dev->bdev, dio->bio_details.bi_iter.bi_sector); in dm_integrity_inline_recheck()
2600 atomic64_inc(&ic->number_of_mismatches); in dm_integrity_inline_recheck()
2611 memcpy(mem, outgoing_data, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_inline_recheck()
2614 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_inline_recheck()
2622 struct dm_integrity_c *ic = ti->private; in dm_integrity_end_io() local
2623 if (ic->mode == 'I') { in dm_integrity_end_io()
2627 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && in dm_integrity_end_io()
2634 //memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_end_io()
2635 integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest); in dm_integrity_end_io()
2637 min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) { in dm_integrity_end_io()
2641 queue_work(ic->offload_wq, &dio->work); in dm_integrity_end_io()
2645 pos += ic->tuple_size; in dm_integrity_end_io()
2646 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_end_io()
2652 remove_range(ic, &dio->range); in dm_integrity_end_io()
2660 struct dm_integrity_c *ic = dio->ic; in integrity_bio_wait() local
2662 if (ic->mode == 'I') { in integrity_bio_wait()
2682 static void pad_uncommitted(struct dm_integrity_c *ic) in pad_uncommitted() argument
2684 if (ic->free_section_entry) { in pad_uncommitted()
2685 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; in pad_uncommitted()
2686 ic->free_section_entry = 0; in pad_uncommitted()
2687 ic->free_section++; in pad_uncommitted()
2688 wraparound_section(ic, &ic->free_section); in pad_uncommitted()
2689 ic->n_uncommitted_sections++; in pad_uncommitted()
2691 if (WARN_ON(ic->journal_sections * ic->journal_section_entries != in pad_uncommitted()
2692 (ic->n_uncommitted_sections + ic->n_committed_sections) * in pad_uncommitted()
2693 ic->journal_section_entries + ic->free_sectors)) { in pad_uncommitted()
2697 ic->journal_sections, ic->journal_section_entries, in pad_uncommitted()
2698 ic->n_uncommitted_sections, ic->n_committed_sections, in pad_uncommitted()
2699 ic->journal_section_entries, ic->free_sectors); in pad_uncommitted()
2705 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); in integrity_commit() local
2710 del_timer(&ic->autocommit_timer); in integrity_commit()
2712 if (ic->mode == 'I') in integrity_commit()
2715 spin_lock_irq(&ic->endio_wait.lock); in integrity_commit()
2716 flushes = bio_list_get(&ic->flush_bio_list); in integrity_commit()
2717 if (unlikely(ic->mode != 'J')) { in integrity_commit()
2718 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
2719 dm_integrity_flush_buffers(ic, true); in integrity_commit()
2723 pad_uncommitted(ic); in integrity_commit()
2724 commit_start = ic->uncommitted_section; in integrity_commit()
2725 commit_sections = ic->n_uncommitted_sections; in integrity_commit()
2726 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
2731 ic->wrote_to_journal = true; in integrity_commit()
2735 for (j = 0; j < ic->journal_section_entries; j++) { in integrity_commit()
2738 je = access_journal_entry(ic, i, j); in integrity_commit()
2739 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); in integrity_commit()
2741 for (j = 0; j < ic->journal_section_sectors; j++) { in integrity_commit()
2744 js = access_journal(ic, i, j); in integrity_commit()
2745 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); in integrity_commit()
2748 if (unlikely(i >= ic->journal_sections)) in integrity_commit()
2749 ic->commit_seq = next_commit_seq(ic->commit_seq); in integrity_commit()
2750 wraparound_section(ic, &i); in integrity_commit()
2754 write_journal(ic, commit_start, commit_sections); in integrity_commit()
2756 spin_lock_irq(&ic->endio_wait.lock); in integrity_commit()
2757 ic->uncommitted_section += commit_sections; in integrity_commit()
2758 wraparound_section(ic, &ic->uncommitted_section); in integrity_commit()
2759 ic->n_uncommitted_sections -= commit_sections; in integrity_commit()
2760 ic->n_committed_sections += commit_sections; in integrity_commit()
2761 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
2763 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) in integrity_commit()
2764 queue_work(ic->writer_wq, &ic->writer_work); in integrity_commit()
2771 do_endio(ic, flushes); in integrity_commit()
2780 struct dm_integrity_c *ic = comp->ic; in complete_copy_from_journal() local
2782 remove_range(ic, &io->range); in complete_copy_from_journal()
2783 mempool_free(io, &ic->journal_io_mempool); in complete_copy_from_journal()
2785 dm_integrity_io_error(ic, "copying from journal", -EIO); in complete_copy_from_journal()
2789 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, in restore_last_bytes() argument
2797 } while (++s < ic->sectors_per_block); in restore_last_bytes()
2800 static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start, in do_journal_write() argument
2809 comp.ic = ic; in do_journal_write()
2814 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { in do_journal_write()
2818 rw_section_mac(ic, i, false); in do_journal_write()
2819 for (j = 0; j < ic->journal_section_entries; j++) { in do_journal_write()
2820 struct journal_entry *je = access_journal_entry(ic, i, j); in do_journal_write()
2832 if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) { in do_journal_write()
2833 dm_integrity_io_error(ic, "invalid sector in journal", -EIO); in do_journal_write()
2834 sec &= ~(sector_t)(ic->sectors_per_block - 1); in do_journal_write()
2836 if (unlikely(sec >= ic->provided_data_sectors)) { in do_journal_write()
2841 get_area_and_offset(ic, sec, &area, &offset); in do_journal_write()
2842 restore_last_bytes(ic, access_journal_data(ic, i, j), je); in do_journal_write()
2843 for (k = j + 1; k < ic->journal_section_entries; k++) { in do_journal_write()
2844 struct journal_entry *je2 = access_journal_entry(ic, i, k); in do_journal_write()
2851 if (unlikely(sec2 >= ic->provided_data_sectors)) in do_journal_write()
2853 get_area_and_offset(ic, sec2, &area2, &offset2); in do_journal_write()
2854 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) in do_journal_write()
2856 restore_last_bytes(ic, access_journal_data(ic, i, k), je2); in do_journal_write()
2860 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO); in do_journal_write()
2863 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; in do_journal_write()
2865 spin_lock_irq(&ic->endio_wait.lock); in do_journal_write()
2866 add_new_range_and_wait(ic, &io->range); in do_journal_write()
2869 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; in do_journal_write()
2872 while (j < k && find_newer_committed_node(ic, &section_node[j])) { in do_journal_write()
2873 struct journal_entry *je2 = access_journal_entry(ic, i, j); in do_journal_write()
2876 remove_journal_node(ic, &section_node[j]); in do_journal_write()
2878 sec += ic->sectors_per_block; in do_journal_write()
2879 offset += ic->sectors_per_block; in do_journal_write()
2881 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) { in do_journal_write()
2882 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); in do_journal_write()
2885 remove_journal_node(ic, &section_node[k - 1]); in do_journal_write()
2889 remove_range_unlocked(ic, &io->range); in do_journal_write()
2890 spin_unlock_irq(&ic->endio_wait.lock); in do_journal_write()
2891 mempool_free(io, &ic->journal_io_mempool); in do_journal_write()
2895 remove_journal_node(ic, &section_node[l]); in do_journal_write()
2897 spin_unlock_irq(&ic->endio_wait.lock); in do_journal_write()
2899 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); in do_journal_write()
2902 struct journal_entry *je2 = access_journal_entry(ic, i, l); in do_journal_write()
2908 ic->internal_hash) { in do_journal_write()
2911 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), in do_journal_write()
2912 (char *)access_journal_data(ic, i, l), test_tag); in do_journal_write()
2913 if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) { in do_journal_write()
2914 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); in do_journal_write()
2915 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0); in do_journal_write()
2920 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, in do_journal_write()
2921 ic->tag_size, TAG_WRITE); in do_journal_write()
2923 dm_integrity_io_error(ic, "reading tags", r); in do_journal_write()
2927 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, in do_journal_write()
2928 (k - j) << ic->sb->log2_sectors_per_block, in do_journal_write()
2929 get_data_sector(ic, area, offset), in do_journal_write()
2936 dm_bufio_write_dirty_buffers_async(ic->bufio); in do_journal_write()
2943 dm_integrity_flush_buffers(ic, true); in do_journal_write()
2948 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); in integrity_writer() local
2952 spin_lock_irq(&ic->endio_wait.lock); in integrity_writer()
2953 write_start = ic->committed_section; in integrity_writer()
2954 write_sections = ic->n_committed_sections; in integrity_writer()
2955 spin_unlock_irq(&ic->endio_wait.lock); in integrity_writer()
2960 do_journal_write(ic, write_start, write_sections, false); in integrity_writer()
2962 spin_lock_irq(&ic->endio_wait.lock); in integrity_writer()
2964 ic->committed_section += write_sections; in integrity_writer()
2965 wraparound_section(ic, &ic->committed_section); in integrity_writer()
2966 ic->n_committed_sections -= write_sections; in integrity_writer()
2968 prev_free_sectors = ic->free_sectors; in integrity_writer()
2969 ic->free_sectors += write_sections * ic->journal_section_entries; in integrity_writer()
2971 wake_up_locked(&ic->endio_wait); in integrity_writer()
2973 spin_unlock_irq(&ic->endio_wait.lock); in integrity_writer()
2976 static void recalc_write_super(struct dm_integrity_c *ic) in recalc_write_super() argument
2980 dm_integrity_flush_buffers(ic, false); in recalc_write_super()
2981 if (dm_integrity_failed(ic)) in recalc_write_super()
2984 r = sync_rw_sb(ic, REQ_OP_WRITE); in recalc_write_super()
2986 dm_integrity_io_error(ic, "writing superblock", r); in recalc_write_super()
2991 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); in integrity_recalc() local
3013 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block) in integrity_recalc()
3018 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; in integrity_recalc()
3019 if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) in integrity_recalc()
3020 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; in integrity_recalc()
3028 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector)); in integrity_recalc()
3030 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc()
3034 if (unlikely(dm_post_suspending(ic->ti))) in integrity_recalc()
3037 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); in integrity_recalc()
3038 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { in integrity_recalc()
3039 if (ic->mode == 'B') { in integrity_recalc()
3040 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in integrity_recalc()
3042 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); in integrity_recalc()
3047 get_area_and_offset(ic, range.logical_sector, &area, &offset); in integrity_recalc()
3048 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector); in integrity_recalc()
3049 if (!ic->meta_dev) in integrity_recalc()
3050 …range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsign… in integrity_recalc()
3052 add_new_range_and_wait(ic, &range); in integrity_recalc()
3053 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc()
3057 if (ic->mode == 'B') { in integrity_recalc()
3058 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) in integrity_recalc()
3061 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, in integrity_recalc()
3062 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { in integrity_recalc()
3063 logical_sector += ic->sectors_per_block; in integrity_recalc()
3064 n_sectors -= ic->sectors_per_block; in integrity_recalc()
3067 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block, in integrity_recalc()
3068 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { in integrity_recalc()
3069 n_sectors -= ic->sectors_per_block; in integrity_recalc()
3072 get_area_and_offset(ic, logical_sector, &area, &offset); in integrity_recalc()
3078 recalc_write_super(ic); in integrity_recalc()
3079 if (ic->mode == 'B') in integrity_recalc()
3080 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); in integrity_recalc()
3085 if (unlikely(dm_integrity_failed(ic))) in integrity_recalc()
3092 io_req.client = ic->io; in integrity_recalc()
3093 io_loc.bdev = ic->dev->bdev; in integrity_recalc()
3094 io_loc.sector = get_data_sector(ic, area, offset); in integrity_recalc()
3099 dm_integrity_io_error(ic, "reading data", r); in integrity_recalc()
3104 for (i = 0; i < n_sectors; i += ic->sectors_per_block) { in integrity_recalc()
3105 integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t); in integrity_recalc()
3106 t += ic->tag_size; in integrity_recalc()
3109 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); in integrity_recalc()
3111 …r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_W… in integrity_recalc()
3113 dm_integrity_io_error(ic, "writing tags", r); in integrity_recalc()
3117 if (ic->mode == 'B') { in integrity_recalc()
3121 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << in integrity_recalc()
3122 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in integrity_recalc()
3124 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << in integrity_recalc()
3125 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in integrity_recalc()
3126 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); in integrity_recalc()
3132 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc()
3133 remove_range_unlocked(ic, &range); in integrity_recalc()
3134 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); in integrity_recalc()
3138 remove_range(ic, &range); in integrity_recalc()
3142 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc()
3144 recalc_write_super(ic); in integrity_recalc()
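The 'B'-mode path above narrows each recalculation range by dropping leading and trailing blocks whose bitmap bits are already clear before reading and checksumming the data. A minimal userspace sketch of that trimming, assuming a hypothetical block_is_clean() in place of block_bitmap_op(..., BITMAP_OP_TEST_ALL_CLEAR) and an illustrative block size:

```c
/*
 * Sketch of the head/tail trimming done in bitmap mode before recalculating
 * a range: skip leading and trailing blocks whose bits are already clear.
 * block_is_clean() is a hypothetical stand-in for
 * block_bitmap_op(..., BITMAP_OP_TEST_ALL_CLEAR); sizes are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

typedef uint64_t sector_t;

#define SECTORS_PER_BLOCK 8	/* assumed: 4KiB blocks on 512-byte sectors */

static bool block_is_clean(const bool *dirty, sector_t sector)
{
	return !dirty[sector / SECTORS_PER_BLOCK];
}

/* Shrink [*start, *start + *n_sectors) so it covers only dirty blocks. */
static void trim_clean_blocks(const bool *dirty, sector_t *start, sector_t *n_sectors)
{
	while (*n_sectors && block_is_clean(dirty, *start)) {
		*start += SECTORS_PER_BLOCK;
		*n_sectors -= SECTORS_PER_BLOCK;
	}
	while (*n_sectors &&
	       block_is_clean(dirty, *start + *n_sectors - SECTORS_PER_BLOCK))
		*n_sectors -= SECTORS_PER_BLOCK;
}

int main(void)
{
	bool dirty[8] = { false, false, true, true, false, true, false, false };
	sector_t start = 0, n = 8 * SECTORS_PER_BLOCK;

	trim_clean_blocks(dirty, &start, &n);
	printf("recalculate sectors [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)(start + n));
	return 0;
}
```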
3153 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); in integrity_recalc_inline() local
3172 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block) in integrity_recalc_inline()
3178 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size; in integrity_recalc_inline()
3179 if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size) in integrity_recalc_inline()
3180 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size; in integrity_recalc_inline()
3188 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc_inline()
3191 if (unlikely(dm_post_suspending(ic->ti))) in integrity_recalc_inline()
3194 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); in integrity_recalc_inline()
3195 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) in integrity_recalc_inline()
3197 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector); in integrity_recalc_inline()
3199 add_new_range_and_wait(ic, &range); in integrity_recalc_inline()
3200 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc_inline()
3203 recalc_write_super(ic); in integrity_recalc_inline()
3207 if (unlikely(dm_integrity_failed(ic))) in integrity_recalc_inline()
3212 bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios); in integrity_recalc_inline()
3213 bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; in integrity_recalc_inline()
3218 dm_integrity_io_error(ic, "reading data", r); in integrity_recalc_inline()
3223 for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) { in integrity_recalc_inline()
3224 memset(t, 0, ic->tuple_size); in integrity_recalc_inline()
3225 integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t); in integrity_recalc_inline()
3226 t += ic->tuple_size; in integrity_recalc_inline()
3229 bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios); in integrity_recalc_inline()
3230 bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; in integrity_recalc_inline()
3242 dm_integrity_io_error(ic, "attaching integrity tags", -ENOMEM); in integrity_recalc_inline()
3249 dm_integrity_io_error(ic, "writing data", r); in integrity_recalc_inline()
3254 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc_inline()
3255 remove_range_unlocked(ic, &range); in integrity_recalc_inline()
3258 smp_store_release(&ic->sb->recalc_sector, cpu_to_le64(range.logical_sector + range.n_sectors)); in integrity_recalc_inline()
3260 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); in integrity_recalc_inline()
3265 remove_range(ic, &range); in integrity_recalc_inline()
3269 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc_inline()
3271 recalc_write_super(ic); in integrity_recalc_inline()
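The inline recalculation loop above checksums one block at a time and packs the results back-to-back into a tag buffer, zeroing each tuple and advancing by tuple_size per block. A standalone sketch of that packing, with a made-up checksum_block() standing in for integrity_sector_checksum() and illustrative tuple/block sizes:

```c
/*
 * Sketch of packing per-block checksums into one contiguous tag buffer,
 * as the loop around integrity_sector_checksum() above does.
 * checksum_block() is a hypothetical stand-in for the internal hash.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SECTOR_SHIFT		9
#define SECTORS_PER_BLOCK	8	/* assumed 4KiB blocks */
#define TUPLE_SIZE		16	/* assumed per-block tag slot */

/* Hypothetical checksum: fold the block into the first 4 tag bytes. */
static void checksum_block(uint64_t sector, const uint8_t *data, uint8_t *tag)
{
	uint32_t acc = (uint32_t)sector;
	size_t i;

	for (i = 0; i < (size_t)SECTORS_PER_BLOCK << SECTOR_SHIFT; i++)
		acc = (acc << 1 | acc >> 31) ^ data[i];
	memcpy(tag, &acc, sizeof(acc));
}

static void pack_tags(uint64_t first_sector, const uint8_t *buf,
		      unsigned int n_sectors, uint8_t *tags)
{
	uint8_t *t = tags;
	unsigned int i;

	for (i = 0; i < n_sectors; i += SECTORS_PER_BLOCK) {
		memset(t, 0, TUPLE_SIZE);	/* unused tail bytes stay zero */
		checksum_block(first_sector + i, buf + ((size_t)i << SECTOR_SHIFT), t);
		t += TUPLE_SIZE;
	}
}

int main(void)
{
	static uint8_t data[4 * SECTORS_PER_BLOCK << SECTOR_SHIFT];
	uint8_t tags[4 * TUPLE_SIZE];

	pack_tags(0, data, 4 * SECTORS_PER_BLOCK, tags);
	printf("packed %zu tag bytes for 4 blocks\n", sizeof(tags));
	return 0;
}
```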
3281 struct dm_integrity_c *ic = bbs->ic; in bitmap_block_work() local
3298 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in bitmap_block_work()
3300 remove_range(ic, &dio->range); in bitmap_block_work()
3302 queue_work(ic->offload_wq, &dio->work); in bitmap_block_work()
3304 block_bitmap_op(ic, ic->journal, dio->range.logical_sector, in bitmap_block_work()
3313 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, in bitmap_block_work()
3320 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in bitmap_block_work()
3323 remove_range(ic, &dio->range); in bitmap_block_work()
3325 queue_work(ic->offload_wq, &dio->work); in bitmap_block_work()
3328 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); in bitmap_block_work()
3333 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work); in bitmap_flush_work() local
3338 dm_integrity_flush_buffers(ic, false); in bitmap_flush_work()
3341 range.n_sectors = ic->provided_data_sectors; in bitmap_flush_work()
3343 spin_lock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
3344 add_new_range_and_wait(ic, &range); in bitmap_flush_work()
3345 spin_unlock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
3347 dm_integrity_flush_buffers(ic, true); in bitmap_flush_work()
3349 limit = ic->provided_data_sectors; in bitmap_flush_work()
3350 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { in bitmap_flush_work()
3351 limit = le64_to_cpu(ic->sb->recalc_sector) in bitmap_flush_work()
3352 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit) in bitmap_flush_work()
3353 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in bitmap_flush_work()
3356 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); in bitmap_flush_work()
3357 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); in bitmap_flush_work()
3359 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, in bitmap_flush_work()
3360 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in bitmap_flush_work()
3362 spin_lock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
3363 remove_range_unlocked(ic, &range); in bitmap_flush_work()
3364 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) { in bitmap_flush_work()
3366 spin_unlock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
3367 spin_lock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
3369 spin_unlock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
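Both the recalculation and flush paths above convert between data sectors and bitmap bits: one bit covers 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors, and the recalculation cursor is rounded down to whole bits so partially recalculated regions keep their bits set. A small sketch of that mapping, with assumed shift values:

```c
/*
 * Sketch of the sector <-> bitmap-bit mapping used when clearing the dirty
 * bitmap.  The shift values below are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define LOG2_SECTORS_PER_BLOCK		3	/* assumed: 4KiB blocks   */
#define LOG2_BLOCKS_PER_BITMAP_BIT	4	/* assumed: 64KiB per bit */
#define LOG2_SECTORS_PER_BIT	(LOG2_SECTORS_PER_BLOCK + LOG2_BLOCKS_PER_BITMAP_BIT)

/* Round a sector count down to whole bitmap bits (cf. the limit above). */
static uint64_t round_down_to_bit(uint64_t sectors)
{
	return sectors >> LOG2_SECTORS_PER_BIT << LOG2_SECTORS_PER_BIT;
}

/* Which bitmap bit covers a given data sector? */
static uint64_t sector_to_bit(uint64_t sector)
{
	return sector >> LOG2_SECTORS_PER_BIT;
}

int main(void)
{
	uint64_t recalc_sector = 300000;	/* example cursor position */

	printf("clear bitmap up to sector %llu (bit %llu)\n",
	       (unsigned long long)round_down_to_bit(recalc_sector),
	       (unsigned long long)sector_to_bit(round_down_to_bit(recalc_sector)));
	return 0;
}
```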
3373 static void init_journal(struct dm_integrity_c *ic, unsigned int start_section, in init_journal() argument
3383 wraparound_section(ic, &i); in init_journal()
3384 for (j = 0; j < ic->journal_section_sectors; j++) { in init_journal()
3385 struct journal_sector *js = access_journal(ic, i, j); in init_journal()
3389 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); in init_journal()
3391 for (j = 0; j < ic->journal_section_entries; j++) { in init_journal()
3392 struct journal_entry *je = access_journal_entry(ic, i, j); in init_journal()
3398 write_journal(ic, start_section, n_sections); in init_journal()
3401 static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t i… in find_commit_seq() argument
3406 if (dm_integrity_commit_id(ic, i, j, k) == id) in find_commit_seq()
3409 dm_integrity_io_error(ic, "journal commit id", -EIO); in find_commit_seq()
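find_commit_seq() above recovers which commit sequence a journal sector belongs to by recomputing the candidate ids and comparing against the stored one, reporting an I/O error when none matches. A userspace sketch of the same lookup, using the seed values listed later in create_journal() and a hypothetical make_commit_id() mixing function:

```c
/*
 * Sketch of the commit-sequence lookup done during journal replay.
 * make_commit_id() is a hypothetical stand-in for dm_integrity_commit_id().
 */
#include <stdint.h>
#include <stdio.h>

#define N_COMMIT_IDS 4

static const uint64_t commit_seeds[N_COMMIT_IDS] = {
	0x1111111111111111ULL, 0x2222222222222222ULL,
	0x3333333333333333ULL, 0x4444444444444444ULL,
};

/* Hypothetical id construction: seed mixed with section/sector position. */
static uint64_t make_commit_id(unsigned int i, unsigned int j, unsigned int seq)
{
	return commit_seeds[seq] ^ ((uint64_t)i << 32 | j);
}

/* Return the seq that produced @id, or -1 if the sector is corrupted. */
static int find_commit_seq(unsigned int i, unsigned int j, uint64_t id)
{
	unsigned int k;

	for (k = 0; k < N_COMMIT_IDS; k++)
		if (make_commit_id(i, j, k) == id)
			return (int)k;
	return -1;	/* the driver reports "journal commit id" -EIO here */
}

int main(void)
{
	uint64_t id = make_commit_id(7, 3, 2);

	printf("seq = %d\n", find_commit_seq(7, 3, id));
	return 0;
}
```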
3413 static void replay_journal(struct dm_integrity_c *ic) in replay_journal() argument
3423 if (ic->mode == 'R') in replay_journal()
3426 if (ic->journal_uptodate) in replay_journal()
3432 if (!ic->just_formatted) { in replay_journal()
3434 rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL); in replay_journal()
3435 if (ic->journal_io) in replay_journal()
3436 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); in replay_journal()
3437 if (ic->journal_io) { in replay_journal()
3440 crypt_comp.ic = ic; in replay_journal()
3443 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); in replay_journal()
3446 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); in replay_journal()
3449 if (dm_integrity_failed(ic)) in replay_journal()
3455 for (i = 0; i < ic->journal_sections; i++) { in replay_journal()
3456 for (j = 0; j < ic->journal_section_sectors; j++) { in replay_journal()
3458 struct journal_sector *js = access_journal(ic, i, j); in replay_journal()
3460 k = find_commit_seq(ic, i, j, js->commit_id); in replay_journal()
3467 for (j = 0; j < ic->journal_section_entries; j++) { in replay_journal()
3468 struct journal_entry *je = access_journal_entry(ic, i, j); in replay_journal()
3487 dm_integrity_io_error(ic, "journal commit ids", -EIO); in replay_journal()
3502 if (unlikely(write_start >= ic->journal_sections)) in replay_journal()
3504 wraparound_section(ic, &write_start); in replay_journal()
3507 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { in replay_journal()
3508 for (j = 0; j < ic->journal_section_sectors; j++) { in replay_journal()
3509 struct journal_sector *js = access_journal(ic, i, j); in replay_journal()
3511 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { in replay_journal()
3518 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); in replay_journal()
3523 if (unlikely(i >= ic->journal_sections)) in replay_journal()
3525 wraparound_section(ic, &i); in replay_journal()
3532 do_journal_write(ic, write_start, write_sections, true); in replay_journal()
3535 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { in replay_journal()
3537 ic->commit_seq = want_commit_seq; in replay_journal()
3538 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); in replay_journal()
3548 init_journal(ic, s, 1, erase_seq); in replay_journal()
3550 wraparound_section(ic, &s); in replay_journal()
3551 if (ic->journal_sections >= 2) { in replay_journal()
3552 init_journal(ic, s, ic->journal_sections - 2, erase_seq); in replay_journal()
3553 s += ic->journal_sections - 2; in replay_journal()
3554 wraparound_section(ic, &s); in replay_journal()
3555 init_journal(ic, s, 1, erase_seq); in replay_journal()
3559 ic->commit_seq = next_commit_seq(erase_seq); in replay_journal()
3562 ic->committed_section = continue_section; in replay_journal()
3563 ic->n_committed_sections = 0; in replay_journal()
3565 ic->uncommitted_section = continue_section; in replay_journal()
3566 ic->n_uncommitted_sections = 0; in replay_journal()
3568 ic->free_section = continue_section; in replay_journal()
3569 ic->free_section_entry = 0; in replay_journal()
3570 ic->free_sectors = ic->journal_entries; in replay_journal()
3572 ic->journal_tree_root = RB_ROOT; in replay_journal()
3573 for (i = 0; i < ic->journal_entries; i++) in replay_journal()
3574 init_journal_node(&ic->journal_tree[i]); in replay_journal()
3577 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic) in dm_integrity_enter_synchronous_mode() argument
3581 if (ic->mode == 'B') { in dm_integrity_enter_synchronous_mode()
3582 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1; in dm_integrity_enter_synchronous_mode()
3583 ic->synchronous_mode = 1; in dm_integrity_enter_synchronous_mode()
3585 cancel_delayed_work_sync(&ic->bitmap_flush_work); in dm_integrity_enter_synchronous_mode()
3586 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); in dm_integrity_enter_synchronous_mode()
3587 flush_workqueue(ic->commit_wq); in dm_integrity_enter_synchronous_mode()
3593 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier); in dm_integrity_reboot() local
3597 dm_integrity_enter_synchronous_mode(ic); in dm_integrity_reboot()
3604 struct dm_integrity_c *ic = ti->private; in dm_integrity_postsuspend() local
3607 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); in dm_integrity_postsuspend()
3609 del_timer_sync(&ic->autocommit_timer); in dm_integrity_postsuspend()
3611 if (ic->recalc_wq) in dm_integrity_postsuspend()
3612 drain_workqueue(ic->recalc_wq); in dm_integrity_postsuspend()
3614 if (ic->mode == 'B') in dm_integrity_postsuspend()
3615 cancel_delayed_work_sync(&ic->bitmap_flush_work); in dm_integrity_postsuspend()
3617 queue_work(ic->commit_wq, &ic->commit_work); in dm_integrity_postsuspend()
3618 drain_workqueue(ic->commit_wq); in dm_integrity_postsuspend()
3620 if (ic->mode == 'J') { in dm_integrity_postsuspend()
3621 queue_work(ic->writer_wq, &ic->writer_work); in dm_integrity_postsuspend()
3622 drain_workqueue(ic->writer_wq); in dm_integrity_postsuspend()
3623 dm_integrity_flush_buffers(ic, true); in dm_integrity_postsuspend()
3624 if (ic->wrote_to_journal) { in dm_integrity_postsuspend()
3625 init_journal(ic, ic->free_section, in dm_integrity_postsuspend()
3626 ic->journal_sections - ic->free_section, ic->commit_seq); in dm_integrity_postsuspend()
3627 if (ic->free_section) { in dm_integrity_postsuspend()
3628 init_journal(ic, 0, ic->free_section, in dm_integrity_postsuspend()
3629 next_commit_seq(ic->commit_seq)); in dm_integrity_postsuspend()
3634 if (ic->mode == 'B') { in dm_integrity_postsuspend()
3635 dm_integrity_flush_buffers(ic, true); in dm_integrity_postsuspend()
3638 init_journal(ic, 0, ic->journal_sections, 0); in dm_integrity_postsuspend()
3639 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); in dm_integrity_postsuspend()
3640 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); in dm_integrity_postsuspend()
3642 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_postsuspend()
3646 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); in dm_integrity_postsuspend()
3648 ic->journal_uptodate = true; in dm_integrity_postsuspend()
3653 struct dm_integrity_c *ic = ti->private; in dm_integrity_resume() local
3654 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); in dm_integrity_resume()
3659 ic->wrote_to_journal = false; in dm_integrity_resume()
3661 if (ic->provided_data_sectors != old_provided_data_sectors) { in dm_integrity_resume()
3662 if (ic->provided_data_sectors > old_provided_data_sectors && in dm_integrity_resume()
3663 ic->mode == 'B' && in dm_integrity_resume()
3664 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { in dm_integrity_resume()
3665 rw_journal_sectors(ic, REQ_OP_READ, 0, in dm_integrity_resume()
3666 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
3667 block_bitmap_op(ic, ic->journal, old_provided_data_sectors, in dm_integrity_resume()
3668 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
3669 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, in dm_integrity_resume()
3670 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
3673 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); in dm_integrity_resume()
3674 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); in dm_integrity_resume()
3676 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_resume()
3679 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { in dm_integrity_resume()
3681 rw_journal_sectors(ic, REQ_OP_READ, 0, in dm_integrity_resume()
3682 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
3683 if (ic->mode == 'B') { in dm_integrity_resume()
3684 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && in dm_integrity_resume()
3685 !ic->reset_recalculate_flag) { in dm_integrity_resume()
3686 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal); in dm_integrity_resume()
3687 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal); in dm_integrity_resume()
3688 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, in dm_integrity_resume()
3690 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_resume()
3691 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_resume()
3695 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit); in dm_integrity_resume()
3696 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; in dm_integrity_resume()
3697 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
3698 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
3699 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
3700 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, in dm_integrity_resume()
3701 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
3702 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_resume()
3703 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_resume()
3706 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && in dm_integrity_resume()
3707 … block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) || in dm_integrity_resume()
3708 ic->reset_recalculate_flag) { in dm_integrity_resume()
3709 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_resume()
3710 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_resume()
3712 init_journal(ic, 0, ic->journal_sections, 0); in dm_integrity_resume()
3713 replay_journal(ic); in dm_integrity_resume()
3714 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); in dm_integrity_resume()
3716 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); in dm_integrity_resume()
3718 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_resume()
3720 replay_journal(ic); in dm_integrity_resume()
3721 if (ic->reset_recalculate_flag) { in dm_integrity_resume()
3722 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_resume()
3723 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_resume()
3725 if (ic->mode == 'B') { in dm_integrity_resume()
3726 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); in dm_integrity_resume()
3727 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; in dm_integrity_resume()
3728 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); in dm_integrity_resume()
3730 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_resume()
3732 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in dm_integrity_resume()
3733 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in dm_integrity_resume()
3734 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in dm_integrity_resume()
3735 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && in dm_integrity_resume()
3736 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) { in dm_integrity_resume()
3737 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector), in dm_integrity_resume()
3738 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); in dm_integrity_resume()
3739 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector), in dm_integrity_resume()
3740 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); in dm_integrity_resume()
3741 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), in dm_integrity_resume()
3742 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); in dm_integrity_resume()
3744 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, in dm_integrity_resume()
3745 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
3749 DEBUG_print("testing recalc: %x\n", ic->sb->flags); in dm_integrity_resume()
3750 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { in dm_integrity_resume()
3751 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector); in dm_integrity_resume()
3753 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors); in dm_integrity_resume()
3754 if (recalc_pos < ic->provided_data_sectors) { in dm_integrity_resume()
3755 queue_work(ic->recalc_wq, &ic->recalc_work); in dm_integrity_resume()
3756 } else if (recalc_pos > ic->provided_data_sectors) { in dm_integrity_resume()
3757 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors); in dm_integrity_resume()
3758 recalc_write_super(ic); in dm_integrity_resume()
3762 ic->reboot_notifier.notifier_call = dm_integrity_reboot; in dm_integrity_resume()
3763 ic->reboot_notifier.next = NULL; in dm_integrity_resume()
3764 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */ in dm_integrity_resume()
3765 WARN_ON(register_reboot_notifier(&ic->reboot_notifier)); in dm_integrity_resume()
3769 dm_integrity_enter_synchronous_mode(ic); in dm_integrity_resume()
3776 struct dm_integrity_c *ic = ti->private; in dm_integrity_status() local
3783 (unsigned long long)atomic64_read(&ic->number_of_mismatches), in dm_integrity_status()
3784 ic->provided_data_sectors); in dm_integrity_status()
3785 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in dm_integrity_status()
3786 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector)); in dm_integrity_status()
3793 arg_count += !!ic->meta_dev; in dm_integrity_status()
3794 arg_count += ic->sectors_per_block != 1; in dm_integrity_status()
3795 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); in dm_integrity_status()
3796 arg_count += ic->reset_recalculate_flag; in dm_integrity_status()
3797 arg_count += ic->discard; in dm_integrity_status()
3798 arg_count += ic->mode != 'I'; /* interleave_sectors */ in dm_integrity_status()
3799 arg_count += ic->mode == 'J'; /* journal_sectors */ in dm_integrity_status()
3800 arg_count += ic->mode == 'J'; /* journal_watermark */ in dm_integrity_status()
3801 arg_count += ic->mode == 'J'; /* commit_time */ in dm_integrity_status()
3802 arg_count += ic->mode == 'B'; /* sectors_per_bit */ in dm_integrity_status()
3803 arg_count += ic->mode == 'B'; /* bitmap_flush_interval */ in dm_integrity_status()
3804 arg_count += !!ic->internal_hash_alg.alg_string; in dm_integrity_status()
3805 arg_count += !!ic->journal_crypt_alg.alg_string; in dm_integrity_status()
3806 arg_count += !!ic->journal_mac_alg.alg_string; in dm_integrity_status()
3807 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0; in dm_integrity_status()
3808 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0; in dm_integrity_status()
3809 arg_count += ic->legacy_recalculate; in dm_integrity_status()
3810 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start, in dm_integrity_status()
3811 ic->tag_size, ic->mode, arg_count); in dm_integrity_status()
3812 if (ic->meta_dev) in dm_integrity_status()
3813 DMEMIT(" meta_device:%s", ic->meta_dev->name); in dm_integrity_status()
3814 if (ic->sectors_per_block != 1) in dm_integrity_status()
3815 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_status()
3816 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in dm_integrity_status()
3818 if (ic->reset_recalculate_flag) in dm_integrity_status()
3820 if (ic->discard) in dm_integrity_status()
3822 if (ic->mode != 'I') in dm_integrity_status()
3823 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); in dm_integrity_status()
3824 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); in dm_integrity_status()
3825 if (ic->mode == 'J') { in dm_integrity_status()
3826 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; in dm_integrity_status()
3828 watermark_percentage += ic->journal_entries / 2; in dm_integrity_status()
3829 do_div(watermark_percentage, ic->journal_entries); in dm_integrity_status()
3830 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); in dm_integrity_status()
3832 DMEMIT(" commit_time:%u", ic->autocommit_msec); in dm_integrity_status()
3834 if (ic->mode == 'B') { in dm_integrity_status()
3835 …DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); in dm_integrity_status()
3836 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); in dm_integrity_status()
3838 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) in dm_integrity_status()
3840 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) in dm_integrity_status()
3842 if (ic->legacy_recalculate) in dm_integrity_status()
3847 if (ic->a.alg_string) { \ in dm_integrity_status()
3848 DMEMIT(" %s:%s", n, ic->a.alg_string); \ in dm_integrity_status()
3849 if (ic->a.key_string) \ in dm_integrity_status()
3850 DMEMIT(":%s", ic->a.key_string);\ in dm_integrity_status()
3861 ic->dev->name, ic->start, ic->tag_size, ic->mode); in dm_integrity_status()
3863 if (ic->meta_dev) in dm_integrity_status()
3864 DMEMIT(",meta_device=%s", ic->meta_dev->name); in dm_integrity_status()
3865 if (ic->sectors_per_block != 1) in dm_integrity_status()
3866 DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_status()
3868 DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ? in dm_integrity_status()
3870 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n'); in dm_integrity_status()
3872 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n'); in dm_integrity_status()
3874 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n'); in dm_integrity_status()
3875 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n'); in dm_integrity_status()
3877 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS); in dm_integrity_status()
3878 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors); in dm_integrity_status()
3879 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors); in dm_integrity_status()
3888 struct dm_integrity_c *ic = ti->private; in dm_integrity_iterate_devices() local
3890 if (!ic->meta_dev) in dm_integrity_iterate_devices()
3891 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); in dm_integrity_iterate_devices()
3893 return fn(ti, ic->dev, 0, ti->len, data); in dm_integrity_iterate_devices()
3898 struct dm_integrity_c *ic = ti->private; in dm_integrity_io_hints() local
3900 if (ic->sectors_per_block > 1) { in dm_integrity_io_hints()
3901 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3902 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3903 limits->io_min = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3905 limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3908 if (!ic->internal_hash) { in dm_integrity_io_hints()
3912 bi->tuple_size = ic->tag_size; in dm_integrity_io_hints()
3915 ic->sb->log2_sectors_per_block + SECTOR_SHIFT; in dm_integrity_io_hints()
3921 static void calculate_journal_section_size(struct dm_integrity_c *ic) in calculate_journal_section_size() argument
3925 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); in calculate_journal_section_size()
3926 …ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block])… in calculate_journal_section_size()
3929 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) in calculate_journal_section_size()
3931 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; in calculate_journal_section_size()
3932 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; in calculate_journal_section_size()
3933 …ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JO… in calculate_journal_section_size()
3934 ic->journal_entries = ic->journal_section_entries * ic->journal_sections; in calculate_journal_section_size()
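calculate_journal_section_size() derives the journal geometry from the block size and superblock fields: the (rounded-up) entry size, entries per journal sector, entries and data sectors per section, and the total entry count. A standalone sketch of that arithmetic; the constants below (entry header size, rounding, per-sector overhead, metadata sectors per section) are illustrative assumptions, not the driver's exact values:

```c
/*
 * Sketch of the journal geometry arithmetic; all constants are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define JOURNAL_BLOCK_SECTORS		8	/* assumed metadata sectors per section */
#define ENTRY_HEADER_BYTES		16	/* assumed fixed part of journal_entry  */
#define ENTRY_TRAILER_BYTES_PER_SECTOR	8	/* assumed last_bytes[] element size    */
#define ENTRY_ROUNDUP			8	/* assumed alignment of an entry        */
#define SECTOR_OVERHEAD			8	/* assumed commit_id bytes per sector   */
#define SECTOR_SIZE			512

struct journal_geometry {
	unsigned int entry_size;
	unsigned int entries_per_sector;
	unsigned int section_entries;
	unsigned int section_sectors;
	uint64_t total_entries;
};

static struct journal_geometry
journal_geometry(unsigned int sectors_per_block, unsigned int journal_sections,
		 int have_journal_mac)
{
	struct journal_geometry g;
	unsigned int sector_space = SECTOR_SIZE - SECTOR_OVERHEAD;

	if (have_journal_mac)
		sector_space -= 8;	/* assumed per-sector MAC bytes */

	/* one entry stores a header plus a small trailer per covered sector */
	g.entry_size = ENTRY_HEADER_BYTES + sectors_per_block * ENTRY_TRAILER_BYTES_PER_SECTOR;
	g.entry_size = (g.entry_size + ENTRY_ROUNDUP - 1) / ENTRY_ROUNDUP * ENTRY_ROUNDUP;

	g.entries_per_sector = sector_space / g.entry_size;
	g.section_entries = g.entries_per_sector * JOURNAL_BLOCK_SECTORS;
	g.section_sectors = g.section_entries * sectors_per_block + JOURNAL_BLOCK_SECTORS;
	g.total_entries = (uint64_t)g.section_entries * journal_sections;
	return g;
}

int main(void)
{
	struct journal_geometry g = journal_geometry(1, 168, 0);

	printf("entry %u B, %u entries/sector, section: %u entries / %u sectors, total %llu\n",
	       g.entry_size, g.entries_per_sector, g.section_entries,
	       g.section_sectors, (unsigned long long)g.total_entries);
	return 0;
}
```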
3937 static int calculate_device_limits(struct dm_integrity_c *ic) in calculate_device_limits() argument
3941 calculate_journal_section_size(ic); in calculate_device_limits()
3942 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections; in calculate_device_limits()
3943 …if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UIN… in calculate_device_limits()
3945 ic->initial_sectors = initial_sectors; in calculate_device_limits()
3947 if (ic->mode == 'I') { in calculate_device_limits()
3948 if (ic->initial_sectors + ic->provided_data_sectors > ic->meta_device_sectors) in calculate_device_limits()
3950 } else if (!ic->meta_dev) { in calculate_device_limits()
3955 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ? in calculate_device_limits()
3959 …ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2… in calculate_device_limits()
3961 if (!(ic->metadata_run & (ic->metadata_run - 1))) in calculate_device_limits()
3962 ic->log2_metadata_run = __ffs(ic->metadata_run); in calculate_device_limits()
3964 ic->log2_metadata_run = -1; in calculate_device_limits()
3966 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); in calculate_device_limits()
3967 last_sector = get_data_sector(ic, last_area, last_offset); in calculate_device_limits()
3968 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) in calculate_device_limits()
3971 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; in calculate_device_limits()
3973 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) in calculate_device_limits()
3974 >> (ic->log2_buffer_sectors + SECTOR_SHIFT); in calculate_device_limits()
3975 meta_size <<= ic->log2_buffer_sectors; in calculate_device_limits()
3976 if (ic->initial_sectors + meta_size < ic->initial_sectors || in calculate_device_limits()
3977 ic->initial_sectors + meta_size > ic->meta_device_sectors) in calculate_device_limits()
3979 ic->metadata_run = 1; in calculate_device_limits()
3980 ic->log2_metadata_run = 0; in calculate_device_limits()
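calculate_device_limits() stores log2_metadata_run only when metadata_run happens to be a power of two (and -1 otherwise), and the constructor applies the same test to tag_size, so the per-I/O address math can shift instead of multiply. The pattern in isolation:

```c
/*
 * Sketch of the "power of two -> precomputed shift" pattern used for
 * metadata_run and tag_size: remember log2(value) when possible, otherwise
 * store -1 and fall back to multiplication on the hot path.
 */
#include <stdio.h>

/* Same idea as __ffs() on a value already known to be a power of two. */
static int log2_if_power_of_two(unsigned int v)
{
	int log2 = 0;

	if (!v || (v & (v - 1)))
		return -1;		/* not a power of two */
	while (!(v & 1)) {
		v >>= 1;
		log2++;
	}
	return log2;
}

int main(void)
{
	unsigned int values[] = { 1, 24, 64, 3000, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(values) / sizeof(values[0]); i++)
		printf("%u -> log2 %d\n", values[i], log2_if_power_of_two(values[i]));
	return 0;
}
```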
3986 static void get_provided_data_sectors(struct dm_integrity_c *ic) in get_provided_data_sectors() argument
3988 if (!ic->meta_dev) { in get_provided_data_sectors()
3991 ic->provided_data_sectors = 0; in get_provided_data_sectors()
3992 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) { in get_provided_data_sectors()
3993 __u64 prev_data_sectors = ic->provided_data_sectors; in get_provided_data_sectors()
3995 ic->provided_data_sectors |= (sector_t)1 << test_bit; in get_provided_data_sectors()
3996 if (calculate_device_limits(ic)) in get_provided_data_sectors()
3997 ic->provided_data_sectors = prev_data_sectors; in get_provided_data_sectors()
4000 ic->provided_data_sectors = ic->data_device_sectors; in get_provided_data_sectors()
4001 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1); in get_provided_data_sectors()
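For the interleaved layout without a separate metadata device, get_provided_data_sectors() finds the largest usable data size by trying to set each bit of provided_data_sectors from high to low and keeping it only if calculate_device_limits() still succeeds. A sketch of that greedy search with a hypothetical, monotone fits_on_device() predicate; starting from bit 63 rather than fls64() of the device size is a simplification:

```c
/*
 * Sketch of the greedy highest-bit search for the maximum data size that
 * still leaves room for metadata.  fits_on_device() is a toy model, not
 * the driver's layout calculation.
 */
#include <stdint.h>
#include <stdio.h>

/* Assumed toy model: data plus ~1/64 metadata overhead must fit. */
static int fits_on_device(uint64_t data_sectors, uint64_t device_sectors)
{
	return data_sectors + data_sectors / 64 + 192 <= device_sectors;
}

static uint64_t max_provided_data_sectors(uint64_t device_sectors)
{
	uint64_t provided = 0;
	int bit;

	for (bit = 63; bit >= 3; bit--) {	/* stop at bit 3: 8-sector alignment */
		uint64_t prev = provided;

		provided |= (uint64_t)1 << bit;
		if (!fits_on_device(provided, device_sectors))
			provided = prev;	/* this bit does not fit, drop it */
	}
	return provided;
}

int main(void)
{
	uint64_t device_sectors = 2097152;	/* example 1 GiB device */

	printf("provided_data_sectors = %llu\n",
	       (unsigned long long)max_provided_data_sectors(device_sectors));
	return 0;
}
```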
4005 static int initialize_superblock(struct dm_integrity_c *ic, in initialize_superblock() argument
4011 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); in initialize_superblock()
4012 memcpy(ic->sb->magic, SB_MAGIC, 8); in initialize_superblock()
4013 if (ic->mode == 'I') in initialize_superblock()
4014 ic->sb->flags |= cpu_to_le32(SB_FLAG_INLINE); in initialize_superblock()
4015 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); in initialize_superblock()
4016 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); in initialize_superblock()
4017 if (ic->journal_mac_alg.alg_string) in initialize_superblock()
4018 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); in initialize_superblock()
4020 calculate_journal_section_size(ic); in initialize_superblock()
4021 journal_sections = journal_sectors / ic->journal_section_sectors; in initialize_superblock()
4024 if (ic->mode == 'I') in initialize_superblock()
4027 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) { in initialize_superblock()
4028 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC); in initialize_superblock()
4029 get_random_bytes(ic->sb->salt, SALT_SIZE); in initialize_superblock()
4032 if (!ic->meta_dev) { in initialize_superblock()
4033 if (ic->fix_padding) in initialize_superblock()
4034 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING); in initialize_superblock()
4035 ic->sb->journal_sections = cpu_to_le32(journal_sections); in initialize_superblock()
4038 ic->sb->log2_interleave_sectors = __fls(interleave_sectors); in initialize_superblock()
4039 …ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave… in initialize_superblock()
4040 …ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave… in initialize_superblock()
4042 get_provided_data_sectors(ic); in initialize_superblock()
4043 if (!ic->provided_data_sectors) in initialize_superblock()
4046 ic->sb->log2_interleave_sectors = 0; in initialize_superblock()
4048 get_provided_data_sectors(ic); in initialize_superblock()
4049 if (!ic->provided_data_sectors) in initialize_superblock()
4053 ic->sb->journal_sections = cpu_to_le32(0); in initialize_superblock()
4055 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections); in initialize_superblock()
4060 ic->sb->journal_sections = cpu_to_le32(test_journal_sections); in initialize_superblock()
4061 if (calculate_device_limits(ic)) in initialize_superblock()
4062 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections); in initialize_superblock()
4065 if (!le32_to_cpu(ic->sb->journal_sections)) { in initialize_superblock()
4066 if (ic->log2_buffer_sectors > 3) { in initialize_superblock()
4067 ic->log2_buffer_sectors--; in initialize_superblock()
4074 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); in initialize_superblock()
4076 sb_set_version(ic); in initialize_superblock()
4116 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **s… in dm_integrity_free_journal_scatterlist() argument
4120 for (i = 0; i < ic->journal_sections; i++) in dm_integrity_free_journal_scatterlist()
4125 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, in dm_integrity_alloc_journal_scatterlist() argument
4131 sl = kvmalloc_array(ic->journal_sections, in dm_integrity_alloc_journal_scatterlist()
4137 for (i = 0; i < ic->journal_sections; i++) { in dm_integrity_alloc_journal_scatterlist()
4144 page_list_location(ic, i, 0, &start_index, &start_offset); in dm_integrity_alloc_journal_scatterlist()
4145 page_list_location(ic, i, ic->journal_section_sectors - 1, in dm_integrity_alloc_journal_scatterlist()
4153 dm_integrity_free_journal_scatterlist(ic, sl); in dm_integrity_alloc_journal_scatterlist()
4245 static int create_journal(struct dm_integrity_c *ic, char **error) in create_journal() argument
4253 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); in create_journal()
4254 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); in create_journal()
4255 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); in create_journal()
4256 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); in create_journal()
4258 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, in create_journal()
4266 ic->journal_pages = journal_pages; in create_journal()
4268 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages); in create_journal()
4269 if (!ic->journal) { in create_journal()
4274 if (ic->journal_crypt_alg.alg_string) { in create_journal()
4278 comp.ic = ic; in create_journal()
4279 …ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATE… in create_journal()
4280 if (IS_ERR(ic->journal_crypt)) { in create_journal()
4282 r = PTR_ERR(ic->journal_crypt); in create_journal()
4283 ic->journal_crypt = NULL; in create_journal()
4286 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); in create_journal()
4287 blocksize = crypto_skcipher_blocksize(ic->journal_crypt); in create_journal()
4289 if (ic->journal_crypt_alg.key) { in create_journal()
4290 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, in create_journal()
4291 ic->journal_crypt_alg.key_size); in create_journal()
4298 ic->journal_crypt_alg.alg_string, blocksize, ivsize); in create_journal()
4300 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages); in create_journal()
4301 if (!ic->journal_io) { in create_journal()
4310 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
4324 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages); in create_journal()
4325 if (!ic->journal_xor) { in create_journal()
4331 sg = kvmalloc_array(ic->journal_pages + 1, in create_journal()
4339 sg_init_table(sg, ic->journal_pages + 1); in create_journal()
4340 for (i = 0; i < ic->journal_pages; i++) { in create_journal()
4341 char *va = lowmem_page_address(ic->journal_xor[i].page); in create_journal()
4346 sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids)); in create_journal()
4349 PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv); in create_journal()
4355 r = dm_integrity_failed(ic); in create_journal()
4360 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); in create_journal()
4362 crypto_free_skcipher(ic->journal_crypt); in create_journal()
4363 ic->journal_crypt = NULL; in create_journal()
4367 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
4388 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); in create_journal()
4389 if (!ic->journal_scatterlist) { in create_journal()
4394 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); in create_journal()
4395 if (!ic->journal_io_scatterlist) { in create_journal()
4400 ic->sk_requests = kvmalloc_array(ic->journal_sections, in create_journal()
4403 if (!ic->sk_requests) { in create_journal()
4408 for (i = 0; i < ic->journal_sections; i++) { in create_journal()
4424 r = dm_integrity_failed(ic); in create_journal()
4430 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
4445 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; in create_journal()
4446 ic->sk_requests[i] = section_req; in create_journal()
4457 if (ic->commit_ids[j] == ic->commit_ids[i]) { in create_journal()
4458 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1); in create_journal()
4462 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]); in create_journal()
4465 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node); in create_journal()
4471 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); in create_journal()
4472 if (!ic->journal_tree) { in create_journal()
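create_journal() ends by making the four commit ids pairwise distinct: whenever two compare equal, one is incremented and the scan restarts, presumably because replay has to tell the sequence numbers apart. The same fixup as a standalone sketch:

```c
/*
 * Sketch of the commit-id de-duplication loop at the end of create_journal().
 */
#include <stdint.h>
#include <stdio.h>

#define N_COMMIT_IDS 4

static void make_commit_ids_distinct(uint64_t ids[N_COMMIT_IDS])
{
	int i, j;

retest:
	for (i = 0; i < N_COMMIT_IDS; i++)
		for (j = 0; j < i; j++)
			if (ids[j] == ids[i]) {
				ids[i]++;	/* nudge and re-check everything */
				goto retest;
			}
}

int main(void)
{
	uint64_t ids[N_COMMIT_IDS] = { 5, 5, 5, 9 };	/* contrived collision */
	int i;

	make_commit_ids_distinct(ids);
	for (i = 0; i < N_COMMIT_IDS; i++)
		printf("commit id %d: %016llx\n", i, (unsigned long long)ids[i]);
	return 0;
}
```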
4510 struct dm_integrity_c *ic; in dm_integrity_ctr() local
4534 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); in dm_integrity_ctr()
4535 if (!ic) { in dm_integrity_ctr()
4539 ti->private = ic; in dm_integrity_ctr()
4541 ic->ti = ti; in dm_integrity_ctr()
4543 ic->in_progress = RB_ROOT; in dm_integrity_ctr()
4544 INIT_LIST_HEAD(&ic->wait_list); in dm_integrity_ctr()
4545 init_waitqueue_head(&ic->endio_wait); in dm_integrity_ctr()
4546 bio_list_init(&ic->flush_bio_list); in dm_integrity_ctr()
4547 init_waitqueue_head(&ic->copy_to_journal_wait); in dm_integrity_ctr()
4548 init_completion(&ic->crypto_backoff); in dm_integrity_ctr()
4549 atomic64_set(&ic->number_of_mismatches, 0); in dm_integrity_ctr()
4550 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL; in dm_integrity_ctr()
4552 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); in dm_integrity_ctr()
4563 ic->start = start; in dm_integrity_ctr()
4566 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { in dm_integrity_ctr()
4576 ic->mode = argv[3][0]; in dm_integrity_ctr()
4588 ic->sectors_per_block = 1; in dm_integrity_ctr()
4618 if (ic->meta_dev) { in dm_integrity_ctr()
4619 dm_put_device(ti, ic->meta_dev); in dm_integrity_ctr()
4620 ic->meta_dev = NULL; in dm_integrity_ctr()
4623 dm_table_get_mode(ti->table), &ic->meta_dev); in dm_integrity_ctr()
4636 ic->sectors_per_block = val >> SECTOR_SHIFT; in dm_integrity_ctr()
4645 ic->bitmap_flush_interval = msecs_to_jiffies(val); in dm_integrity_ctr()
4647 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, in dm_integrity_ctr()
4652 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, in dm_integrity_ctr()
4657 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, in dm_integrity_ctr()
4662 ic->recalculate_flag = true; in dm_integrity_ctr()
4664 ic->recalculate_flag = true; in dm_integrity_ctr()
4665 ic->reset_recalculate_flag = true; in dm_integrity_ctr()
4667 ic->discard = true; in dm_integrity_ctr()
4669 ic->fix_padding = true; in dm_integrity_ctr()
4671 ic->fix_hmac = true; in dm_integrity_ctr()
4673 ic->legacy_recalculate = true; in dm_integrity_ctr()
4681 ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev); in dm_integrity_ctr()
4682 if (!ic->meta_dev) in dm_integrity_ctr()
4683 ic->meta_device_sectors = ic->data_device_sectors; in dm_integrity_ctr()
4685 ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev); in dm_integrity_ctr()
4689 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); in dm_integrity_ctr()
4694 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT); in dm_integrity_ctr()
4696 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, in dm_integrity_ctr()
4701 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, in dm_integrity_ctr()
4706 if (!ic->tag_size) { in dm_integrity_ctr()
4707 if (!ic->internal_hash) { in dm_integrity_ctr()
4712 ic->tag_size = crypto_shash_digestsize(ic->internal_hash); in dm_integrity_ctr()
4714 if (ic->tag_size > MAX_TAG_SIZE) { in dm_integrity_ctr()
4719 if (!(ic->tag_size & (ic->tag_size - 1))) in dm_integrity_ctr()
4720 ic->log2_tag_size = __ffs(ic->tag_size); in dm_integrity_ctr()
4722 ic->log2_tag_size = -1; in dm_integrity_ctr()
4724 if (ic->mode == 'I') { in dm_integrity_ctr()
4726 if (ic->meta_dev) { in dm_integrity_ctr()
4731 if (!ic->internal_hash_alg.alg_string) { in dm_integrity_ctr()
4736 if (ic->journal_crypt_alg.alg_string || ic->journal_mac_alg.alg_string) { in dm_integrity_ctr()
4741 if (ic->discard) { in dm_integrity_ctr()
4746 bi = blk_get_integrity(ic->dev->bdev->bd_disk); in dm_integrity_ctr()
4753 if (bi->tuple_size < ic->tag_size) { in dm_integrity_ctr()
4763 ic->tuple_size = bi->tuple_size; in dm_integrity_ctr()
4764 if (1 << bi->interval_exp != ic->sectors_per_block << SECTOR_SHIFT) { in dm_integrity_ctr()
4771 if (ic->mode == 'B' && !ic->internal_hash) { in dm_integrity_ctr()
4777 if (ic->discard && !ic->internal_hash) { in dm_integrity_ctr()
4783 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); in dm_integrity_ctr()
4784 ic->autocommit_msec = sync_msec; in dm_integrity_ctr()
4785 timer_setup(&ic->autocommit_timer, autocommit_fn, 0); in dm_integrity_ctr()
4787 ic->io = dm_io_client_create(); in dm_integrity_ctr()
4788 if (IS_ERR(ic->io)) { in dm_integrity_ctr()
4789 r = PTR_ERR(ic->io); in dm_integrity_ctr()
4790 ic->io = NULL; in dm_integrity_ctr()
4795 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache); in dm_integrity_ctr()
4801 r = mempool_init_page_pool(&ic->recheck_pool, 1, ic->mode == 'I' ? 1 : 0); in dm_integrity_ctr()
4807 if (ic->mode == 'I') { in dm_integrity_ctr()
4808 r = bioset_init(&ic->recheck_bios, RECHECK_POOL_SIZE, 0, BIOSET_NEED_BVECS); in dm_integrity_ctr()
4813 r = bioset_integrity_create(&ic->recheck_bios, RECHECK_POOL_SIZE); in dm_integrity_ctr()
4819 r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS); in dm_integrity_ctr()
4824 r = bioset_integrity_create(&ic->recalc_bios, 1); in dm_integrity_ctr()
4832 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", in dm_integrity_ctr()
4834 if (!ic->metadata_wq) { in dm_integrity_ctr()
4844 ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM); in dm_integrity_ctr()
4845 if (!ic->wait_wq) { in dm_integrity_ctr()
4851 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, in dm_integrity_ctr()
4853 if (!ic->offload_wq) { in dm_integrity_ctr()
4859 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
4860 if (!ic->commit_wq) { in dm_integrity_ctr()
4865 INIT_WORK(&ic->commit_work, integrity_commit); in dm_integrity_ctr()
4867 if (ic->mode == 'J' || ic->mode == 'B') { in dm_integrity_ctr()
4868 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
4869 if (!ic->writer_wq) { in dm_integrity_ctr()
4874 INIT_WORK(&ic->writer_work, integrity_writer); in dm_integrity_ctr()
4877 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); in dm_integrity_ctr()
4878 if (!ic->sb) { in dm_integrity_ctr()
4884 r = sync_rw_sb(ic, REQ_OP_READ); in dm_integrity_ctr()
4890 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { in dm_integrity_ctr()
4891 if (ic->mode != 'R') { in dm_integrity_ctr()
4892 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { in dm_integrity_ctr()
4899 r = initialize_superblock(ic, journal_sectors, interleave_sectors); in dm_integrity_ctr()
4904 if (ic->mode != 'R') in dm_integrity_ctr()
4908 if (!ic->sb->version || ic->sb->version > SB_VERSION_6) { in dm_integrity_ctr()
4913 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_INLINE)) != (ic->mode == 'I')) { in dm_integrity_ctr()
4918 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { in dm_integrity_ctr()
4923 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { in dm_integrity_ctr()
4928 if (ic->mode != 'I') { in dm_integrity_ctr()
4929 if (!le32_to_cpu(ic->sb->journal_sections)) { in dm_integrity_ctr()
4935 if (le32_to_cpu(ic->sb->journal_sections)) { in dm_integrity_ctr()
4942 if (!ic->meta_dev) { in dm_integrity_ctr()
4943 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || in dm_integrity_ctr()
4944 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { in dm_integrity_ctr()
4950 if (ic->sb->log2_interleave_sectors) { in dm_integrity_ctr()
4956 …if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string)… in dm_integrity_ctr()
4962 get_provided_data_sectors(ic); in dm_integrity_ctr()
4963 if (!ic->provided_data_sectors) { in dm_integrity_ctr()
4970 r = calculate_device_limits(ic); in dm_integrity_ctr()
4972 if (ic->meta_dev) { in dm_integrity_ctr()
4973 if (ic->log2_buffer_sectors > 3) { in dm_integrity_ctr()
4974 ic->log2_buffer_sectors--; in dm_integrity_ctr()
4984 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block) in dm_integrity_ctr()
4985 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block; in dm_integrity_ctr()
4987 …bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3… in dm_integrity_ctr()
4991 …while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit)… in dm_integrity_ctr()
4994 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block; in dm_integrity_ctr()
4995 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; in dm_integrity_ctr()
4997 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; in dm_integrity_ctr()
4999 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) in dm_integrity_ctr()
5001 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8); in dm_integrity_ctr()
5003 if (!ic->meta_dev) in dm_integrity_ctr()
5004 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); in dm_integrity_ctr()
5006 if (ti->len > ic->provided_data_sectors) { in dm_integrity_ctr()
5012 threshold = (__u64)ic->journal_entries * (100 - journal_watermark); in dm_integrity_ctr()
5015 ic->free_sectors_threshold = threshold; in dm_integrity_ctr()
5018 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); in dm_integrity_ctr()
5019 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); in dm_integrity_ctr()
5020 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); in dm_integrity_ctr()
5021 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); in dm_integrity_ctr()
5022 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); in dm_integrity_ctr()
5023 DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections)); in dm_integrity_ctr()
5024 DEBUG_print(" journal_entries %u\n", ic->journal_entries); in dm_integrity_ctr()
5025 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); in dm_integrity_ctr()
5026 DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev)); in dm_integrity_ctr()
5027 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); in dm_integrity_ctr()
5028 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); in dm_integrity_ctr()
5029 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); in dm_integrity_ctr()
5030 …DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data… in dm_integrity_ctr()
5031 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); in dm_integrity_ctr()
5034 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { in dm_integrity_ctr()
5035 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_ctr()
5036 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_ctr()
5039 if (ic->internal_hash) { in dm_integrity_ctr()
5040 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
5041 if (!ic->recalc_wq) { in dm_integrity_ctr()
5046 INIT_WORK(&ic->recalc_work, ic->mode == 'I' ? integrity_recalc_inline : integrity_recalc); in dm_integrity_ctr()
5048 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { in dm_integrity_ctr()
5055 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && in dm_integrity_ctr()
5056 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && in dm_integrity_ctr()
5057 dm_integrity_disable_recalculate(ic)) { in dm_integrity_ctr()
5063 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, in dm_integrity_ctr()
5064 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0); in dm_integrity_ctr()
5065 if (IS_ERR(ic->bufio)) { in dm_integrity_ctr()
5066 r = PTR_ERR(ic->bufio); in dm_integrity_ctr()
5068 ic->bufio = NULL; in dm_integrity_ctr()
5071 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); in dm_integrity_ctr()
5073 if (ic->mode != 'R' && ic->mode != 'I') { in dm_integrity_ctr()
5074 r = create_journal(ic, &ti->error); in dm_integrity_ctr()
5080 if (ic->mode == 'B') { in dm_integrity_ctr()
5082 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); in dm_integrity_ctr()
5084 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); in dm_integrity_ctr()
5085 if (!ic->recalc_bitmap) { in dm_integrity_ctr()
5090 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); in dm_integrity_ctr()
5091 if (!ic->may_write_bitmap) { in dm_integrity_ctr()
5096 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL); in dm_integrity_ctr()
5097 if (!ic->bbs) { in dm_integrity_ctr()
5102 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work); in dm_integrity_ctr()
5103 for (i = 0; i < ic->n_bitmap_blocks; i++) { in dm_integrity_ctr()
5104 struct bitmap_block_status *bbs = &ic->bbs[i]; in dm_integrity_ctr()
5108 bbs->ic = ic; in dm_integrity_ctr()
5117 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset; in dm_integrity_ctr()
5122 init_journal(ic, 0, ic->journal_sections, 0); in dm_integrity_ctr()
5123 r = dm_integrity_failed(ic); in dm_integrity_ctr()
5128 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); in dm_integrity_ctr()
5133 ic->just_formatted = true; in dm_integrity_ctr()
5136 if (!ic->meta_dev && ic->mode != 'I') { in dm_integrity_ctr()
5137 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); in dm_integrity_ctr()
5141 if (ic->mode == 'B') { in dm_integrity_ctr()
5144 …max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_S… in dm_integrity_ctr()
5157 if (ic->discard) in dm_integrity_ctr()
5160 if (ic->mode == 'I') in dm_integrity_ctr()
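In bitmap mode the constructor reuses the journal area to hold an on-disk bitmap, so it widens the per-bit granularity until the required number of bits fits in that space and then rounds the bit count up to whole bitmap blocks (the n_bitmap_blocks computation above). A sketch of that sizing; BITMAP_BLOCK_SIZE, the bits-per-byte capacity of the journal area, and the starting granularity are assumptions for illustration:

```c
/*
 * Sketch of the bitmap sizing logic for mode 'B'.  Constants are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT		9
#define BITMAP_BLOCK_SIZE	4096	/* assumed bytes per bitmap block */

struct bitmap_layout {
	unsigned int log2_sectors_per_bit;
	uint64_t n_bits;
	uint64_t n_bitmap_blocks;
};

static struct bitmap_layout size_bitmap(uint64_t journal_area_sectors,
					uint64_t provided_data_sectors,
					unsigned int log2_sectors_per_bit)
{
	struct bitmap_layout bl;
	/* every byte of the journal area can hold 8 bitmap bits */
	uint64_t bits_in_journal = journal_area_sectors << (SECTOR_SHIFT + 3);

	while (bits_in_journal <
	       (provided_data_sectors + ((uint64_t)1 << log2_sectors_per_bit) - 1)
	       >> log2_sectors_per_bit)
		log2_sectors_per_bit++;	/* coarser bits until the bitmap fits */

	bl.log2_sectors_per_bit = log2_sectors_per_bit;
	bl.n_bits = (provided_data_sectors + ((uint64_t)1 << log2_sectors_per_bit) - 1)
		    >> log2_sectors_per_bit;
	bl.n_bitmap_blocks = (bl.n_bits + BITMAP_BLOCK_SIZE * 8 - 1) / (BITMAP_BLOCK_SIZE * 8);
	return bl;
}

int main(void)
{
	/* example: 1 MiB of journal area, 1 TiB of data, start at one bit per block */
	struct bitmap_layout bl = size_bitmap(2048, (uint64_t)1 << 31, 3);

	printf("1 bit per 2^%u sectors, %llu bits, %llu bitmap blocks\n",
	       bl.log2_sectors_per_bit, (unsigned long long)bl.n_bits,
	       (unsigned long long)bl.n_bitmap_blocks);
	return 0;
}
```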
5174 struct dm_integrity_c *ic = ti->private; in dm_integrity_dtr() local
5176 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); in dm_integrity_dtr()
5177 BUG_ON(!list_empty(&ic->wait_list)); in dm_integrity_dtr()
5179 if (ic->mode == 'B') in dm_integrity_dtr()
5180 cancel_delayed_work_sync(&ic->bitmap_flush_work); in dm_integrity_dtr()
5181 if (ic->metadata_wq) in dm_integrity_dtr()
5182 destroy_workqueue(ic->metadata_wq); in dm_integrity_dtr()
5183 if (ic->wait_wq) in dm_integrity_dtr()
5184 destroy_workqueue(ic->wait_wq); in dm_integrity_dtr()
5185 if (ic->offload_wq) in dm_integrity_dtr()
5186 destroy_workqueue(ic->offload_wq); in dm_integrity_dtr()
5187 if (ic->commit_wq) in dm_integrity_dtr()
5188 destroy_workqueue(ic->commit_wq); in dm_integrity_dtr()
5189 if (ic->writer_wq) in dm_integrity_dtr()
5190 destroy_workqueue(ic->writer_wq); in dm_integrity_dtr()
5191 if (ic->recalc_wq) in dm_integrity_dtr()
5192 destroy_workqueue(ic->recalc_wq); in dm_integrity_dtr()
5193 kvfree(ic->bbs); in dm_integrity_dtr()
5194 if (ic->bufio) in dm_integrity_dtr()
5195 dm_bufio_client_destroy(ic->bufio); in dm_integrity_dtr()
5196 bioset_exit(&ic->recalc_bios); in dm_integrity_dtr()
5197 bioset_exit(&ic->recheck_bios); in dm_integrity_dtr()
5198 mempool_exit(&ic->recheck_pool); in dm_integrity_dtr()
5199 mempool_exit(&ic->journal_io_mempool); in dm_integrity_dtr()
5200 if (ic->io) in dm_integrity_dtr()
5201 dm_io_client_destroy(ic->io); in dm_integrity_dtr()
5202 if (ic->dev) in dm_integrity_dtr()
5203 dm_put_device(ti, ic->dev); in dm_integrity_dtr()
5204 if (ic->meta_dev) in dm_integrity_dtr()
5205 dm_put_device(ti, ic->meta_dev); in dm_integrity_dtr()
5206 dm_integrity_free_page_list(ic->journal); in dm_integrity_dtr()
5207 dm_integrity_free_page_list(ic->journal_io); in dm_integrity_dtr()
5208 dm_integrity_free_page_list(ic->journal_xor); in dm_integrity_dtr()
5209 dm_integrity_free_page_list(ic->recalc_bitmap); in dm_integrity_dtr()
5210 dm_integrity_free_page_list(ic->may_write_bitmap); in dm_integrity_dtr()
5211 if (ic->journal_scatterlist) in dm_integrity_dtr()
5212 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); in dm_integrity_dtr()
5213 if (ic->journal_io_scatterlist) in dm_integrity_dtr()
5214 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist); in dm_integrity_dtr()
5215 if (ic->sk_requests) { in dm_integrity_dtr()
5218 for (i = 0; i < ic->journal_sections; i++) { in dm_integrity_dtr()
5221 req = ic->sk_requests[i]; in dm_integrity_dtr()
5227 kvfree(ic->sk_requests); in dm_integrity_dtr()
5229 kvfree(ic->journal_tree); in dm_integrity_dtr()
5230 if (ic->sb) in dm_integrity_dtr()
5231 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); in dm_integrity_dtr()
5233 if (ic->internal_hash) in dm_integrity_dtr()
5234 crypto_free_shash(ic->internal_hash); in dm_integrity_dtr()
5235 free_alg(&ic->internal_hash_alg); in dm_integrity_dtr()
5237 if (ic->journal_crypt) in dm_integrity_dtr()
5238 crypto_free_skcipher(ic->journal_crypt); in dm_integrity_dtr()
5239 free_alg(&ic->journal_crypt_alg); in dm_integrity_dtr()
5241 if (ic->journal_mac) in dm_integrity_dtr()
5242 crypto_free_shash(ic->journal_mac); in dm_integrity_dtr()
5243 free_alg(&ic->journal_mac_alg); in dm_integrity_dtr()
5245 kfree(ic); in dm_integrity_dtr()