Lines Matching full:sh

26  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
120 static inline int raid6_d0(struct stripe_head *sh) in raid6_d0() argument
122 if (sh->ddf_layout) in raid6_d0()
126 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
129 return sh->qd_idx + 1; in raid6_d0()
142 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
147 if (sh->ddf_layout) in raid6_idx_to_slot()
149 if (idx == sh->pd_idx) in raid6_idx_to_slot()
151 if (idx == sh->qd_idx) in raid6_idx_to_slot()
153 if (!sh->ddf_layout) in raid6_idx_to_slot()
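
A hedged reconstruction from the raid6_d0() and raid6_idx_to_slot() matches above, packaged as a self-contained userspace demo (the four-field struct is a minimal stand-in for the real stripe_head in drivers/md/raid5.h; the values in main() are illustrative):

    #include <stdio.h>

    /* Minimal stand-in; the real struct stripe_head lives in drivers/md/raid5.h. */
    struct stripe_head { int disks, pd_idx, qd_idx, ddf_layout; };

    static int raid6_d0(struct stripe_head *sh)
    {
            if (sh->ddf_layout)
                    return 0;                  /* DDF: data always starts at device 0 */
            if (sh->qd_idx == sh->disks - 1)
                    return 0;                  /* Q is the last device: data wraps to 0 */
            return sh->qd_idx + 1;             /* otherwise data starts just after Q */
    }

    static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                                 int *count, int syndrome_disks)
    {
            int slot = *count;

            if (sh->ddf_layout)
                    (*count)++;                /* DDF numbers every device, parity included */
            if (idx == sh->pd_idx)
                    return syndrome_disks;     /* P: next-to-last slot */
            if (idx == sh->qd_idx)
                    return syndrome_disks + 1; /* Q: last slot */
            if (!sh->ddf_layout)
                    (*count)++;                /* md layouts number data devices only */
            return slot;
    }

    int main(void)
    {
            struct stripe_head sh = { .disks = 6, .pd_idx = 4, .qd_idx = 5 };
            int d0 = raid6_d0(&sh), count = 0, idx = d0;

            do {    /* walk devices in rotation order from d0, as set_syndrome_sources() does */
                    printf("dev %d -> slot %d\n", idx,
                           raid6_idx_to_slot(idx, &sh, &count, sh.disks - 2));
                    idx = (idx + 1) % sh.disks;
            } while (idx != d0);
            return 0;
    }

For this md (non-DDF) layout the data devices land in slots 0-3 and P/Q in the last two slots, the dense source ordering the syndrome-generation code expects.
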
160 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
162 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
163 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
164 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
167 static bool stripe_is_lowprio(struct stripe_head *sh) in stripe_is_lowprio() argument
169 return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || in stripe_is_lowprio()
170 test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && in stripe_is_lowprio()
171 !test_bit(STRIPE_R5C_CACHING, &sh->state); in stripe_is_lowprio()
174 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
175 __must_hold(&sh->raid_conf->device_lock) in raid5_wakeup_stripe_thread()
177 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
180 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
184 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
187 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
190 if (stripe_is_lowprio(sh)) in raid5_wakeup_stripe_thread()
191 list_add_tail(&sh->lru, &group->loprio_list); in raid5_wakeup_stripe_thread()
193 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
195 sh->group = group; in raid5_wakeup_stripe_thread()
203 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
207 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
214 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
221 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
228 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
232 for (i = sh->disks; i--; ) in do_release_stripe()
233 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in do_release_stripe()
242 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || in do_release_stripe()
244 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { in do_release_stripe()
245 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in do_release_stripe()
246 r5c_make_stripe_write_out(sh); in do_release_stripe()
247 set_bit(STRIPE_HANDLE, &sh->state); in do_release_stripe()
250 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
251 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
252 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
253 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
254 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
255 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
256 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
258 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
259 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
261 if (stripe_is_lowprio(sh)) in do_release_stripe()
262 list_add_tail(&sh->lru, in do_release_stripe()
265 list_add_tail(&sh->lru, in do_release_stripe()
268 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
274 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
275 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
280 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { in do_release_stripe()
282 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
284 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
286 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
289 if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) in do_release_stripe()
291 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) in do_release_stripe()
293 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
301 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
307 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
311 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
312 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
368 struct stripe_head *sh, *t; in release_stripe_list() local
374 llist_for_each_entry_safe(sh, t, head, release_list) { in release_stripe_list()
377 /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ in release_stripe_list()
379 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
385 hash = sh->hash_lock_index; in release_stripe_list()
386 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
393 void raid5_release_stripe(struct stripe_head *sh) in raid5_release_stripe() argument
395 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe()
403 if (atomic_add_unless(&sh->count, -1, 1)) in raid5_release_stripe()
407 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in raid5_release_stripe()
409 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
415 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
417 hash = sh->hash_lock_index; in raid5_release_stripe()
418 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
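
raid5_release_stripe() layers three put paths, all visible above: a lock-free fast path that decrements unless it would drop the last reference (atomic_add_unless(&sh->count, -1, 1)), a lock-free hand-off onto conf->released_stripes via llist_add() guarded by STRIPE_ON_RELEASE_LIST, and a locked slow path through atomic_dec_and_lock_irqsave(). A self-contained userspace analogue of the fast-path primitive, using C11 atomics (an illustration of the technique, not the kernel helper):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Analogue of atomic_add_unless(&count, -1, 1): decrement, unless the
     * current value is 1, i.e. unless this put would be the final one
     * (the final put must take the locked slow path instead). */
    static bool put_unless_last(atomic_int *count)
    {
            int old = atomic_load(count);

            while (old != 1)
                    if (atomic_compare_exchange_weak(count, &old, old - 1))
                            return true;       /* fast path: not the last ref */
            return false;                      /* last ref: caller must lock */
    }

    int main(void)
    {
            atomic_int count = 2;

            printf("%d\n", put_unless_last(&count));   /* 1: 2 -> 1 */
            printf("%d\n", put_unless_last(&count));   /* 0: would hit zero */
            return 0;
    }
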
424 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
427 (unsigned long long)sh->sector); in remove_hash()
429 hlist_del_init(&sh->hash); in remove_hash()
432 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
434 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
437 (unsigned long long)sh->sector); in insert_hash()
439 hlist_add_head(&sh->hash, hp); in insert_hash()
445 struct stripe_head *sh = NULL; in get_free_stripe() local
451 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
453 remove_hash(sh); in get_free_stripe()
455 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
459 return sh; in get_free_stripe()
463 static void free_stripe_pages(struct stripe_head *sh) in free_stripe_pages() argument
469 if (!sh->pages) in free_stripe_pages()
472 for (i = 0; i < sh->nr_pages; i++) { in free_stripe_pages()
473 p = sh->pages[i]; in free_stripe_pages()
476 sh->pages[i] = NULL; in free_stripe_pages()
480 static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp) in alloc_stripe_pages() argument
485 for (i = 0; i < sh->nr_pages; i++) { in alloc_stripe_pages()
487 if (sh->pages[i]) in alloc_stripe_pages()
492 free_stripe_pages(sh); in alloc_stripe_pages()
495 sh->pages[i] = p; in alloc_stripe_pages()
501 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) in init_stripe_shared_pages() argument
505 if (sh->pages) in init_stripe_shared_pages()
508 /* Each of the sh->dev[i] needs one conf->stripe_size */ in init_stripe_shared_pages()
512 sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in init_stripe_shared_pages()
513 if (!sh->pages) in init_stripe_shared_pages()
515 sh->nr_pages = nr_pages; in init_stripe_shared_pages()
516 sh->stripes_per_page = cnt; in init_stripe_shared_pages()
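
The matches above show the array allocation but not the sizing; the comment implies that cnt = PAGE_SIZE / conf->stripe_size stripes share one page and nr_pages = DIV_ROUND_UP(disks, cnt). A worked example (the exact derivation of cnt is an assumption from that comment; the numbers are illustrative, and this shared-page path only matters when PAGE_SIZE exceeds the stripe size, otherwise grow_buffers() above gives each r5dev its own page):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 65536;       /* e.g. a 64 KiB-page architecture */
            unsigned long stripe_size = 4096;      /* conf->stripe_size */
            int disks = 16;

            int cnt = page_size / stripe_size;          /* 16 stripes fit in one page */
            int nr_pages = (disks + cnt - 1) / cnt;     /* DIV_ROUND_UP(disks, cnt) = 1 */

            printf("cnt=%d nr_pages=%d\n", cnt, nr_pages);
            return 0;
    }
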
521 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
524 int num = sh->raid_conf->pool_size; in shrink_buffers()
530 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
531 p = sh->dev[i].page; in shrink_buffers()
534 sh->dev[i].page = NULL; in shrink_buffers()
539 sh->dev[i].page = NULL; in shrink_buffers()
540 free_stripe_pages(sh); /* Free pages */ in shrink_buffers()
544 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
547 int num = sh->raid_conf->pool_size; in grow_buffers()
556 sh->dev[i].page = page; in grow_buffers()
557 sh->dev[i].orig_page = page; in grow_buffers()
558 sh->dev[i].offset = 0; in grow_buffers()
561 if (alloc_stripe_pages(sh, gfp)) in grow_buffers()
565 sh->dev[i].page = raid5_get_dev_page(sh, i); in grow_buffers()
566 sh->dev[i].orig_page = sh->dev[i].page; in grow_buffers()
567 sh->dev[i].offset = raid5_get_page_offset(sh, i); in grow_buffers()
574 struct stripe_head *sh);
576 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
578 struct r5conf *conf = sh->raid_conf; in init_stripe()
581 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
582 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
583 BUG_ON(stripe_operations_active(sh)); in init_stripe()
584 BUG_ON(sh->batch_head); in init_stripe()
590 sh->generation = conf->generation - previous; in init_stripe()
591 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
592 sh->sector = sector; in init_stripe()
593 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
594 sh->state = 0; in init_stripe()
596 for (i = sh->disks; i--; ) { in init_stripe()
597 struct r5dev *dev = &sh->dev[i]; in init_stripe()
602 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
608 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
612 sh->overwrite_disks = 0; in init_stripe()
613 insert_hash(conf, sh); in init_stripe()
614 sh->cpu = smp_processor_id(); in init_stripe()
615 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
621 struct stripe_head *sh; in __find_stripe() local
624 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
625 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
626 return sh; in __find_stripe()
635 struct stripe_head *sh; in find_get_stripe() local
637 sh = __find_stripe(conf, sector, generation); in find_get_stripe()
638 if (!sh) in find_get_stripe()
641 if (atomic_inc_not_zero(&sh->count)) in find_get_stripe()
642 return sh; in find_get_stripe()
646 * be on a list (sh->lru). Must remove the stripe from the list that in find_get_stripe()
651 if (!atomic_read(&sh->count)) { in find_get_stripe()
652 if (!test_bit(STRIPE_HANDLE, &sh->state)) in find_get_stripe()
654 BUG_ON(list_empty(&sh->lru) && in find_get_stripe()
655 !test_bit(STRIPE_EXPANDING, &sh->state)); in find_get_stripe()
659 list_del_init(&sh->lru); in find_get_stripe()
663 if (sh->group) { in find_get_stripe()
664 sh->group->stripes_cnt--; in find_get_stripe()
665 sh->group = NULL; in find_get_stripe()
668 atomic_inc(&sh->count); in find_get_stripe()
671 return sh; in find_get_stripe()
807 struct stripe_head *sh; in raid5_get_active_stripe() local
834 sh = find_get_stripe(conf, sector, conf->generation - previous, in raid5_get_active_stripe()
836 if (sh) in raid5_get_active_stripe()
840 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
841 if (sh) { in raid5_get_active_stripe()
843 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
844 atomic_inc(&sh->count); in raid5_get_active_stripe()
871 return sh; in raid5_get_active_stripe()
874 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
876 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
877 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
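
An illustrative example of the check above: on a 6-device RAID-6, max_degraded is 2, so is_full_stripe_write() is true exactly when all 6 - 2 = 4 data devices are fully overwritten, i.e. once __add_stripe_bio() further down has counted sh->overwrite_disks up to 4.
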
902 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
904 struct r5conf *conf = sh->raid_conf; in stripe_can_batch()
908 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
909 is_full_stripe_write(sh); in stripe_can_batch()
914 struct stripe_head *sh, struct stripe_head *last_sh) in stripe_add_to_batch_list() argument
922 tmp_sec = sh->sector; in stripe_add_to_batch_list()
925 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
942 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
944 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
947 if (sh->batch_head) in stripe_add_to_batch_list()
951 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
953 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
954 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
971 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
977 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
981 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
983 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
987 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
992 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
993 int seq = sh->bm_seq; in stripe_add_to_batch_list()
994 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
995 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
996 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
997 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
998 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
1001 atomic_inc(&sh->count); in stripe_add_to_batch_list()
1003 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
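
The STRIPE_BIT_DELAY/bm_seq handling near the end keeps the batch head's bitmap sequence at the maximum over all batch members: do_release_stripe() above parks a stripe on conf->bitmap_list while sh->bm_seq - conf->seq_write > 0, so propagating the larger sequence to the head makes the whole batch wait for the bitmap flush that covers its newest member.
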
1011 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
1021 if (sh->generation == conf->generation - 1) in use_new_offset()
1138 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
1140 struct r5conf *conf = sh->raid_conf; in ops_run_io()
1141 int i, disks = sh->disks; in ops_run_io()
1142 struct stripe_head *head_sh = sh; in ops_run_io()
1149 if (log_stripe(sh, s) == 0) in ops_run_io()
1161 sh = head_sh; in ops_run_io()
1162 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
1164 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
1166 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
1168 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
1171 &sh->dev[i].flags)) { in ops_run_io()
1176 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
1180 dev = &sh->dev[i]; in ops_run_io()
1213 int bad = rdev_has_badblock(rdev, sh->sector, in ops_run_io()
1247 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1253 bi->bi_private = sh; in ops_run_io()
1256 __func__, (unsigned long long)sh->sector, in ops_run_io()
1258 atomic_inc(&sh->count); in ops_run_io()
1259 if (sh != head_sh) in ops_run_io()
1261 if (use_new_offset(conf, sh)) in ops_run_io()
1262 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1265 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1270 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1271 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1274 test_bit(R5_InJournal, &sh->dev[i].flags)) in ops_run_io()
1280 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; in ops_run_io()
1282 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1285 bi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1294 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1296 mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector); in ops_run_io()
1307 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1312 rbi->bi_private = sh; in ops_run_io()
1316 __func__, (unsigned long long)sh->sector, in ops_run_io()
1318 atomic_inc(&sh->count); in ops_run_io()
1319 if (sh != head_sh) in ops_run_io()
1321 if (use_new_offset(conf, sh)) in ops_run_io()
1322 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1325 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1327 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1328 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1329 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1332 rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1340 mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector); in ops_run_io()
1348 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
1349 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1350 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1355 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1357 if (sh != head_sh) in ops_run_io()
1368 struct stripe_head *sh, int no_skipcopy) in async_copy_data() argument
1376 struct r5conf *conf = sh->raid_conf; in async_copy_data()
1432 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1434 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill()
1437 (unsigned long long)sh->sector); in ops_complete_biofill()
1440 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1441 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1462 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1464 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1465 raid5_release_stripe(sh); in ops_complete_biofill()
1468 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1473 struct r5conf *conf = sh->raid_conf; in ops_run_biofill()
1475 BUG_ON(sh->batch_head); in ops_run_biofill()
1477 (unsigned long long)sh->sector); in ops_run_biofill()
1479 for (i = sh->disks; i--; ) { in ops_run_biofill()
1480 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1483 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1486 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1491 dev->sector, tx, sh, 0); in ops_run_biofill()
1497 atomic_inc(&sh->count); in ops_run_biofill()
1498 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1502 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1509 tgt = &sh->dev[target]; in mark_target_uptodate()
1517 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1520 (unsigned long long)sh->sector); in ops_complete_compute()
1523 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1524 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1526 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1527 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1528 sh->check_state = check_state_compute_result; in ops_complete_compute()
1529 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1530 raid5_release_stripe(sh); in ops_complete_compute()
1540 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1543 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); in to_addr_conv()
1550 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu) in to_addr_offs() argument
1552 return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2); in to_addr_offs()
1556 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1558 int disks = sh->disks; in ops_run_compute5()
1560 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_compute5()
1561 int target = sh->ops.target; in ops_run_compute5()
1562 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1570 BUG_ON(sh->batch_head); in ops_run_compute5()
1573 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1578 off_srcs[count] = sh->dev[i].offset; in ops_run_compute5()
1579 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1583 atomic_inc(&sh->count); in ops_run_compute5()
1586 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1589 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1592 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1598 * @srcs - (struct page *) array of size sh->disks
1600 * @sh - stripe_head to parse
1609 struct stripe_head *sh, in set_syndrome_sources() argument
1612 int disks = sh->disks; in set_syndrome_sources()
1613 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1614 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1624 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1625 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1627 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1636 srcs[slot] = sh->dev[i].orig_page; in set_syndrome_sources()
1638 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1644 offs[slot] = sh->dev[i].offset; in set_syndrome_sources()
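
Combined with the raid6_d0()/raid6_idx_to_slot() sketch earlier, the layout set_syndrome_sources() produces for a 6-device md-layout array (pd_idx = 4, qd_idx = 5, hence d0 = 0) is:

    /* srcs[0..3] / offs[0..3] = data devices 0..3, in rotation order from d0
     * srcs[4]                 = P  (slot == syndrome_disks)
     * srcs[5]                 = Q  (slot == syndrome_disks + 1)
     * Data slots skipped by the SYNDROME_SRC_* filter stay NULL from the
     * initial clearing pass and are treated as all-zero sources by the
     * async PQ layer (an assumption about async_tx, not shown above). */
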
1653 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1655 int disks = sh->disks; in ops_run_compute6_1()
1657 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_1()
1659 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1668 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1669 if (sh->ops.target < 0) in ops_run_compute6_1()
1670 target = sh->ops.target2; in ops_run_compute6_1()
1671 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1672 target = sh->ops.target; in ops_run_compute6_1()
1678 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1680 tgt = &sh->dev[target]; in ops_run_compute6_1()
1685 atomic_inc(&sh->count); in ops_run_compute6_1()
1688 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1692 ops_complete_compute, sh, in ops_run_compute6_1()
1693 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1695 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1702 offs[count] = sh->dev[i].offset; in ops_run_compute6_1()
1703 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1707 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1708 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1710 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1717 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1719 int i, count, disks = sh->disks; in ops_run_compute6_2()
1720 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1721 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1723 int target = sh->ops.target; in ops_run_compute6_2()
1724 int target2 = sh->ops.target2; in ops_run_compute6_2()
1725 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1726 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1729 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_2()
1732 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1734 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1749 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1751 offs[slot] = sh->dev[i].offset; in ops_run_compute6_2()
1752 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1765 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1767 atomic_inc(&sh->count); in ops_run_compute6_2()
1774 ops_complete_compute, sh, in ops_run_compute6_2()
1775 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1777 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1783 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1795 offs[count] = sh->dev[i].offset; in ops_run_compute6_2()
1796 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1798 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1799 dest_off = sh->dev[data_target].offset; in ops_run_compute6_2()
1803 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1805 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1808 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1810 ops_complete_compute, sh, in ops_run_compute6_2()
1811 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1813 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1818 ops_complete_compute, sh, in ops_run_compute6_2()
1819 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1823 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1829 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1838 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1841 (unsigned long long)sh->sector); in ops_complete_prexor()
1843 if (r5c_is_writeback(sh->raid_conf->log)) in ops_complete_prexor()
1848 r5c_release_extra_page(sh); in ops_complete_prexor()
1852 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1855 int disks = sh->disks; in ops_run_prexor5()
1857 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_prexor5()
1858 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1862 unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_prexor5()
1863 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1865 BUG_ON(sh->batch_head); in ops_run_prexor5()
1867 (unsigned long long)sh->sector); in ops_run_prexor5()
1870 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1886 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1888 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor5()
1894 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1898 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_prexor6()
1903 (unsigned long long)sh->sector); in ops_run_prexor6()
1905 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1908 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1910 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor6()
1916 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1918 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain()
1919 int disks = sh->disks; in ops_run_biodrain()
1921 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1924 (unsigned long long)sh->sector); in ops_run_biodrain()
1930 sh = head_sh; in ops_run_biodrain()
1935 dev = &sh->dev[i]; in ops_run_biodrain()
1941 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1944 sh->overwrite_disks = 0; in ops_run_biodrain()
1947 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1961 dev->sector, tx, sh, in ops_run_biodrain()
1974 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1977 if (sh == head_sh) in ops_run_biodrain()
1989 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1990 int disks = sh->disks; in ops_complete_reconstruct()
1991 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1992 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
1997 (unsigned long long)sh->sector); in ops_complete_reconstruct()
2000 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
2001 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
2002 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
2006 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
2011 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) in ops_complete_reconstruct()
2021 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
2022 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
2023 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
2024 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
2026 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
2027 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
2030 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
2031 raid5_release_stripe(sh); in ops_complete_reconstruct()
2035 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
2038 int disks = sh->disks; in ops_run_reconstruct5()
2042 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
2048 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
2052 (unsigned long long)sh->sector); in ops_run_reconstruct5()
2054 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
2057 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
2060 if (i >= sh->disks) { in ops_run_reconstruct5()
2061 atomic_inc(&sh->count); in ops_run_reconstruct5()
2062 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
2063 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
2069 off_srcs = to_addr_offs(sh, percpu); in ops_run_reconstruct5()
2075 off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2076 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2078 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2086 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2087 off_dest = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2089 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2103 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
2111 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2115 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2120 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2123 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2126 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
2133 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
2140 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
2145 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
2147 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
2148 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
2150 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
2153 if (i >= sh->disks) { in ops_run_reconstruct6()
2154 atomic_inc(&sh->count); in ops_run_reconstruct6()
2155 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
2156 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
2157 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
2163 offs = to_addr_offs(sh, percpu); in ops_run_reconstruct6()
2165 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
2173 count = set_syndrome_sources(blocks, offs, sh, synflags); in ops_run_reconstruct6()
2175 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
2181 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2184 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2186 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct6()
2189 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
2197 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
2200 (unsigned long long)sh->sector); in ops_complete_check()
2202 sh->check_state = check_state_check_result; in ops_complete_check()
2203 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
2204 raid5_release_stripe(sh); in ops_complete_check()
2207 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2209 int disks = sh->disks; in ops_run_check_p()
2210 int pd_idx = sh->pd_idx; in ops_run_check_p()
2211 int qd_idx = sh->qd_idx; in ops_run_check_p()
2215 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_check_p()
2222 (unsigned long long)sh->sector); in ops_run_check_p()
2224 BUG_ON(sh->batch_head); in ops_run_check_p()
2226 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2227 off_dest = sh->dev[pd_idx].offset; in ops_run_check_p()
2233 off_srcs[count] = sh->dev[i].offset; in ops_run_check_p()
2234 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
2238 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2240 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_p()
2241 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
2243 atomic_inc(&sh->count); in ops_run_check_p()
2244 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
2248 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2251 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_check_pq()
2256 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2258 BUG_ON(sh->batch_head); in ops_run_check_pq()
2259 count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
2263 atomic_inc(&sh->count); in ops_run_check_pq()
2265 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2267 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_pq()
2268 &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit); in ops_run_check_pq()
2271 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
2273 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
2275 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
2282 ops_run_biofill(sh); in raid_run_ops()
2288 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2290 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
2291 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2293 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2302 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2304 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2308 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2311 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
2317 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2319 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2323 if (sh->check_state == check_state_run) in raid_run_ops()
2324 ops_run_check_p(sh, percpu); in raid_run_ops()
2325 else if (sh->check_state == check_state_run_q) in raid_run_ops()
2326 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2327 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
2328 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2333 if (overlap_clear && !sh->batch_head) { in raid_run_ops()
2335 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
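
raid_run_ops() threads a single dma_async_tx_descriptor through the stages, so each ops_run_* call observes its predecessor as a completion dependency; the ordering implied by the fragments above:

    /* tx dependency chain in raid_run_ops() (RAID-5 / RAID-6 variants):
     *   ops_run_biofill                        - complete reads into waiting bios
     *   ops_run_compute5 / 6_1 / 6_2     -> tx - recompute missing block(s)
     *   ops_run_prexor5 / prexor6        -> tx - subtract old data from parity
     *   ops_run_partial_parity           -> tx - PPL record, when enabled
     *   ops_run_biodrain                 -> tx - copy new bio data into the stripe
     *   ops_run_reconstruct5 / 6               - regenerate parity
     *   ops_run_check_p / check_pq             - verify, filling ops.zero_sum_result */
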
2343 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) in free_stripe() argument
2346 kfree(sh->pages); in free_stripe()
2348 if (sh->ppl_page) in free_stripe()
2349 __free_page(sh->ppl_page); in free_stripe()
2350 kmem_cache_free(sc, sh); in free_stripe()
2356 struct stripe_head *sh; in alloc_stripe() local
2358 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
2359 if (sh) { in alloc_stripe()
2360 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
2361 spin_lock_init(&sh->batch_lock); in alloc_stripe()
2362 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
2363 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
2364 INIT_LIST_HEAD(&sh->r5c); in alloc_stripe()
2365 INIT_LIST_HEAD(&sh->log_list); in alloc_stripe()
2366 atomic_set(&sh->count, 1); in alloc_stripe()
2367 sh->raid_conf = conf; in alloc_stripe()
2368 sh->log_start = MaxSector; in alloc_stripe()
2371 sh->ppl_page = alloc_page(gfp); in alloc_stripe()
2372 if (!sh->ppl_page) { in alloc_stripe()
2373 free_stripe(sc, sh); in alloc_stripe()
2378 if (init_stripe_shared_pages(sh, conf, disks)) { in alloc_stripe()
2379 free_stripe(sc, sh); in alloc_stripe()
2384 return sh; in alloc_stripe()
2388 struct stripe_head *sh; in grow_one_stripe() local
2390 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2391 if (!sh) in grow_one_stripe()
2394 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2395 shrink_buffers(sh); in grow_one_stripe()
2396 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2399 sh->hash_lock_index = in grow_one_stripe()
2404 raid5_release_stripe(sh); in grow_one_stripe()
2688 struct stripe_head *sh; in drop_one_stripe() local
2692 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2694 if (!sh) in drop_one_stripe()
2696 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2697 shrink_buffers(sh); in drop_one_stripe()
2698 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2716 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2717 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2718 int disks = sh->disks, i; in raid5_end_read_request()
2723 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2727 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2733 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2743 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2744 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2746 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2748 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2749 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2760 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2761 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2762 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2763 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2765 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in raid5_end_read_request()
2770 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); in raid5_end_read_request()
2778 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2781 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2794 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2815 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2818 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2819 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2820 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2821 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2822 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2824 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2826 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2827 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2831 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2837 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2838 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2839 raid5_release_stripe(sh); in raid5_end_read_request()
2844 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2845 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2846 int disks = sh->disks, i; in raid5_end_write_request()
2851 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2855 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2869 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2879 else if (rdev_has_badblock(rdev, sh->sector, in raid5_end_write_request()
2881 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2885 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2889 } else if (rdev_has_badblock(rdev, sh->sector, in raid5_end_write_request()
2891 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2892 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2897 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2902 if (sh->batch_head && bi->bi_status && !replacement) in raid5_end_write_request()
2903 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2906 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2907 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2908 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2910 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2911 raid5_release_stripe(sh->batch_head); in raid5_end_write_request()
2912 raid5_release_stripe(sh); in raid5_end_write_request()
2955 struct stripe_head *sh) in raid5_compute_sector() argument
3143 if (sh) { in raid5_compute_sector()
3144 sh->pd_idx = pd_idx; in raid5_compute_sector()
3145 sh->qd_idx = qd_idx; in raid5_compute_sector()
3146 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
3155 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) in raid5_compute_blocknr() argument
3157 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr()
3158 int raid_disks = sh->disks; in raid5_compute_blocknr()
3160 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
3175 if (i == sh->pd_idx) in raid5_compute_blocknr()
3183 if (i > sh->pd_idx) in raid5_compute_blocknr()
3188 if (i < sh->pd_idx) in raid5_compute_blocknr()
3190 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3202 if (i == sh->qd_idx) in raid5_compute_blocknr()
3209 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3211 else if (i > sh->pd_idx) in raid5_compute_blocknr()
3216 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3220 if (i < sh->pd_idx) in raid5_compute_blocknr()
3222 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
3232 if (sh->pd_idx == 0) in raid5_compute_blocknr()
3236 if (i < sh->pd_idx) in raid5_compute_blocknr()
3238 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3243 if (i > sh->pd_idx) in raid5_compute_blocknr()
3248 if (i < sh->pd_idx) in raid5_compute_blocknr()
3250 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3266 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3267 || sh2.qd_idx != sh->qd_idx) { in raid5_compute_blocknr()
3332 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
3335 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
3336 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
3346 r5c_release_extra_page(sh); in schedule_reconstruction()
3349 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3370 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
3373 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
3378 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
3381 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3382 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3384 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
3385 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
3388 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3407 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
3416 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3417 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3421 int qd_idx = sh->qd_idx; in schedule_reconstruction()
3422 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
3429 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && in schedule_reconstruction()
3431 !test_bit(STRIPE_FULL_WRITE, &sh->state) && in schedule_reconstruction()
3432 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3436 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
3440 static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi, in stripe_bio_overlaps() argument
3443 struct r5conf *conf = sh->raid_conf; in stripe_bio_overlaps()
3447 bi->bi_iter.bi_sector, sh->sector); in stripe_bio_overlaps()
3450 if (sh->batch_head) in stripe_bio_overlaps()
3454 bip = &sh->dev[dd_idx].towrite; in stripe_bio_overlaps()
3456 bip = &sh->dev[dd_idx].toread; in stripe_bio_overlaps()
3481 for (i = 0; i < sh->disks; i++) { in stripe_bio_overlaps()
3482 if (i != sh->pd_idx && in stripe_bio_overlaps()
3483 (i == dd_idx || sh->dev[i].towrite)) { in stripe_bio_overlaps()
3484 sector = sh->dev[i].sector; in stripe_bio_overlaps()
3500 static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi, in __add_stripe_bio() argument
3503 struct r5conf *conf = sh->raid_conf; in __add_stripe_bio()
3508 bip = &sh->dev[dd_idx].towrite; in __add_stripe_bio()
3512 bip = &sh->dev[dd_idx].toread; in __add_stripe_bio()
3519 clear_bit(STRIPE_BATCH_READY, &sh->state); in __add_stripe_bio()
3530 sector_t sector = sh->dev[dd_idx].sector; in __add_stripe_bio()
3531 for (bi=sh->dev[dd_idx].towrite; in __add_stripe_bio()
3532 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in __add_stripe_bio()
3534 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in __add_stripe_bio()
3538 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in __add_stripe_bio()
3539 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in __add_stripe_bio()
3540 sh->overwrite_disks++; in __add_stripe_bio()
3544 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx, in __add_stripe_bio()
3545 sh->dev[dd_idx].sector); in __add_stripe_bio()
3547 if (conf->mddev->bitmap && firstwrite && !sh->batch_head) { in __add_stripe_bio()
3548 sh->bm_seq = conf->seq_flush+1; in __add_stripe_bio()
3549 set_bit(STRIPE_BIT_DELAY, &sh->state); in __add_stripe_bio()
3558 static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi, in add_stripe_bio() argument
3561 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3563 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_stripe_bio()
3564 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3565 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3569 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_stripe_bio()
3570 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
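
add_stripe_bio() is a test-then-insert under stripe_lock: stripe_bio_overlaps() is consulted first, and on conflict R5_Overlap is set and false returned. A hedged sketch of the caller pattern this implies (inferred from the set_bit/wake_up_bit pairing on R5_Overlap in these matches; the actual caller code is not shown):

    /* hypothetical caller:
     * while (!add_stripe_bio(sh, bi, dd_idx, forwrite, previous))
     *         wait_on_bit(&sh->dev[dd_idx].flags, R5_Overlap,
     *                     TASK_UNINTERRUPTIBLE);
     */
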
3577 struct stripe_head *sh) in stripe_set_idx() argument
3589 &dd_idx, sh); in stripe_set_idx()
3593 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3597 BUG_ON(sh->batch_head); in handle_failed_stripe()
3601 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3612 sh->sector, in handle_failed_stripe()
3618 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3620 bi = sh->dev[i].towrite; in handle_failed_stripe()
3621 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3622 sh->overwrite_disks = 0; in handle_failed_stripe()
3623 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3625 log_stripe_write_finished(sh); in handle_failed_stripe()
3627 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3628 wake_up_bit(&sh->dev[i].flags, R5_Overlap); in handle_failed_stripe()
3631 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3632 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3639 bi = sh->dev[i].written; in handle_failed_stripe()
3640 sh->dev[i].written = NULL; in handle_failed_stripe()
3641 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3642 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3643 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3647 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3648 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3658 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3660 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3661 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3662 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3663 bi = sh->dev[i].toread; in handle_failed_stripe()
3664 sh->dev[i].toread = NULL; in handle_failed_stripe()
3665 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3666 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3667 wake_up_bit(&sh->dev[i].flags, R5_Overlap); in handle_failed_stripe()
3671 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3673 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3682 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3687 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3693 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3699 BUG_ON(sh->batch_head); in handle_failed_sync()
3700 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3701 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3702 wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); in handle_failed_sync()
3722 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3730 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3741 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3746 rdev = sh->raid_conf->disks[disk_idx].replacement; in want_replace()
3750 && (rdev->recovery_offset <= sh->sector in want_replace()
3751 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3756 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3759 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3760 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3761 &sh->dev[s->failed_num[1]] }; in need_this_block()
3763 bool force_rcw = (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW); in need_this_block()
3779 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3804 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3825 s->failed_num[i] == sh->pd_idx || in need_this_block()
3826 s->failed_num[i] == sh->qd_idx) && in need_this_block()
3843 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3847 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3848 s->failed_num[i] != sh->qd_idx && in need_this_block()
3863 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3866 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3869 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3875 BUG_ON(sh->batch_head); in fetch_block()
3887 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
3894 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3895 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3898 sh->ops.target = disk_idx; in fetch_block()
3899 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3918 &sh->dev[other].flags)) in fetch_block()
3923 (unsigned long long)sh->sector, in fetch_block()
3925 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3927 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3928 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3929 sh->ops.target = disk_idx; in fetch_block()
3930 sh->ops.target2 = other; in fetch_block()
3949 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
3959 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
3960 !sh->reconstruct_state) { in handle_stripe_fill()
3970 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in handle_stripe_fill()
3971 r5c_make_stripe_write_out(sh); in handle_stripe_fill()
3976 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
3980 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
3991 struct stripe_head *sh, int disks) in handle_stripe_clean_event() argument
3996 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
4000 if (sh->dev[i].written) { in handle_stripe_clean_event()
4001 dev = &sh->dev[i]; in handle_stripe_clean_event()
4029 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4032 if (sh != head_sh) { in handle_stripe_clean_event()
4033 dev = &sh->dev[i]; in handle_stripe_clean_event()
4037 sh = head_sh; in handle_stripe_clean_event()
4038 dev = &sh->dev[i]; in handle_stripe_clean_event()
4043 log_stripe_write_finished(sh); in handle_stripe_clean_event()
4046 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
4048 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4049 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4050 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
4051 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4052 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4055 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
4062 hash = sh->hash_lock_index; in handle_stripe_clean_event()
4064 remove_hash(sh); in handle_stripe_clean_event()
4067 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4069 if (sh != head_sh) in handle_stripe_clean_event()
4072 sh = head_sh; in handle_stripe_clean_event()
4074 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
4075 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
4079 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
4103 struct stripe_head *sh, in handle_stripe_dirtying() argument
4118 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
4124 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", in handle_stripe_dirtying()
4126 (unsigned long long)sh->sector); in handle_stripe_dirtying()
4129 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4131 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4143 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4155 (unsigned long long)sh->sector, sh->state, rmw, rcw); in handle_stripe_dirtying()
4156 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
4160 sh->sector, rmw); in handle_stripe_dirtying()
4163 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4166 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
4181 r5c_use_extra_page(sh); in handle_stripe_dirtying()
4186 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4193 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4195 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4202 &sh->state)) { in handle_stripe_dirtying()
4209 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4218 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4220 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4227 &sh->state)) { in handle_stripe_dirtying()
4235 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4241 (unsigned long long)sh->sector, rcw, qread, in handle_stripe_dirtying()
4242 test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
4246 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
4247 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4259 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
4261 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
4262 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
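
The rmw/rcw counters in handle_stripe_dirtying() implement the classic write-cost comparison. A simplified self-contained model (illustrative only; the kernel's counting above also credits blocks that are already uptodate, in-journal copies, and the rmw_level policy):

    #include <stdio.h>

    int main(void)
    {
            int data_disks = 9;    /* 10-device RAID-5: 9 data + 1 parity */
            int to_write = 2;      /* data blocks dirtied in this stripe */

            /* read-modify-write: read the old data being overwritten + old parity */
            int rmw = to_write + 1;
            /* reconstruct-write: instead read every data block left untouched */
            int rcw = data_disks - to_write;

            printf("rmw=%d rcw=%d -> %s\n", rmw, rcw,
                   rmw < rcw ? "read-modify-write" : "reconstruct-write");
            return 0;
    }
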
4266 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4271 BUG_ON(sh->batch_head); in handle_parity_checks5()
4272 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
4274 switch (sh->check_state) { in handle_parity_checks5()
4279 sh->check_state = check_state_run; in handle_parity_checks5()
4281 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4285 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
4288 sh->check_state = check_state_idle; in handle_parity_checks5()
4290 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4293 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
4304 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4309 sh->check_state = check_state_idle; in handle_parity_checks5()
4321 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
4325 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4330 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4333 (unsigned long long) sh->sector, in handle_parity_checks5()
4334 (unsigned long long) sh->sector + in handle_parity_checks5()
4337 sh->check_state = check_state_compute_run; in handle_parity_checks5()
4338 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
4341 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4342 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4343 sh->ops.target2 = -1; in handle_parity_checks5()
4352 __func__, sh->check_state, in handle_parity_checks5()
4353 (unsigned long long) sh->sector); in handle_parity_checks5()
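
The handle_parity_checks5() hits above walk a small state machine through sh->check_state: idle kicks off an xor check (check_state_run), a clean zero_sum_result marks the stripe STRIPE_INSYNC, and a mismatch during repair schedules a parity recompute (check_state_compute_run) targeting pd_idx. The toy below compresses those transitions, which the kernel drives asynchronously via raid_run_ops(), into one synchronous advance() per step; the enum names loosely echo the fragments, everything else is invented.

    #include <stdio.h>

    enum check_state {
        check_state_idle,
        check_state_run,            /* xor check in flight */
        check_state_check_result,   /* zero_sum_result is available */
        check_state_compute_run,    /* rewriting bad parity */
    };

    struct stripe {
        enum check_state check_state;
        unsigned zero_sum_result;   /* nonzero => parity mismatch */
        int insync;
    };

    static void advance(struct stripe *sh)
    {
        switch (sh->check_state) {
        case check_state_idle:
            sh->check_state = check_state_run;     /* kick off xor check */
            break;
        case check_state_run:
            sh->check_state = check_state_check_result;
            break;
        case check_state_check_result:
            if (sh->zero_sum_result == 0) {
                sh->insync = 1;                    /* parity is good */
                sh->check_state = check_state_idle;
            } else {
                sh->check_state = check_state_compute_run;
            }
            break;
        case check_state_compute_run:
            sh->zero_sum_result = 0;               /* parity recomputed */
            sh->check_state = check_state_check_result;
            break;
        }
    }

    int main(void)
    {
        struct stripe sh = { .zero_sum_result = 1 };

        while (!sh.insync)
            advance(&sh);
        printf("stripe in sync after parity rewrite\n");
        return 0;
    }
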
4358 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4362 int pd_idx = sh->pd_idx; in handle_parity_checks6()
4363 int qd_idx = sh->qd_idx; in handle_parity_checks6()
4366 BUG_ON(sh->batch_head); in handle_parity_checks6()
4367 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
4377 switch (sh->check_state) { in handle_parity_checks6()
4385 sh->check_state = check_state_run; in handle_parity_checks6()
4391 if (sh->check_state == check_state_run) in handle_parity_checks6()
4392 sh->check_state = check_state_run_pq; in handle_parity_checks6()
4394 sh->check_state = check_state_run_q; in handle_parity_checks6()
4398 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
4400 if (sh->check_state == check_state_run) { in handle_parity_checks6()
4402 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4405 if (sh->check_state >= check_state_run && in handle_parity_checks6()
4406 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
4418 sh->check_state = check_state_idle; in handle_parity_checks6()
4421 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
4429 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
4435 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
4440 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4441 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4446 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4447 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
4455 dev - (struct r5dev *) &sh->dev)) { in handle_parity_checks6()
4461 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4468 sh->check_state = check_state_idle; in handle_parity_checks6()
4474 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
4477 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4483 sh->check_state = check_state_compute_result; in handle_parity_checks6()
4494 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4497 (unsigned long long) sh->sector, in handle_parity_checks6()
4498 (unsigned long long) sh->sector + in handle_parity_checks6()
4501 int *target = &sh->ops.target; in handle_parity_checks6()
4503 sh->ops.target = -1; in handle_parity_checks6()
4504 sh->ops.target2 = -1; in handle_parity_checks6()
4505 sh->check_state = check_state_compute_run; in handle_parity_checks6()
4506 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
4508 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4510 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4512 target = &sh->ops.target2; in handle_parity_checks6()
4515 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4517 &sh->dev[qd_idx].flags); in handle_parity_checks6()
4528 __func__, sh->check_state, in handle_parity_checks6()
4529 (unsigned long long) sh->sector); in handle_parity_checks6()
4534 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4542 BUG_ON(sh->batch_head); in handle_stripe_expansion()
4543 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
4544 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
4545 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4550 sector_t bn = raid5_compute_blocknr(sh, i, 1); in handle_stripe_expansion()
4571 sh->dev[i].page, sh2->dev[dd_idx].offset, in handle_stripe_expansion()
4572 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), in handle_stripe_expansion()
4607 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4609 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4610 int disks = sh->disks; in analyse_stripe()
4617 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4618 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4628 dev = &sh->dev[i]; in analyse_stripe()
4639 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4668 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4669 !rdev_has_badblock(rdev, sh->sector, in analyse_stripe()
4683 is_bad = rdev_has_badblock(rdev, sh->sector, in analyse_stripe()
4709 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4778 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4788 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4800 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4803 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4804 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4805 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4806 if (!sh->batch_head) { in clear_batch_ready()
4807 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4815 if (sh->batch_head != sh) { in clear_batch_ready()
4816 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4819 spin_lock(&sh->batch_lock); in clear_batch_ready()
4820 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4822 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4823 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4835 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4838 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4840 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4842 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4853 "stripe state: %lx\n", sh->state); in break_stripe_batch_list()
4858 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4863 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4864 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4865 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4866 sh->batch_head = NULL; in break_stripe_batch_list()
4867 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4868 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4869 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4870 wake_up_bit(&sh->dev[i].flags, R5_Overlap); in break_stripe_batch_list()
4871 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4875 sh->state & handle_flags) in break_stripe_batch_list()
4876 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4877 raid5_release_stripe(sh); in break_stripe_batch_list()
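
break_stripe_batch_list() above is a textbook list_for_each_entry_safe() walk: because the next pointer is fetched before the current entry is unlinked, each stripe can be list_del_init()'ed mid-iteration and handed back on its own. A self-contained userspace rendition of that pattern, with a hand-rolled intrusive list standing in for the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        list_init(n);
    }

    #define container_of(p, T, m) ((T *)((char *)(p) - offsetof(T, m)))

    struct stripe {
        int id;
        struct stripe *batch_head;
        struct list_head batch_list;
    };

    static void break_batch(struct stripe *head)
    {
        struct list_head *pos, *n;

        /* "safe" walk: n is fetched before pos is unlinked */
        for (pos = head->batch_list.next, n = pos->next;
             pos != &head->batch_list; pos = n, n = pos->next) {
            struct stripe *sh = container_of(pos, struct stripe, batch_list);

            list_del_init(&sh->batch_list);
            sh->batch_head = NULL;   /* stripe now handled on its own */
            printf("released stripe %d\n", sh->id);
        }
    }

    int main(void)
    {
        struct stripe head = { .id = 0 }, a = { .id = 1 }, b = { .id = 2 };

        list_init(&head.batch_list);
        a.batch_head = b.batch_head = &head;
        list_add_tail(&a.batch_list, &head.batch_list);
        list_add_tail(&b.batch_list, &head.batch_list);
        break_batch(&head);
        return 0;
    }
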
4889 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4892 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4895 int disks = sh->disks; in handle_stripe()
4898 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4906 if (clear_batch_ready(sh)) in handle_stripe()
4909 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4912 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4916 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4917 break_stripe_batch_list(sh, 0); in handle_stripe()
4919 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4920 spin_lock(&sh->stripe_lock); in handle_stripe()
4925 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && in handle_stripe()
4926 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && in handle_stripe()
4927 !test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4928 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4929 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4930 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4931 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4933 spin_unlock(&sh->stripe_lock); in handle_stripe()
4935 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4939 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4940 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4941 sh->check_state, sh->reconstruct_state); in handle_stripe()
4943 analyse_stripe(sh, &s); in handle_stripe()
4945 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) in handle_stripe()
4950 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4957 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4965 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
4967 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
4983 sh->check_state = 0; in handle_stripe()
4984 sh->reconstruct_state = 0; in handle_stripe()
4985 break_stripe_batch_list(sh, 0); in handle_stripe()
4987 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4989 handle_failed_sync(conf, sh, &s); in handle_stripe()
4996 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
4998 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
4999 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
5000 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
5005 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
5006 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
5007 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
5008 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
5009 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
5011 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5013 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
5023 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
5025 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5028 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
5036 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
5037 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
5038 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
5039 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
5040 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
5041 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
5053 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
5056 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
5057 log_stripe_write_finished(sh); in handle_stripe()
5068 handle_stripe_fill(sh, &s, disks); in handle_stripe()
5075 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
5086 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { in handle_stripe()
5089 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
5095 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
5106 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && in handle_stripe()
5108 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
5121 if (sh->check_state || in handle_stripe()
5123 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5124 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
5126 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
5128 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
5132 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
5133 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
5136 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
5137 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
5138 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
5139 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5143 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5144 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
5147 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5148 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
5150 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
5151 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
5152 wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); in handle_stripe()
5160 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
5177 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
5179 = raid5_get_active_stripe(conf, NULL, sh->sector, in handle_stripe()
5183 /* sh cannot be written until sh_src has been read. in handle_stripe()
5184 * so arrange for sh to be delayed a little in handle_stripe()
5186 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
5187 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5197 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
5198 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
5200 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
5201 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5206 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
5207 !sh->reconstruct_state) { in handle_stripe()
5209 sh->disks = conf->raid_disks; in handle_stripe()
5210 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5211 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
5212 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
5213 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
5220 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
5221 handle_stripe_expansion(conf, sh); in handle_stripe()
5241 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5245 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
5252 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5261 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5268 raid_run_ops(sh, s.ops_request); in handle_stripe()
5270 ops_run_io(sh, &s); in handle_stripe()
5283 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
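
handle_stripe() brackets all its work between test_and_set_bit_lock(STRIPE_ACTIVE, ...) and clear_bit_unlock(): a per-stripe trylock built from a single atomic bit, where a losing caller simply re-flags STRIPE_HANDLE and backs off. A hypothetical C11 equivalent of that bit-lock idiom (not the kernel's implementation, which sits in its own bitops layer):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define STRIPE_ACTIVE 0

    struct stripe { atomic_ulong state; };

    static bool stripe_trylock(struct stripe *sh)
    {
        unsigned long bit = 1UL << STRIPE_ACTIVE;

        /* false if the bit was already set: someone else is handling */
        return (atomic_fetch_or_explicit(&sh->state, bit,
                                         memory_order_acquire) & bit) == 0;
    }

    static void stripe_unlock(struct stripe *sh)
    {
        atomic_fetch_and_explicit(&sh->state, ~(1UL << STRIPE_ACTIVE),
                                  memory_order_release);
    }

    int main(void)
    {
        struct stripe sh = { 0 };

        if (stripe_trylock(&sh)) {
            printf("handling stripe\n");  /* analyse + state machines here */
            stripe_unlock(&sh);
        } else {
            printf("busy: re-queue with STRIPE_HANDLE set\n");
        }
        return 0;
    }
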
5292 struct stripe_head *sh; in raid5_activate_delayed() local
5293 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
5295 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
5296 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
5298 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5299 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
5312 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
5314 list_del_init(&sh->lru); in activate_bit_delay()
5315 atomic_inc(&sh->count); in activate_bit_delay()
5316 hash = sh->hash_lock_index; in activate_bit_delay()
5317 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5511 struct stripe_head *sh, *tmp; in __get_priority_stripe() local
5521 sh = NULL; in __get_priority_stripe()
5547 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
5551 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
5571 sh = tmp; in __get_priority_stripe()
5576 if (sh) { in __get_priority_stripe()
5584 if (!sh) { in __get_priority_stripe()
5594 sh->group = NULL; in __get_priority_stripe()
5596 list_del_init(&sh->lru); in __get_priority_stripe()
5597 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
5598 return sh; in __get_priority_stripe()
5611 struct stripe_head *sh; in raid5_unplug() local
5620 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
5621 list_del_init(&sh->lru); in raid5_unplug()
5628 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5633 hash = sh->hash_lock_index; in raid5_unplug()
5634 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5647 struct stripe_head *sh) in release_stripe_plug() argument
5655 raid5_release_stripe(sh); in release_stripe_plug()
5668 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5669 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5671 raid5_release_stripe(sh); in release_stripe_plug()
5678 struct stripe_head *sh; in make_discard_request() local
5708 sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0); in make_discard_request()
5709 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5710 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5711 raid5_release_stripe(sh); in make_discard_request()
5712 wait_on_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap, in make_discard_request()
5716 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5717 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5719 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5721 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5722 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5723 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5724 raid5_release_stripe(sh); in make_discard_request()
5725 wait_on_bit(&sh->dev[d].flags, R5_Overlap, in make_discard_request()
5730 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5731 sh->overwrite_disks = 0; in make_discard_request()
5733 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5735 sh->dev[d].towrite = bi; in make_discard_request()
5736 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5739 sh->overwrite_disks++; in make_discard_request()
5741 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5743 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5744 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5747 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5748 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5749 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5751 release_stripe_plug(mddev, sh); in make_discard_request()
5772 struct stripe_head *sh) in stripe_ahead_of_reshape() argument
5778 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in stripe_ahead_of_reshape()
5779 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_ahead_of_reshape()
5782 min_sector = min(min_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5783 max_sector = max(max_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5799 struct stripe_request_ctx *ctx, struct stripe_head *sh, in add_all_stripe_bios() argument
5804 spin_lock_irq(&sh->stripe_lock); in add_all_stripe_bios()
5806 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5807 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5809 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5816 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_all_stripe_bios()
5818 spin_unlock_irq(&sh->stripe_lock); in add_all_stripe_bios()
5819 raid5_release_stripe(sh); in add_all_stripe_bios()
5831 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5832 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5834 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5841 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_all_stripe_bios()
5846 spin_unlock_irq(&sh->stripe_lock); in add_all_stripe_bios()
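
add_all_stripe_bios() above is a two-phase, all-or-nothing commit under a single stripe_lock hold: the first disk loop only asks stripe_bio_overlaps() and bails if any data disk conflicts; the second loop, which can no longer fail, actually attaches the bio everywhere. A skeleton of that shape with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    #define NDISKS 6

    struct disk { bool busy; const char *towrite; };

    /* returns true only if the bio was attached to every data disk */
    static bool add_all(struct disk *dev, int pd_idx, const char *bio)
    {
        /* phase 1: validate every data disk before touching anything */
        for (int i = 0; i < NDISKS; i++) {
            if (i == pd_idx)
                continue;            /* parity slots carry no bio */
            if (dev[i].busy)
                return false;        /* overlap: attach nothing at all */
        }
        /* phase 2: commit; cannot fail after the checks above */
        for (int i = 0; i < NDISKS; i++) {
            if (i == pd_idx)
                continue;
            dev[i].towrite = bio;
        }
        return true;
    }

    int main(void)
    {
        struct disk devs[NDISKS] = { { 0 } };

        printf("added: %d\n", add_all(devs, 5, "bio0"));  /* 1 */
        devs[2].busy = true;
        printf("added: %d\n", add_all(devs, 5, "bio1"));  /* 0 */
        return 0;
    }
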
5937 struct stripe_head *sh; in make_stripe_request() local
5964 sh = raid5_get_active_stripe(conf, ctx, new_sector, flags); in make_stripe_request()
5965 if (unlikely(!sh)) { in make_stripe_request()
5972 stripe_ahead_of_reshape(mddev, conf, sh)) { in make_stripe_request()
5977 * 'sh', we know that if that happens, in make_stripe_request()
5991 if (test_bit(STRIPE_EXPANDING, &sh->state)) { in make_stripe_request()
5997 if (!add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) { in make_stripe_request()
6002 if (stripe_can_batch(sh)) { in make_stripe_request()
6003 stripe_add_to_batch_list(conf, sh, ctx->batch_last); in make_stripe_request()
6006 atomic_inc(&sh->count); in make_stripe_request()
6007 ctx->batch_last = sh; in make_stripe_request()
6011 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); in make_stripe_request()
6016 set_bit(STRIPE_HANDLE, &sh->state); in make_stripe_request()
6017 clear_bit(STRIPE_DELAYED, &sh->state); in make_stripe_request()
6018 if ((!sh->batch_head || sh == sh->batch_head) && in make_stripe_request()
6020 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_stripe_request()
6023 release_stripe_plug(mddev, sh); in make_stripe_request()
6027 raid5_release_stripe(sh); in make_stripe_request()
6047 struct stripe_head sh; in raid5_bio_lowest_chunk_sector() local
6053 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh); in raid5_bio_lowest_chunk_sector()
6062 while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx) in raid5_bio_lowest_chunk_sector()
6221 struct stripe_head *sh; in reshape_request() local
6378 sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i, in reshape_request()
6380 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
6385 for (j=sh->disks; j--;) { in reshape_request()
6387 if (j == sh->pd_idx) in reshape_request()
6390 j == sh->qd_idx) in reshape_request()
6392 s = raid5_compute_blocknr(sh, j, 0); in reshape_request()
6397 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); in reshape_request()
6398 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
6399 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
6402 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
6403 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6405 list_add(&sh->lru, &stripes); in reshape_request()
6428 sh = raid5_get_active_stripe(conf, NULL, first_sector, in reshape_request()
6430 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
6431 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6432 raid5_release_stripe(sh); in reshape_request()
6439 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
6440 list_del_init(&sh->lru); in reshape_request()
6441 raid5_release_stripe(sh); in reshape_request()
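
reshape_request() above asks raid5_compute_blocknr() which logical block lands in each slot of a destination stripe, because growing the array moves every block: both the stripe number and the rotating parity position change with the disk count. A toy mapper for one simple rotating-parity layout (purely illustrative, not the kernel's layout tables) makes that visible:

    #include <stdio.h>

    struct placement { long stripe; int slot; int pd_idx; };

    static struct placement map_block(long block, int raid_disks)
    {
        int data_disks = raid_disks - 1;
        struct placement p;
        int d;

        p.stripe = block / data_disks;
        p.pd_idx = p.stripe % raid_disks;       /* rotate parity per stripe */
        d = block % data_disks;
        p.slot = d < p.pd_idx ? d : d + 1;      /* step over the parity slot */
        return p;
    }

    int main(void)
    {
        long block = 1000;
        struct placement before = map_block(block, 4);
        struct placement after  = map_block(block, 5);  /* grown by a disk */

        printf("block %ld: stripe %ld slot %d  ->  stripe %ld slot %d\n",
               block, before.stripe, before.slot, after.stripe, after.slot);
        return 0;
    }
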
6490 struct stripe_head *sh; in raid5_sync_request() local
6549 sh = raid5_get_active_stripe(conf, NULL, sector_nr, in raid5_sync_request()
6551 if (sh == NULL) { in raid5_sync_request()
6552 sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0); in raid5_sync_request()
6572 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in raid5_sync_request()
6573 set_bit(STRIPE_HANDLE, &sh->state); in raid5_sync_request()
6575 raid5_release_stripe(sh); in raid5_sync_request()
6593 struct stripe_head *sh; in retry_aligned_read() local
6614 sh = raid5_get_active_stripe(conf, NULL, sector, in retry_aligned_read()
6616 if (!sh) { in retry_aligned_read()
6623 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6624 raid5_release_stripe(sh); in retry_aligned_read()
6630 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
6631 handle_stripe(sh); in retry_aligned_read()
6632 raid5_release_stripe(sh); in retry_aligned_read()
6648 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
6653 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6654 batch[batch_size++] = sh; in handle_active_stripes()
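
The handle_active_stripes() hits that close this listing show a common drain pattern: pull up to MAX_STRIPE_BATCH stripes off the priority lists, then handle the whole batch separately. A hypothetical single-threaded sketch, with a pthread mutex standing in (as an assumption) for the array-wide lock the kernel holds around the drain:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_STRIPE_BATCH 8

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending = 20;              /* stand-in for the handle lists */

    static int get_priority_stripe(void)  /* caller must hold device_lock */
    {
        return pending > 0 ? pending-- : -1;
    }

    static int handle_batch(void)
    {
        int batch[MAX_STRIPE_BATCH], batch_size = 0, sh;

        pthread_mutex_lock(&device_lock);
        while (batch_size < MAX_STRIPE_BATCH &&
               (sh = get_priority_stripe()) != -1)
            batch[batch_size++] = sh;
        pthread_mutex_unlock(&device_lock);

        /* process outside the lock, as handle_stripe() is in the kernel */
        for (int i = 0; i < batch_size; i++)
            printf("handling stripe %d\n", batch[i]);
        return batch_size;
    }

    int main(void)
    {
        while (handle_batch() > 0)
            ;
        return 0;
    }
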