Lines matching refs: mddev (drivers/md/raid10.c)
Each entry below is a kernel source line that references mddev: the file's line number on the left, the enclosing function on the right, with "argument" or "local" noted where the identifier is a parameter or local variable of that function.
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
174 &conf->mddev->recovery)) { in r10buf_pool_alloc()
255 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
263 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
273 struct mddev *mddev = r10_bio->mddev; in reschedule_retry() local
274 struct r10conf *conf = mddev->private; in reschedule_retry()
284 md_wakeup_thread(mddev->thread); in reschedule_retry()
295 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
317 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
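Most of the hits so far (lines 255, 263, 273-274, 295, 317) are one navigation idiom: an r10bio carries a back-pointer to its mddev, whose private field holds the r10conf, which points back at the mddev. A minimal user-space sketch of that linkage; the struct names mirror the kernel's but the bodies are stand-ins trimmed to the pointers involved:

    #include <stdio.h>

    struct mddev;                              /* stand-in for the md device  */
    struct r10conf { struct mddev *mddev; };   /* per-array config, back-ptr  */
    struct mddev   { void *private; };         /* ->private holds the r10conf */
    struct r10bio  { struct mddev *mddev; };   /* each r10bio knows its array */

    int main(void)
    {
        struct mddev md;
        struct r10conf conf = { .mddev = &md };
        struct r10bio r10_bio = { .mddev = &md };

        md.private = &conf;

        /* the hop seen in free_r10bio(), put_buf(), raid_end_bio_io(), ... */
        struct r10conf *c = r10_bio.mddev->private;

        /* ...and the reverse hop that appears as conf->mddev throughout */
        printf("round trip ok: %d\n", c == &conf && c->mddev == &md);
        return 0;
    }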
356 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
388 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
395 mdname(conf->mddev), in raid10_end_read_request()
406 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
410 md_write_end(r10_bio->mddev); in close_write()
433 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
458 md_error(rdev->mddev, rdev); in raid10_end_write_request()
463 &rdev->mddev->recovery); in raid10_end_write_request()
468 md_error(rdev->mddev, rdev); in raid10_end_write_request()
532 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
631 conf->mddev->reshape_backwards)) { in raid10_find_phys()
736 if ((conf->mddev->recovery_cp < MaxSector in read_balance()
738 (mddev_is_clustered(conf->mddev) && in read_balance()
739 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
881 md_bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
970 raid10_log(conf->mddev, "wait barrier"); in wait_barrier()
980 (conf->mddev->thread->tsk == current && in wait_barrier()
982 &conf->mddev->recovery) && in wait_barrier()
1040 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || in choose_data_offset()
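The hit at line 1040 is half of a two-sided decision: during a reshape an rdev has both an old and a new data offset, and choose_data_offset() picks one per bio. A hedged model of that decision; the second half of the condition (whether the bio targets the previous geometry) does not appear in the hits above and is an assumption here:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Model: no reshape running, or bio aimed at the old geometry, keeps
     * the old data_offset; otherwise the new_data_offset is used. */
    static sector_t model_choose_data_offset(bool reshape_running,
                                             bool bio_uses_prev_geometry,
                                             sector_t data_offset,
                                             sector_t new_data_offset)
    {
        if (!reshape_running || bio_uses_prev_geometry)
            return data_offset;
        return new_data_offset;
    }

    int main(void)
    {
        printf("%llu\n", model_choose_data_offset(true, false, 2048, 4096));  /* 4096 */
        printf("%llu\n", model_choose_data_offset(false, false, 2048, 4096)); /* 2048 */
        return 0;
    }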
1057 struct mddev *mddev = plug->cb.data; in raid10_unplug() local
1058 struct r10conf *conf = mddev->private; in raid10_unplug()
1067 md_wakeup_thread(mddev->thread); in raid10_unplug()
1074 md_bitmap_unplug(mddev->bitmap); in raid10_unplug()
1101 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1105 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in regular_request_wait()
1108 raid10_log(conf->mddev, "wait reshape"); in regular_request_wait()
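regular_request_wait() (lines 1101-1108) stalls normal I/O while a reshape is in flight. A hedged sketch of the stall predicate: only the MD_RECOVERY_RESHAPE test at line 1105 is visible in the hits, so the exact range comparison against conf->reshape_progress is an assumption, modeled as "the request straddles the moving reshape boundary":

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Assumed condition: wait while a reshape is running and the request
     * range [start, start + sectors) crosses the reshape boundary. */
    static bool must_wait(bool reshape_running, sector_t start,
                          sector_t sectors, sector_t reshape_progress)
    {
        return reshape_running &&
               start < reshape_progress &&
               start + sectors > reshape_progress;
    }

    int main(void)
    {
        printf("%d\n", must_wait(true, 1000, 64, 1032)); /* 1: straddles boundary */
        printf("%d\n", must_wait(true, 2000, 64, 1032)); /* 0: fully past it     */
        return 0;
    }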
1118 static void raid10_read_request(struct mddev *mddev, struct bio *bio, in raid10_read_request() argument
1121 struct r10conf *conf = mddev->private; in raid10_read_request()
1160 regular_request_wait(mddev, conf, bio, r10_bio->sectors); in raid10_read_request()
1165 mdname(mddev), b, in raid10_read_request()
1173 mdname(mddev), in raid10_read_request()
1191 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); in raid10_read_request()
1206 if (mddev->gendisk) in raid10_read_request()
1207 trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), in raid10_read_request()
1213 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1223 struct r10conf *conf = mddev->private; in raid10_write_one_disk()
1238 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid10_write_one_disk()
1255 if (conf->mddev->gendisk) in raid10_write_one_disk()
1256 trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk), in raid10_write_one_disk()
1263 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); in raid10_write_one_disk()
1276 md_wakeup_thread(mddev->thread); in raid10_write_one_disk()
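Lines 1263 and 1276, together with the raid10_unplug() hits at lines 1057-1074, show the write-plugging split: writes issued under a block-layer plug are batched per task and flushed by the unplug callback, while unplugged writes go onto a shared pending list and the raid10d thread is woken. A hedged user-space model of that decision (the batching structures are stand-ins, not the kernel's types):

    #include <stdio.h>

    #define MAX_BATCH 16

    struct plug { int n; int batched[MAX_BATCH]; };  /* per-task batch       */
    static int pending[MAX_BATCH], npending;         /* shared pending list  */

    static void queue_write(struct plug *plug, int bio_id)
    {
        if (plug && plug->n < MAX_BATCH) {
            plug->batched[plug->n++] = bio_id;  /* cheap, per-task path      */
        } else {
            pending[npending++] = bio_id;       /* would take device_lock    */
            puts("wake raid10d");               /* md_wakeup_thread() at 1276 */
        }
    }

    int main(void)
    {
        struct plug p = { 0 };
        queue_write(&p, 1);    /* batched under the plug             */
        queue_write(NULL, 2);  /* no plug: pending list plus wakeup  */
        printf("plugged=%d pending=%d\n", p.n, npending);
        return 0;
    }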
1280 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio) in wait_blocked_dev() argument
1283 struct r10conf *conf = mddev->private; in wait_blocked_dev()
1338 raid10_log(conf->mddev, "%s wait rdev %d blocked", in wait_blocked_dev()
1340 md_wait_for_blocked_rdev(blocked_rdev, mddev); in wait_blocked_dev()
1346 static void raid10_write_request(struct mddev *mddev, struct bio *bio, in raid10_write_request() argument
1349 struct r10conf *conf = mddev->private; in raid10_write_request()
1354 if ((mddev_is_clustered(mddev) && in raid10_write_request()
1355 md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1362 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1371 regular_request_wait(mddev, conf, bio, sectors); in raid10_write_request()
1372 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_write_request()
1373 (mddev->reshape_backwards in raid10_write_request()
1379 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1380 set_mask_bits(&mddev->sb_flags, 0, in raid10_write_request()
1382 md_wakeup_thread(mddev->thread); in raid10_write_request()
1383 raid10_log(conf->mddev, "wait reshape metadata"); in raid10_write_request()
1384 wait_event(mddev->sb_wait, in raid10_write_request()
1385 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in raid10_write_request()
1387 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1391 md_wakeup_thread(mddev->thread); in raid10_write_request()
1392 raid10_log(mddev, "wait queued"); in raid10_write_request()
1409 wait_blocked_dev(mddev, r10_bio); in raid10_write_request()
1493 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in raid10_write_request()
1497 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1499 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
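Lines 1493-1499 show the write fan-out: after md_bitmap_startwrite(), one logical write becomes one raid10_write_one_disk() call per copy, plus a second call (the true flag at line 1499 versus false at 1497) when that copy has an in-progress replacement device. A minimal sketch of the fan-out shape:

    #include <stdbool.h>
    #include <stdio.h>

    struct copy { bool has_target; bool has_replacement; };

    static void write_one_disk(int copy, bool replacement)
    {
        printf("write copy %d%s\n", copy, replacement ? " (replacement)" : "");
    }

    int main(void)
    {
        /* two copies; the second also has a replacement being rebuilt */
        struct copy devs[2] = { { true, false }, { true, true } };
        for (int i = 0; i < 2; i++) {
            if (devs[i].has_target)      write_one_disk(i, false);
            if (devs[i].has_replacement) write_one_disk(i, true);
        }
        return 0;
    }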
1504 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) in __make_request() argument
1506 struct r10conf *conf = mddev->private; in __make_request()
1514 r10_bio->mddev = mddev; in __make_request()
1522 raid10_read_request(mddev, bio, r10_bio); in __make_request()
1524 raid10_write_request(mddev, bio, r10_bio); in __make_request()
1529 struct r10conf *conf = r10bio->mddev->private; in raid_end_discard_bio()
1541 md_write_end(r10bio->mddev); in raid_end_discard_bio()
1552 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_discard_request()
1577 rdev_dec_pending(rdev, conf->mddev); in raid10_end_discard_request()
1586 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio) in raid10_handle_discard() argument
1588 struct r10conf *conf = mddev->private; in raid10_handle_discard()
1607 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_handle_discard()
1616 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_handle_discard()
1694 r10_bio->mddev = mddev; in raid10_handle_discard()
1698 wait_blocked_dev(mddev, r10_bio); in raid10_handle_discard()
1765 dev_start = (first_stripe_index + 1) * mddev->chunk_sectors; in raid10_handle_discard()
1767 dev_start = first_stripe_index * mddev->chunk_sectors; in raid10_handle_discard()
1772 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; in raid10_handle_discard()
1774 dev_end = last_stripe_index * mddev->chunk_sectors; in raid10_handle_discard()
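Lines 1765-1774 are the per-device discard window arithmetic: a disk sitting before the range's starting disk only joins from the next stripe on, and one sitting after the ending disk stops a stripe early, so each window is a whole number of chunks wider or narrower. A hedged, runnable model; the exact-offset branches for the two boundary disks themselves do not appear in the hits above and are assumptions:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static void device_window(int disk, int start_disk, int end_disk,
                              sector_t first_stripe, sector_t last_stripe,
                              sector_t chunk_sectors,
                              sector_t start_off, sector_t end_off)
    {
        sector_t dev_start, dev_end;

        if (disk < start_disk)
            dev_start = (first_stripe + 1) * chunk_sectors;  /* line 1765 */
        else if (disk > start_disk)
            dev_start = first_stripe * chunk_sectors;        /* line 1767 */
        else
            dev_start = start_off;            /* boundary disk (assumed)  */

        if (disk < end_disk)
            dev_end = (last_stripe + 1) * chunk_sectors;     /* line 1772 */
        else if (disk > end_disk)
            dev_end = last_stripe * chunk_sectors;           /* line 1774 */
        else
            dev_end = end_off;                /* boundary disk (assumed)  */

        printf("disk %d: [%llu, %llu)\n", disk, dev_start, dev_end);
    }

    int main(void)
    {
        /* 512 KiB chunks (1024 sectors); illustrative stripes and offsets */
        for (int d = 0; d < 4; d++)
            device_window(d, 1, 2, 10, 20, 1024, 10 * 1024 + 256, 20 * 1024 + 512);
        return 0;
    }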
1786 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid10_handle_discard()
1792 md_submit_discard_bio(mddev, rdev, mbio, in raid10_handle_discard()
1799 rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid10_handle_discard()
1805 md_submit_discard_bio(mddev, rrdev, rbio, in raid10_handle_discard()
1831 static bool raid10_make_request(struct mddev *mddev, struct bio *bio) in raid10_make_request() argument
1833 struct r10conf *conf = mddev->private; in raid10_make_request()
1839 && md_flush_request(mddev, bio)) in raid10_make_request()
1842 if (!md_write_start(mddev, bio)) in raid10_make_request()
1846 if (!raid10_handle_discard(mddev, bio)) in raid10_make_request()
1861 __make_request(mddev, bio, sectors); in raid10_make_request()
1868 static void raid10_status(struct seq_file *seq, struct mddev *mddev) in raid10_status() argument
1870 struct r10conf *conf = mddev->private; in raid10_status()
1874 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in raid10_status()
1886 conf->geo.raid_disks - mddev->degraded); in raid10_status()
1948 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) in raid10_error() argument
1951 struct r10conf *conf = mddev->private; in raid10_error()
1961 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev in raid10_error()
1970 mddev->degraded++; in raid10_error()
1974 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid10_error()
1977 set_mask_bits(&mddev->sb_flags, 0, in raid10_error()
1982 mdname(mddev), bdevname(rdev->bdev, b), in raid10_error()
1983 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
1996 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
2020 static int raid10_spare_active(struct mddev *mddev) in raid10_spare_active() argument
2023 struct r10conf *conf = mddev->private; in raid10_spare_active()
2061 mddev->degraded -= count; in raid10_spare_active()
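Lines 1970 and 2061 are the two halves of degraded accounting: raid10_error() increments mddev->degraded when an in-sync device fails, and raid10_spare_active() subtracts the number of spares that finished recovery and became in-sync. A tiny sketch of the counter's lifecycle:

    #include <stdio.h>

    enum state { IN_SYNC, FAULTY, REBUILDING };

    int main(void)
    {
        enum state disks[4] = { IN_SYNC, IN_SYNC, IN_SYNC, IN_SYNC };
        int degraded = 0;

        disks[2] = FAULTY;      degraded++;  /* raid10_error(), line 1970        */
        disks[2] = REBUILDING;               /* spare added via raid10_add_disk() */
        disks[2] = IN_SYNC;     degraded--;  /* raid10_spare_active(), line 2061  */

        printf("degraded=%d\n", degraded);
        return 0;
    }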
2068 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_add_disk() argument
2070 struct r10conf *conf = mddev->private; in raid10_add_disk()
2076 if (mddev->recovery_cp < MaxSector) in raid10_add_disk()
2084 if (md_integrity_add_rdev(rdev, mddev)) in raid10_add_disk()
2098 if (p->recovery_disabled == mddev->recovery_disabled) in raid10_add_disk()
2108 if (mddev->gendisk) in raid10_add_disk()
2109 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
2116 if (mddev->gendisk) in raid10_add_disk()
2117 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
2121 p->recovery_disabled = mddev->recovery_disabled - 1; in raid10_add_disk()
2129 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk()
2130 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); in raid10_add_disk()
2136 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_remove_disk() argument
2138 struct r10conf *conf = mddev->private; in raid10_remove_disk()
2161 mddev->recovery_disabled != p->recovery_disabled && in raid10_remove_disk()
2189 err = md_integrity_register(mddev); in raid10_remove_disk()
2199 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
2213 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
2226 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
2242 struct mddev *mddev = r10_bio->mddev; in end_sync_request() local
2253 md_done_sync(mddev, s, 1); in end_sync_request()
2270 struct mddev *mddev = r10_bio->mddev; in end_sync_write() local
2271 struct r10conf *conf = mddev->private; in end_sync_write()
2287 md_error(mddev, rdev); in end_sync_write()
2292 &rdev->mddev->recovery); in end_sync_write()
2301 rdev_dec_pending(rdev, mddev); in end_sync_write()
2322 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2324 struct r10conf *conf = mddev->private; in sync_request_write()
2381 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2382 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) in sync_request_write()
2387 md_error(rdev->mddev, rdev); in sync_request_write()
2440 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2464 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error() local
2465 struct r10conf *conf = mddev->private; in fix_recovery_read_error()
2503 &rdev->mddev->recovery); in fix_recovery_read_error()
2521 mdname(mddev)); in fix_recovery_read_error()
2524 = mddev->recovery_disabled; in fix_recovery_read_error()
2526 &mddev->recovery); in fix_recovery_read_error()
2538 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2540 struct r10conf *conf = mddev->private; in recovery_request_write()
2582 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) in check_decay_read_errors() argument
2628 &rdev->mddev->recovery); in r10_sync_page_io()
2632 md_error(rdev->mddev, rdev); in r10_sync_page_io()
2644 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2649 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); in fix_read_error()
2662 check_decay_read_errors(mddev, rdev); in fix_read_error()
2669 mdname(mddev), b, in fix_read_error()
2672 mdname(mddev), b); in fix_read_error()
2673 md_error(mddev, rdev); in fix_read_error()
2707 rdev_dec_pending(rdev, mddev); in fix_read_error()
2731 md_error(mddev, rdev); in fix_read_error()
2763 mdname(mddev), s, in fix_read_error()
2770 mdname(mddev), in fix_read_error()
2773 rdev_dec_pending(rdev, mddev); in fix_read_error()
2800 mdname(mddev), s, in fix_read_error()
2806 mdname(mddev), in fix_read_error()
2811 mdname(mddev), s, in fix_read_error()
2819 rdev_dec_pending(rdev, mddev); in fix_read_error()
2832 struct mddev *mddev = r10_bio->mddev; in narrow_write_error() local
2833 struct r10conf *conf = mddev->private; in narrow_write_error()
2868 wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in narrow_write_error()
2890 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2894 struct r10conf *conf = mddev->private; in handle_read_error()
2909 if (mddev->ro) in handle_read_error()
2913 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2916 md_error(mddev, rdev); in handle_read_error()
2918 rdev_dec_pending(rdev, mddev); in handle_read_error()
2921 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); in handle_read_error()
2953 md_error(conf->mddev, rdev); in handle_write_completed()
2970 md_error(conf->mddev, rdev); in handle_write_completed()
2985 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2989 md_error(conf->mddev, rdev); in handle_write_completed()
2993 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3002 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3015 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
3027 struct mddev *mddev = thread->mddev; in raid10d() local
3030 struct r10conf *conf = mddev->private; in raid10d()
3034 md_check_recovery(mddev); in raid10d()
3037 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
3040 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
3051 if (mddev->degraded) in raid10d()
3076 mddev = r10_bio->mddev; in raid10d()
3077 conf = mddev->private; in raid10d()
3082 reshape_request_write(mddev, r10_bio); in raid10d()
3084 sync_request_write(mddev, r10_bio); in raid10d()
3086 recovery_request_write(mddev, r10_bio); in raid10d()
3088 handle_read_error(mddev, r10_bio); in raid10d()
3093 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) in raid10d()
3094 md_check_recovery(mddev); in raid10d()
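Lines 3082-3088 show raid10d's retry dispatch: each requeued r10bio carries state bits that name the slow path it needs. A hedged model of that dispatch covering only the four handlers visible above; the fallback is an assumption:

    #include <stdio.h>

    enum { IS_RESHAPE = 1, IS_SYNC = 2, IS_RECOVER = 4, READ_ERROR = 8 };

    static const char *dispatch(unsigned state)
    {
        if (state & IS_RESHAPE) return "reshape_request_write";  /* line 3082 */
        if (state & IS_SYNC)    return "sync_request_write";     /* line 3084 */
        if (state & IS_RECOVER) return "recovery_request_write"; /* line 3086 */
        if (state & READ_ERROR) return "handle_read_error";      /* line 3088 */
        return "resubmit";      /* assumed: anything else just goes back out */
    }

    int main(void)
    {
        unsigned queue[] = { IS_SYNC, READ_ERROR, IS_RESHAPE };
        for (unsigned i = 0; i < sizeof(queue) / sizeof(queue[0]); i++)
            printf("%s\n", dispatch(queue[i]));
        return 0;
    }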
3125 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
3126 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
3172 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
3215 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, in raid10_sync_request() argument
3218 struct r10conf *conf = mddev->private; in raid10_sync_request()
3238 if (mddev->bitmap == NULL && in raid10_sync_request()
3239 mddev->recovery_cp == MaxSector && in raid10_sync_request()
3240 mddev->reshape_position == MaxSector && in raid10_sync_request()
3241 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in raid10_sync_request()
3242 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid10_sync_request()
3243 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_sync_request()
3246 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
3250 max_sector = mddev->dev_sectors; in raid10_sync_request()
3251 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in raid10_sync_request()
3252 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
3253 max_sector = mddev->resync_max_sectors; in raid10_sync_request()
3267 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid10_sync_request()
3273 if (mddev->curr_resync < max_sector) { /* aborted */ in raid10_sync_request()
3274 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in raid10_sync_request()
3275 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid10_sync_request()
3279 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
3280 md_bitmap_end_sync(mddev->bitmap, sect, in raid10_sync_request()
3285 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
3287 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3302 md_bitmap_close_sync(mddev->bitmap); in raid10_sync_request()
3308 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
3309 return reshape_request(mddev, sector_nr, skipped); in raid10_sync_request()
3319 if (max_sector > mddev->resync_max) in raid10_sync_request()
3320 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in raid10_sync_request()
3352 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3389 if (sect >= mddev->resync_max_sectors) { in raid10_sync_request()
3402 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3429 r10_bio->mddev = mddev; in raid10_sync_request()
3448 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3564 &mddev->recovery)) in raid10_sync_request()
3566 mdname(mddev)); in raid10_sync_request()
3568 = mddev->recovery_disabled; in raid10_sync_request()
3574 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3576 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3579 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3581 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3620 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3621 mddev_is_clustered(mddev) && in raid10_sync_request()
3624 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3625 &sync_blocks, mddev->degraded) && in raid10_sync_request()
3627 &mddev->recovery)) { in raid10_sync_request()
3637 r10_bio->mddev = mddev; in raid10_sync_request()
3719 mddev); in raid10_sync_request()
3724 mddev); in raid10_sync_request()
3756 if (mddev_is_clustered(mddev) && in raid10_sync_request()
3757 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3760 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3763 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3767 } else if (mddev_is_clustered(mddev)) { in raid10_sync_request()
3787 mddev->curr_resync_completed, i); in raid10_sync_request()
3796 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3821 md_done_sync(mddev, sectors_skipped, 1); in raid10_sync_request()
3839 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3842 struct r10conf *conf = mddev->private; in raid10_size()
3889 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3895 layout = mddev->layout; in setup_geo()
3896 chunk = mddev->chunk_sectors; in setup_geo()
3897 disks = mddev->raid_disks - mddev->delta_disks; in setup_geo()
3900 layout = mddev->new_layout; in setup_geo()
3901 chunk = mddev->new_chunk_sectors; in setup_geo()
3902 disks = mddev->raid_disks; in setup_geo()
3907 layout = mddev->new_layout; in setup_geo()
3908 chunk = mddev->new_chunk_sectors; in setup_geo()
3909 disks = mddev->raid_disks + mddev->delta_disks; in setup_geo()
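Lines 3895-3909 show setup_geo()'s three variants: the "old" geometry backs delta_disks out of raid_disks, "new" uses raid_disks as-is, and the reshape-start case adds delta_disks in because raid_disks has not been updated yet. A runnable sketch of just that selection (struct bodies are stand-ins, values illustrative):

    #include <stdio.h>

    enum geo_type { GEO_OLD, GEO_NEW, GEO_START };

    struct md   { int layout, new_layout, chunk, new_chunk, raid_disks, delta_disks; };
    struct geom { int layout, chunk, disks; };

    static void pick_geo(struct geom *g, const struct md *md, enum geo_type t)
    {
        switch (t) {
        case GEO_OLD:                                    /* lines 3895-3897 */
            g->layout = md->layout;     g->chunk = md->chunk;
            g->disks  = md->raid_disks - md->delta_disks;
            break;
        case GEO_NEW:                                    /* lines 3900-3902 */
            g->layout = md->new_layout; g->chunk = md->new_chunk;
            g->disks  = md->raid_disks;
            break;
        case GEO_START:                                  /* lines 3907-3909 */
            g->layout = md->new_layout; g->chunk = md->new_chunk;
            g->disks  = md->raid_disks + md->delta_disks;
            break;
        }
    }

    int main(void)
    {
        /* growing a 4-disk array by 2: reshape about to start */
        struct md md = { 258, 258, 1024, 1024, 4, 2 };
        struct geom g;
        pick_geo(&g, &md, GEO_START);
        printf("start geometry: %d disks\n", g.disks);   /* 6 */
        return 0;
    }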
3945 static struct r10conf *setup_conf(struct mddev *mddev) in setup_conf() argument
3952 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
3956 mdname(mddev), PAGE_SIZE); in setup_conf()
3960 if (copies < 2 || copies > mddev->raid_disks) { in setup_conf()
3962 mdname(mddev), mddev->new_layout); in setup_conf()
3972 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
3993 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3994 if (mddev->reshape_position == MaxSector) { in setup_conf()
3998 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
4002 conf->reshape_progress = mddev->reshape_position; in setup_conf()
4019 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
4023 conf->mddev = mddev; in setup_conf()
4043 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * in raid10_set_io_opt()
4047 static int raid10_run(struct mddev *mddev) in raid10_run() argument
4058 if (mddev_init_writes_pending(mddev) < 0) in raid10_run()
4061 if (mddev->private == NULL) { in raid10_run()
4062 conf = setup_conf(mddev); in raid10_run()
4065 mddev->private = conf; in raid10_run()
4067 conf = mddev->private; in raid10_run()
4071 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
4074 fc = (mddev->layout >> 8) & 255; in raid10_run()
4075 fo = mddev->layout & (1<<16); in raid10_run()
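Lines 4074-4075 extract two fields from the raid10 layout word: far copies from bits 8-15 and the far-offset flag from bit 16. A hedged decode of the whole word; the low byte holding the near-copy count follows the same encoding but is an assumption not shown in these hits:

    #include <stdio.h>

    int main(void)
    {
        int layout = (1 << 8) + 2;     /* the value raid10_takeover_raid0()
                                          installs at line 4331: near=2, far=1 */
        int nc = layout & 255;         /* near copies (assumed low byte) */
        int fc = (layout >> 8) & 255;  /* far copies, as at line 4074    */
        int fo = layout & (1 << 16);   /* far-offset flag, as at line 4075 */

        printf("near=%d far=%d offset=%s\n", nc, fc, fo ? "yes" : "no");
        return 0;
    }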
4083 mddev->thread = conf->thread; in raid10_run()
4086 if (mddev->queue) { in raid10_run()
4087 blk_queue_max_discard_sectors(mddev->queue, in raid10_run()
4089 blk_queue_max_write_same_sectors(mddev->queue, 0); in raid10_run()
4090 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid10_run()
4091 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); in raid10_run()
4095 rdev_for_each(rdev, mddev) { in raid10_run()
4116 if (!mddev->reshape_backwards) in raid10_run()
4123 if (mddev->gendisk) in raid10_run()
4124 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_run()
4134 if (mddev->queue) { in raid10_run()
4137 mddev->queue); in raid10_run()
4140 mddev->queue); in raid10_run()
4145 mdname(mddev)); in raid10_run()
4159 mddev->degraded = 0; in raid10_run()
4177 mddev->degraded++; in raid10_run()
4189 disk->recovery_disabled = mddev->recovery_disabled - 1; in raid10_run()
4192 if (mddev->recovery_cp != MaxSector) in raid10_run()
4194 mdname(mddev)); in raid10_run()
4196 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
4201 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
4202 size = raid10_size(mddev, 0, 0); in raid10_run()
4203 md_set_array_sectors(mddev, size); in raid10_run()
4204 mddev->resync_max_sectors = size; in raid10_run()
4205 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); in raid10_run()
4207 if (md_integrity_register(mddev)) in raid10_run()
4225 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_run()
4226 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_run()
4227 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_run()
4228 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_run()
4229 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_run()
4231 if (!mddev->sync_thread) in raid10_run()
4238 md_unregister_thread(&mddev->thread); in raid10_run()
4243 mddev->private = NULL; in raid10_run()
4248 static void raid10_free(struct mddev *mddev, void *priv) in raid10_free() argument
4261 static void raid10_quiesce(struct mddev *mddev, int quiesce) in raid10_quiesce() argument
4263 struct r10conf *conf = mddev->private; in raid10_quiesce()
4271 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
4285 struct r10conf *conf = mddev->private; in raid10_resize()
4288 if (mddev->reshape_position != MaxSector) in raid10_resize()
4294 oldsize = raid10_size(mddev, 0, 0); in raid10_resize()
4295 size = raid10_size(mddev, sectors, 0); in raid10_resize()
4296 if (mddev->external_size && in raid10_resize()
4297 mddev->array_sectors > size) in raid10_resize()
4299 if (mddev->bitmap) { in raid10_resize()
4300 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0); in raid10_resize()
4304 md_set_array_sectors(mddev, size); in raid10_resize()
4305 if (sectors > mddev->dev_sectors && in raid10_resize()
4306 mddev->recovery_cp > oldsize) { in raid10_resize()
4307 mddev->recovery_cp = oldsize; in raid10_resize()
4308 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_resize()
4311 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
4312 mddev->resync_max_sectors = size; in raid10_resize()
4316 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
4321 if (mddev->degraded > 0) { in raid10_takeover_raid0()
4323 mdname(mddev)); in raid10_takeover_raid0()
4329 mddev->new_level = 10; in raid10_takeover_raid0()
4331 mddev->new_layout = (1<<8) + 2; in raid10_takeover_raid0()
4332 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
4333 mddev->delta_disks = mddev->raid_disks; in raid10_takeover_raid0()
4334 mddev->raid_disks *= 2; in raid10_takeover_raid0()
4336 mddev->recovery_cp = MaxSector; in raid10_takeover_raid0()
4337 mddev->dev_sectors = size; in raid10_takeover_raid0()
4339 conf = setup_conf(mddev); in raid10_takeover_raid0()
4341 rdev_for_each(rdev, mddev) in raid10_takeover_raid0()
4352 static void *raid10_takeover(struct mddev *mddev) in raid10_takeover() argument
4359 if (mddev->level == 0) { in raid10_takeover()
4361 raid0_conf = mddev->private; in raid10_takeover()
4364 mdname(mddev)); in raid10_takeover()
4367 return raid10_takeover_raid0(mddev, in raid10_takeover()
4374 static int raid10_check_reshape(struct mddev *mddev) in raid10_check_reshape() argument
4390 struct r10conf *conf = mddev->private; in raid10_check_reshape()
4396 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4403 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
4412 if (mddev->delta_disks > 0) { in raid10_check_reshape()
4415 kcalloc(mddev->raid_disks + mddev->delta_disks, in raid10_check_reshape()
4481 static int raid10_start_reshape(struct mddev *mddev) in raid10_start_reshape() argument
4497 struct r10conf *conf = mddev->private; in raid10_start_reshape()
4502 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid10_start_reshape()
4505 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4513 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4520 if (!mddev->reshape_backwards) in raid10_start_reshape()
4533 if (spares < mddev->delta_disks) in raid10_start_reshape()
4547 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4549 if (mddev->reshape_backwards) { in raid10_start_reshape()
4550 sector_t size = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4551 if (size < mddev->array_sectors) { in raid10_start_reshape()
4554 mdname(mddev)); in raid10_start_reshape()
4557 mddev->resync_max_sectors = size; in raid10_start_reshape()
4564 if (mddev->delta_disks && mddev->bitmap) { in raid10_start_reshape()
4568 oldsize = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4569 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4571 if (!mddev_is_clustered(mddev)) { in raid10_start_reshape()
4572 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4579 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4594 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4598 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize); in raid10_start_reshape()
4600 md_bitmap_resize(mddev->bitmap, oldsize, 0, 0); in raid10_start_reshape()
4605 if (mddev->delta_disks > 0) { in raid10_start_reshape()
4606 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4609 if (raid10_add_disk(mddev, rdev) == 0) { in raid10_start_reshape()
4617 sysfs_link_rdev(mddev, rdev); in raid10_start_reshape()
4630 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4632 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4633 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4634 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid10_start_reshape()
4636 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_start_reshape()
4637 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_start_reshape()
4638 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid10_start_reshape()
4639 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_start_reshape()
4640 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_start_reshape()
4642 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_start_reshape()
4644 if (!mddev->sync_thread) { in raid10_start_reshape()
4649 md_wakeup_thread(mddev->sync_thread); in raid10_start_reshape()
4654 mddev->recovery = 0; in raid10_start_reshape()
4657 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4658 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4663 mddev->reshape_position = MaxSector; in raid10_start_reshape()
4699 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, in reshape_request() argument
4739 struct r10conf *conf = mddev->private; in reshape_request()
4754 if (mddev->reshape_backwards && in reshape_request()
4755 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4756 sector_nr = (raid10_size(mddev, 0, 0) in reshape_request()
4758 } else if (!mddev->reshape_backwards && in reshape_request()
4762 mddev->curr_resync_completed = sector_nr; in reshape_request()
4763 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
4773 if (mddev->reshape_backwards) { in reshape_request()
4823 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4824 if (mddev->reshape_backwards) in reshape_request()
4825 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) in reshape_request()
4828 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4830 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
4831 md_wakeup_thread(mddev->thread); in reshape_request()
4832 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
4833 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
4834 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in reshape_request()
4838 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4849 r10_bio->mddev = mddev; in reshape_request()
4862 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in reshape_request()
4866 read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set); in reshape_request()
4881 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4899 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
4969 if (mddev->reshape_backwards) in reshape_request()
4978 static int handle_reshape_read_error(struct mddev *mddev,
4980 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4987 struct r10conf *conf = mddev->private; in reshape_request_write()
4991 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4993 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
5029 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
5034 md_finish_reshape(conf->mddev); in end_reshape()
5040 if (conf->mddev->queue) in end_reshape()
5045 static void raid10_update_reshape_pos(struct mddev *mddev) in raid10_update_reshape_pos() argument
5047 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos()
5050 md_cluster_ops->resync_info_get(mddev, &lo, &hi); in raid10_update_reshape_pos()
5051 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo)) in raid10_update_reshape_pos()
5052 || mddev->reshape_position == MaxSector) in raid10_update_reshape_pos()
5053 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
5058 static int handle_reshape_read_error(struct mddev *mddev, in handle_reshape_read_error() argument
5063 struct r10conf *conf = mddev->private; in handle_reshape_read_error()
5071 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in handle_reshape_read_error()
5107 rdev_dec_pending(rdev, mddev); in handle_reshape_read_error()
5122 &mddev->recovery); in handle_reshape_read_error()
5136 struct mddev *mddev = r10_bio->mddev; in end_reshape_write() local
5137 struct r10conf *conf = mddev->private; in end_reshape_write()
5153 md_error(mddev, rdev); in end_reshape_write()
5156 rdev_dec_pending(rdev, mddev); in end_reshape_write()
5164 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
5169 static void raid10_finish_reshape(struct mddev *mddev) in raid10_finish_reshape() argument
5171 struct r10conf *conf = mddev->private; in raid10_finish_reshape()
5173 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in raid10_finish_reshape()
5176 if (mddev->delta_disks > 0) { in raid10_finish_reshape()
5177 if (mddev->recovery_cp > mddev->resync_max_sectors) { in raid10_finish_reshape()
5178 mddev->recovery_cp = mddev->resync_max_sectors; in raid10_finish_reshape()
5179 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_finish_reshape()
5181 mddev->resync_max_sectors = mddev->array_sectors; in raid10_finish_reshape()
5186 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
5197 mddev->layout = mddev->new_layout; in raid10_finish_reshape()
5198 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
5199 mddev->reshape_position = MaxSector; in raid10_finish_reshape()
5200 mddev->delta_disks = 0; in raid10_finish_reshape()
5201 mddev->reshape_backwards = 0; in raid10_finish_reshape()
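A last bit of arithmetic tying two distant hits together: raid10_finish_reshape() derives chunk_sectors as 1 << chunk_shift (line 5198), and raid10_status() prints it in kibibytes via chunk_sectors / 2 (line 1874), since a sector is 512 bytes and 1024 / 512 == 2. A worked example with an illustrative shift:

    #include <stdio.h>

    int main(void)
    {
        int chunk_shift = 10;                     /* illustrative value    */
        int chunk_sectors = 1 << chunk_shift;     /* 1024 sectors of 512 B */
        printf("%dK chunks\n", chunk_sectors / 2); /* prints "512K chunks" */
        return 0;
    }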