Lines matching refs: mddev (drivers/md/raid1.c)
84 struct mddev *mddev = rdev->mddev; in wait_for_serialization() local
89 if (WARN_ON(!mddev->serial_info_pool)) in wait_for_serialization()
91 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); in wait_for_serialization()
101 struct mddev *mddev = rdev->mddev; in remove_serial() local
110 mempool_free(si, mddev->serial_info_pool); in remove_serial()
179 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) in r1buf_pool_alloc()
251 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
259 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
277 struct mddev *mddev = r1_bio->mddev; in reschedule_retry() local
278 struct r1conf *conf = mddev->private; in reschedule_retry()
288 md_wakeup_thread(mddev->thread); in reschedule_retry()
311 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io()
336 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
348 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
365 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
387 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
388 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
396 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
403 mdname(conf->mddev), in raid1_end_read_request()
421 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
425 md_write_end(r1_bio->mddev); in close_write()
448 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
465 conf->mddev->recovery); in raid1_end_write_request()
471 md_error(r1_bio->mddev, rdev); in raid1_end_write_request()
548 } else if (rdev->mddev->serialize_policy) in raid1_end_write_request()
551 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
628 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
629 (mddev_is_clustered(conf->mddev) && in read_balance()
630 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
794 md_bitmap_unplug(conf->mddev->bitmap); in flush_bio_list()
905 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
908 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
1084 raid1_log(conf->mddev, "wait freeze"); in freeze_array()
1109 behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set); in alloc_behind_master_bio()
1159 struct mddev *mddev = plug->cb.data; in raid1_unplug() local
1160 struct r1conf *conf = mddev->private; in raid1_unplug()
1169 md_wakeup_thread(mddev->thread); in raid1_unplug()
1180 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) in init_r1bio() argument
1185 r1_bio->mddev = mddev; in init_r1bio()
1190 alloc_r1bio(struct mddev *mddev, struct bio *bio) in alloc_r1bio() argument
1192 struct r1conf *conf = mddev->private; in alloc_r1bio()
1198 init_r1bio(r1_bio, mddev, bio); in alloc_r1bio()
1202 static void raid1_read_request(struct mddev *mddev, struct bio *bio, in raid1_read_request() argument
1205 struct r1conf *conf = mddev->private; in raid1_read_request()
1208 struct bitmap *bitmap = mddev->bitmap; in raid1_read_request()
1242 r1_bio = alloc_r1bio(mddev, bio); in raid1_read_request()
1244 init_r1bio(r1_bio, mddev, bio); in raid1_read_request()
1257 mdname(mddev), in raid1_read_request()
1268 mdname(mddev), in raid1_read_request()
1278 raid1_log(mddev, "wait behind writes"); in raid1_read_request()
1298 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); in raid1_read_request()
1312 if (mddev->gendisk) in raid1_read_request()
1313 trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), in raid1_read_request()
1319 static void raid1_write_request(struct mddev *mddev, struct bio *bio, in raid1_write_request() argument
1322 struct r1conf *conf = mddev->private; in raid1_write_request()
1325 struct bitmap *bitmap = mddev->bitmap; in raid1_write_request()
1334 if (mddev_is_clustered(mddev) && in raid1_write_request()
1335 md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1342 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1358 r1_bio = alloc_r1bio(mddev, bio); in raid1_write_request()
1362 md_wakeup_thread(mddev->thread); in raid1_write_request()
1363 raid1_log(mddev, "wait queued"); in raid1_write_request()
1429 rdev_dec_pending(rdev, mddev); in raid1_write_request()
1458 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in raid1_write_request()
1461 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); in raid1_write_request()
1462 md_wait_for_blocked_rdev(blocked_rdev, mddev); in raid1_write_request()
1507 < mddev->bitmap_info.max_write_behind) && in raid1_write_request()
1519 GFP_NOIO, &mddev->bio_set); in raid1_write_request()
1521 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid1_write_request()
1528 } else if (mddev->serialize_policy) in raid1_write_request()
1539 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1545 if (mddev->gendisk) in raid1_write_request()
1546 trace_block_bio_remap(mbio, disk_devt(mddev->gendisk), in raid1_write_request()
1551 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); in raid1_write_request()
1564 md_wakeup_thread(mddev->thread); in raid1_write_request()
1574 static bool raid1_make_request(struct mddev *mddev, struct bio *bio) in raid1_make_request() argument
1579 && md_flush_request(mddev, bio)) in raid1_make_request()
1593 raid1_read_request(mddev, bio, sectors, NULL); in raid1_make_request()
1595 if (!md_write_start(mddev,bio)) in raid1_make_request()
1597 raid1_write_request(mddev, bio, sectors); in raid1_make_request()
1602 static void raid1_status(struct seq_file *seq, struct mddev *mddev) in raid1_status() argument
1604 struct r1conf *conf = mddev->private; in raid1_status()
1608 conf->raid_disks - mddev->degraded); in raid1_status()
1619 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) in raid1_error() argument
1622 struct r1conf *conf = mddev->private; in raid1_error()
1632 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev in raid1_error()
1633 && (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1640 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1646 mddev->degraded++; in raid1_error()
1652 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid1_error()
1653 set_mask_bits(&mddev->sb_flags, 0, in raid1_error()
1657 mdname(mddev), bdevname(rdev->bdev, b), in raid1_error()
1658 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1670 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1698 static int raid1_spare_active(struct mddev *mddev) in raid1_spare_active() argument
1701 struct r1conf *conf = mddev->private; in raid1_spare_active()
1743 mddev->degraded -= count; in raid1_spare_active()
1750 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid1_add_disk() argument
1752 struct r1conf *conf = mddev->private; in raid1_add_disk()
1759 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1762 if (md_integrity_add_rdev(rdev, mddev)) in raid1_add_disk()
1781 if (mddev->gendisk) in raid1_add_disk()
1782 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid1_add_disk()
1808 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid1_add_disk()
1809 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); in raid1_add_disk()
1814 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid1_remove_disk() argument
1816 struct r1conf *conf = mddev->private; in raid1_remove_disk()
1835 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1836 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1876 err = md_integrity_register(mddev); in raid1_remove_disk()
1902 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) in abort_sync_write() argument
1910 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); in abort_sync_write()
1919 struct mddev *mddev = r1_bio->mddev; in put_sync_write_buf() local
1927 md_done_sync(mddev, s, uptodate); in put_sync_write_buf()
1936 struct mddev *mddev = r1_bio->mddev; in end_sync_write() local
1937 struct r1conf *conf = mddev->private; in end_sync_write()
1943 abort_sync_write(mddev, r1_bio); in end_sync_write()
1947 mddev->recovery); in end_sync_write()
1972 rdev->mddev->recovery); in r1_sync_page_io()
1976 md_error(rdev->mddev, rdev); in r1_sync_page_io()
1993 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error() local
1994 struct r1conf *conf = mddev->private; in fix_sync_read_error()
2006 md_error(mddev, rdev); in fix_sync_read_error()
2050 mdname(mddev), bio_devname(bio, b), in fix_sync_read_error()
2061 mddev->recovery_disabled; in fix_sync_read_error()
2062 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in fix_sync_read_error()
2063 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2087 rdev_dec_pending(rdev, mddev); in fix_sync_read_error()
2121 struct mddev *mddev = r1_bio->mddev; in process_checks() local
2122 struct r1conf *conf = mddev->private; in process_checks()
2153 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2186 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
2187 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) in process_checks()
2191 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2199 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) in sync_request_write() argument
2201 struct r1conf *conf = mddev->private; in sync_request_write()
2211 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in sync_request_write()
2223 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) in sync_request_write()
2226 abort_sync_write(mddev, r1_bio); in sync_request_write()
2255 struct mddev *mddev = conf->mddev; in fix_read_error() local
2283 rdev_dec_pending(rdev, mddev); in fix_read_error()
2297 md_error(mddev, rdev); in fix_read_error()
2314 rdev_dec_pending(rdev, mddev); in fix_read_error()
2334 mdname(mddev), s, in fix_read_error()
2339 rdev_dec_pending(rdev, mddev); in fix_read_error()
2350 struct mddev *mddev = r1_bio->mddev; in narrow_write_error() local
2351 struct r1conf *conf = mddev->private; in narrow_write_error()
2390 &mddev->bio_set); in narrow_write_error()
2393 &mddev->bio_set); in narrow_write_error()
2434 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2438 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2452 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2460 md_error(conf->mddev, in handle_write_finished()
2466 conf->mddev); in handle_write_finished()
2479 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2489 struct mddev *mddev = conf->mddev; in handle_read_error() local
2508 if (mddev->ro == 0 in handle_read_error()
2514 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { in handle_read_error()
2515 md_error(mddev, rdev); in handle_read_error()
2520 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2526 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
2531 struct mddev *mddev = thread->mddev; in raid1d() local
2534 struct r1conf *conf = mddev->private; in raid1d()
2539 md_check_recovery(mddev); in raid1d()
2542 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid1d()
2545 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in raid1d()
2554 if (mddev->degraded) in raid1d()
2578 mddev = r1_bio->mddev; in raid1d()
2579 conf = mddev->private; in raid1d()
2585 sync_request_write(mddev, r1_bio); in raid1d()
2595 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) in raid1d()
2596 md_check_recovery(mddev); in raid1d()
2639 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, in raid1_sync_request() argument
2642 struct r1conf *conf = mddev->private; in raid1_sync_request()
2661 max_sector = mddev->dev_sectors; in raid1_sync_request()
2668 if (mddev->curr_resync < max_sector) /* aborted */ in raid1_sync_request()
2669 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid1_sync_request()
2674 md_bitmap_close_sync(mddev->bitmap); in raid1_sync_request()
2677 if (mddev_is_clustered(mddev)) { in raid1_sync_request()
2684 if (mddev->bitmap == NULL && in raid1_sync_request()
2685 mddev->recovery_cp == MaxSector && in raid1_sync_request()
2686 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid1_sync_request()
2694 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in raid1_sync_request()
2695 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2712 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2713 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2731 r1_bio->mddev = mddev; in raid1_sync_request()
2779 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in raid1_sync_request()
2780 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in raid1_sync_request()
2817 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid1_sync_request()
2827 conf->recovery_disabled = mddev->recovery_disabled; in raid1_sync_request()
2828 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid1_sync_request()
2840 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) in raid1_sync_request()
2857 if (max_sector > mddev->resync_max) in raid1_sync_request()
2858 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in raid1_sync_request()
2871 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2874 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in raid1_sync_request()
2902 if (mddev_is_clustered(mddev) && in raid1_sync_request()
2904 conf->cluster_sync_low = mddev->curr_resync_completed; in raid1_sync_request()
2907 md_cluster_ops->resync_info_update(mddev, in raid1_sync_request()
2915 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2938 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid1_size() argument
2943 return mddev->dev_sectors; in raid1_size()
2946 static struct r1conf *setup_conf(struct mddev *mddev) in setup_conf() argument
2979 mddev->raid_disks, 2), in setup_conf()
2991 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
3001 conf->poolinfo->mddev = mddev; in setup_conf()
3005 rdev_for_each(rdev, mddev) { in setup_conf()
3007 if (disk_idx >= mddev->raid_disks in setup_conf()
3011 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
3021 conf->raid_disks = mddev->raid_disks; in setup_conf()
3022 conf->mddev = mddev; in setup_conf()
3031 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
3063 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
3085 static void raid1_free(struct mddev *mddev, void *priv);
3086 static int raid1_run(struct mddev *mddev) in raid1_run() argument
3094 if (mddev->level != 1) { in raid1_run()
3096 mdname(mddev), mddev->level); in raid1_run()
3099 if (mddev->reshape_position != MaxSector) { in raid1_run()
3101 mdname(mddev)); in raid1_run()
3104 if (mddev_init_writes_pending(mddev) < 0) in raid1_run()
3111 if (mddev->private == NULL) in raid1_run()
3112 conf = setup_conf(mddev); in raid1_run()
3114 conf = mddev->private; in raid1_run()
3119 if (mddev->queue) { in raid1_run()
3120 blk_queue_max_write_same_sectors(mddev->queue, 0); in raid1_run()
3121 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid1_run()
3124 rdev_for_each(rdev, mddev) { in raid1_run()
3125 if (!mddev->gendisk) in raid1_run()
3127 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid1_run()
3133 mddev->degraded = 0; in raid1_run()
3138 mddev->degraded++; in raid1_run()
3142 if (conf->raid_disks - mddev->degraded < 1) { in raid1_run()
3147 if (conf->raid_disks - mddev->degraded == 1) in raid1_run()
3148 mddev->recovery_cp = MaxSector; in raid1_run()
3150 if (mddev->recovery_cp != MaxSector) in raid1_run()
3152 mdname(mddev)); in raid1_run()
3154 mdname(mddev), mddev->raid_disks - mddev->degraded, in raid1_run()
3155 mddev->raid_disks); in raid1_run()
3160 mddev->thread = conf->thread; in raid1_run()
3162 mddev->private = conf; in raid1_run()
3163 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); in raid1_run()
3165 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); in raid1_run()
3167 if (mddev->queue) { in raid1_run()
3170 mddev->queue); in raid1_run()
3173 mddev->queue); in raid1_run()
3176 ret = md_integrity_register(mddev); in raid1_run()
3178 md_unregister_thread(&mddev->thread); in raid1_run()
3184 raid1_free(mddev, conf); in raid1_run()
3188 static void raid1_free(struct mddev *mddev, void *priv) in raid1_free() argument
3204 static int raid1_resize(struct mddev *mddev, sector_t sectors) in raid1_resize() argument
3213 sector_t newsize = raid1_size(mddev, sectors, 0); in raid1_resize()
3214 if (mddev->external_size && in raid1_resize()
3215 mddev->array_sectors > newsize) in raid1_resize()
3217 if (mddev->bitmap) { in raid1_resize()
3218 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid1_resize()
3222 md_set_array_sectors(mddev, newsize); in raid1_resize()
3223 if (sectors > mddev->dev_sectors && in raid1_resize()
3224 mddev->recovery_cp > mddev->dev_sectors) { in raid1_resize()
3225 mddev->recovery_cp = mddev->dev_sectors; in raid1_resize()
3226 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_resize()
3228 mddev->dev_sectors = sectors; in raid1_resize()
3229 mddev->resync_max_sectors = sectors; in raid1_resize()
3233 static int raid1_reshape(struct mddev *mddev) in raid1_reshape() argument
3249 struct r1conf *conf = mddev->private; in raid1_reshape()
3259 if (mddev->chunk_sectors != mddev->new_chunk_sectors || in raid1_reshape()
3260 mddev->layout != mddev->new_layout || in raid1_reshape()
3261 mddev->level != mddev->new_level) { in raid1_reshape()
3262 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid1_reshape()
3263 mddev->new_layout = mddev->layout; in raid1_reshape()
3264 mddev->new_level = mddev->level; in raid1_reshape()
3268 if (!mddev_is_clustered(mddev)) in raid1_reshape()
3269 md_allow_write(mddev); in raid1_reshape()
3271 raid_disks = mddev->raid_disks + mddev->delta_disks; in raid1_reshape()
3285 newpoolinfo->mddev = mddev; in raid1_reshape()
3312 sysfs_unlink_rdev(mddev, rdev); in raid1_reshape()
3314 sysfs_unlink_rdev(mddev, rdev); in raid1_reshape()
3315 if (sysfs_link_rdev(mddev, rdev)) in raid1_reshape()
3317 mdname(mddev), rdev->raid_disk); in raid1_reshape()
3328 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3330 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3331 mddev->delta_disks = 0; in raid1_reshape()
3335 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in raid1_reshape()
3336 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_reshape()
3337 md_wakeup_thread(mddev->thread); in raid1_reshape()
3343 static void raid1_quiesce(struct mddev *mddev, int quiesce) in raid1_quiesce() argument
3345 struct r1conf *conf = mddev->private; in raid1_quiesce()
3353 static void *raid1_takeover(struct mddev *mddev) in raid1_takeover() argument
3358 if (mddev->level == 5 && mddev->raid_disks == 2) { in raid1_takeover()
3360 mddev->new_level = 1; in raid1_takeover()
3361 mddev->new_layout = 0; in raid1_takeover()
3362 mddev->new_chunk_sectors = 0; in raid1_takeover()
3363 conf = setup_conf(mddev); in raid1_takeover()
3367 mddev_clear_unsupported_flags(mddev, in raid1_takeover()
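
The pattern this listing makes visible is the web of back-pointers tying raid1's three core structures together: mddev->private holds the personality data (struct r1conf, wired up in setup_conf() at 3022), the conf points back at its array through conf->mddev, and every struct r1bio records its owning array in r1_bio->mddev (init_r1bio() at 1185, raid1_sync_request() at 2731). That last pointer is what lets completion callbacks such as raid1_end_write_request() and free_r1bio() recover the full array context from nothing but the bio. Below is a minimal, self-contained sketch of that navigation under pared-down struct layouts; it is not the kernel's actual definitions from md.h and raid1.h, and the get_conf() helper is hypothetical (the kernel open-codes conf = mddev->private at each entry point, as the listing shows).

#include <stdio.h>

/* Pared-down stand-ins for the kernel structs (md.h / raid1.h);
 * only the back-pointer fields exercised in this listing are kept. */
struct mddev {
	void *private;          /* personality data: struct r1conf * for raid1 */
	int degraded;           /* count of failed members */
	char name[16];          /* stand-in for mdname(mddev) */
};

struct r1conf {
	struct mddev *mddev;    /* back-pointer, set in setup_conf() */
	int raid_disks;
};

struct r1bio {
	struct mddev *mddev;    /* set in init_r1bio() / raid1_sync_request() */
	/* sector, sectors, state, bios[] ... omitted */
};

/* Hypothetical helper naming the idiom repeated at nearly every
 * entry point above: recover raid1 state from the generic device. */
static struct r1conf *get_conf(struct mddev *mddev)
{
	return mddev->private;
}

/* The completion-path direction: an r1bio alone is enough to reach
 * both the conf and the array, as in free_r1bio() and put_buf(). */
static void show_context(struct r1bio *r1_bio)
{
	struct r1conf *conf = get_conf(r1_bio->mddev);

	printf("%s: %d of %d disks working\n", r1_bio->mddev->name,
	       conf->raid_disks - r1_bio->mddev->degraded,
	       conf->raid_disks);
}

int main(void)
{
	struct mddev md = { .degraded = 1, .name = "md0" };
	struct r1conf conf = { .mddev = &md, .raid_disks = 2 };
	struct r1bio r1_bio = { .mddev = &md };

	md.private = &conf;     /* mirrors setup_conf()/raid1_run() wiring */
	show_context(&r1_bio);  /* prints: md0: 1 of 2 disks working */
	return 0;
}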