Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c | 37
1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 611b5f797618..8034fbd6190c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -333,9 +333,10 @@ static void raid1_end_read_request(struct bio *bio, int error)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
- if (uptodate)
+ if (uptodate) {
raid_end_bio_io(r1_bio);
- else {
+ rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+ } else {
/*
* oops, read error:
*/
@@ -349,9 +350,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
(unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
+ /* don't drop the reference on read_disk yet */
}
-
- rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
static void close_write(struct r1bio *r1_bio)
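
The two hunks above change who owns the reference on the mirror device: on a successful read raid1_end_read_request() drops it immediately with rdev_dec_pending(), but on a read error the reference is deliberately kept ("don't drop the reference on read_disk yet") while the r1_bio is queued for retry; the matching drop moves into handle_read_error() (see the hunk at @@ -2220 below). A rough, self-contained illustration of that ownership rule, in plain userspace C with made-up names rather than the kernel API:

#include <stdio.h>

/* Hypothetical stand-in for the per-device pending-I/O count. */
struct dev_ref { int pending; };

static void get_ref(struct dev_ref *d) { d->pending++; }
static void put_ref(struct dev_ref *d) { d->pending--; }

/* Error path: the retry handler now owns the reference and drops it
 * only once it has finished with the device. */
static void retry_read(struct dev_ref *d)
{
	/* ...re-read or redirect here... */
	put_ref(d);
}

static void end_read(struct dev_ref *d, int uptodate)
{
	if (uptodate)
		put_ref(d);      /* success: release right away */
	else
		retry_read(d);   /* error: keep the reference alive for the retry */
}

int main(void)
{
	struct dev_ref d = { 0 };

	get_ref(&d);             /* taken when the read was issued */
	end_read(&d, 0);         /* simulate a failed read */
	printf("pending = %d\n", d.pending);   /* 0: the count stays balanced */
	return 0;
}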
@@ -781,7 +781,12 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ /* Just ignore it */
+ bio_endio(bio, 0);
+ else
+ generic_make_request(bio);
bio = next;
}
} else
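
With discards now reaching flush_pending_writes() (see the make_request hunks below), a queued discard aimed at a member whose queue does not advertise discard support is not submitted at all: it is completed immediately with bio_endio(bio, 0), i.e. silently treated as a successful no-op rather than an error. Presumably this is acceptable because discard is advisory, and it lets an array mix members with and without discard support.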
@@ -994,6 +999,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+ const unsigned long do_discard = (bio->bi_rw
+ & (REQ_DISCARD | REQ_SECURE));
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -1295,7 +1302,7 @@ read_again:
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | do_flush_fua | do_sync;
+ mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
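
The two hunks above wire discards into the normal write path: do_discard captures REQ_DISCARD and REQ_SECURE from the incoming bio and is OR'd into bi_rw of each per-mirror write bio, so a discard issued to the array is cloned to every mirror just like an ordinary write.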
@@ -1549,6 +1556,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
+ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
return err;
}
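
In raid1_add_disk(), if the newly added member's queue supports discard, QUEUE_FLAG_DISCARD is set on the array's queue, so hot-adding a discard-capable device (re)enables discard advertising on the md device.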
@@ -1867,7 +1876,7 @@ static int process_checks(struct r1bio *r1_bio)
} else
j = 0;
if (j >= 0)
- mddev->resync_mismatches += r1_bio->sectors;
+ atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
&& test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
/* No need to write to this device. */
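
Mismatch accounting in process_checks() now uses atomic64_add() on mddev->resync_mismatches instead of a plain addition; this presumably accompanies a change of resync_mismatches to an atomic64_t in the md core, so the counter can be updated without extra locking.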
@@ -2220,6 +2229,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
unfreeze_array(conf);
} else
md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
bio = r1_bio->bios[r1_bio->read_disk];
bdevname(bio->bi_bdev, b);
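
This is where the reference kept on the error path in raid1_end_read_request() (first hunks above) is finally dropped: handle_read_error() calls rdev_dec_pending() on the read_disk rdev once md_error() or the unfreeze/fix-up has run, keeping the pending count balanced.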
@@ -2285,8 +2295,9 @@ read_more:
}
}
-static void raid1d(struct mddev *mddev)
+static void raid1d(struct md_thread *thread)
{
+ struct mddev *mddev = thread->mddev;
struct r1bio *r1_bio;
unsigned long flags;
struct r1conf *conf = mddev->private;
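
raid1d(), the array's management thread function, now takes the struct md_thread * directly and derives mddev from thread->mddev; this presumably matches a corresponding change to the md_thread callback signature in the md core, outside this file.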
@@ -2783,6 +2794,7 @@ static int run(struct mddev *mddev)
int i;
struct md_rdev *rdev;
int ret;
+ bool discard_supported = false;
if (mddev->level != 1) {
printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -2812,6 +2824,8 @@ static int run(struct mddev *mddev)
continue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
+ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ discard_supported = true;
}
mddev->degraded = 0;
@@ -2846,6 +2860,13 @@ static int run(struct mddev *mddev)
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
+
+ if (discard_supported)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ mddev->queue);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+ mddev->queue);
}
ret = md_integrity_register(mddev);
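
Finally, run() scans the members once at array start: discard_supported becomes true if any member's queue supports discard, and QUEUE_FLAG_DISCARD on the array queue is then set or cleared accordingly. The md device therefore advertises discard as long as at least one underlying device can honour it, with non-supporting members simply ignoring discards via the flush_pending_writes() change above.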