Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-io.c      | 12
-rw-r--r--  drivers/md/dm-kcopyd.c  |  2
-rw-r--r--  drivers/md/dm-raid1.c   |  2
-rw-r--r--  drivers/md/dm-stripe.c  |  2
-rw-r--r--  drivers/md/dm.c         | 14
-rw-r--r--  drivers/md/linear.c     |  2
-rw-r--r--  drivers/md/md.c         | 10
-rw-r--r--  drivers/md/md.h         |  4
-rw-r--r--  drivers/md/multipath.c  |  8
-rw-r--r--  drivers/md/raid0.c      |  2
-rw-r--r--  drivers/md/raid1.c      | 22
-rw-r--r--  drivers/md/raid10.c     | 12
-rw-r--r--  drivers/md/raid5.c      |  2
13 files changed, 46 insertions(+), 48 deletions(-)
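
The conversion below follows one pattern throughout: the old BIO_RW_* names were bit numbers that callers had to shift into place, while the new REQ_* names are ready-made masks that can be OR'd and tested directly. A minimal user-space sketch of the two encodings (the DEMO_* values are illustrative, not the kernel's):

    #include <assert.h>

    /* old style: the macro names a bit position, callers must shift */
    #define DEMO_BIO_RW_SYNCIO 1
    #define DEMO_BIO_RW_UNPLUG 2

    /* new style: the macro is already a mask (values illustrative) */
    #define DEMO_REQ_SYNC   (1u << 1)
    #define DEMO_REQ_UNPLUG (1u << 2)

    int main(void)
    {
        unsigned int old_rw = (1 << DEMO_BIO_RW_SYNCIO) | (1 << DEMO_BIO_RW_UNPLUG);
        unsigned int new_rw = DEMO_REQ_SYNC | DEMO_REQ_UNPLUG;

        /* both spellings produce the same bit pattern */
        assert(old_rw == new_rw);
        return 0;
    }
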
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af2..0590c75b0ab6 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
/*
* For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+ if (where[i].count || (rw & REQ_HARDBARRIER))
do_region(rw, i, where + i, dp, io);
}
@@ -412,8 +412,8 @@ retry:
}
set_current_state(TASK_RUNNING);
- if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
- rw &= ~(1 << BIO_RW_BARRIER);
+ if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+ rw &= ~REQ_HARDBARRIER;
goto retry;
}
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
* New collapsed (a)synchronous interface.
*
* If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
* the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
*/
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
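
The comment fixed above documents the dm_io() contract: an asynchronous request (one with notify.fn set) must either be unplugged later with blk_unplug() or carry REQ_SYNC from the start. A hypothetical caller, modeled on the dm-kcopyd hunk below — submit_region() and my_io_done() are made-up names; the struct fields follow include/linux/dm-io.h of this era:

    #include <linux/fs.h>
    #include <linux/dm-io.h>

    /* hypothetical completion callback: 'error' carries per-region error bits */
    static void my_io_done(unsigned long error, void *context)
    {
    }

    /* hypothetical helper: asynchronous submission that sets REQ_SYNC |
     * REQ_UNPLUG up front, so the IO is dispatched without waiting for
     * q->unplug_delay */
    static int submit_region(struct dm_io_client *client,
                             struct dm_io_region *where,
                             struct page_list *pl)
    {
        struct dm_io_request io_req = {
            .bi_rw = WRITE | REQ_SYNC | REQ_UNPLUG,
            .mem.type = DM_IO_PAGE_LIST,
            .mem.ptr.pl = pl,
            .mem.offset = 0,
            .notify.fn = my_io_done,    /* async: notify.fn is set */
            .notify.context = NULL,
            .client = client,
        };

        return dm_io(&io_req, 1, where, NULL);
    }
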
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index addf83475040..d8587bac5682 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+ .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = job->offset,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddda531723dc..74136262d654 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
if (error == -EOPNOTSUPP)
goto out;
- if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
goto out;
if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e610725db766..d6e28d732b4d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
if (!error)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
return error;
if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1e0e6dd51501..d6f77baeafd6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md)) {
- if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+ if (!(io->bio->bi_rw & REQ_HARDBARRIER))
bio_list_add_head(&md->deferred,
io->bio);
} else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
io_error = io->error;
bio = io->bio;
- if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+ if (bio->bi_rw & REQ_HARDBARRIER) {
/*
* There can be just one barrier request so we use
* a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+ clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
clone->bi_vcnt = 1;
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
__bio_clone(clone, bio);
- clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+ clone->bi_rw &= ~REQ_HARDBARRIER;
clone->bi_destructor = dm_bio_destructor;
clone->bi_sector = sector;
clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.map = dm_get_live_table(md);
if (unlikely(!ci.map)) {
- if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+ if (!(bio->bi_rw & REQ_HARDBARRIER))
bio_io_error(bio);
else
if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
* we have to queue this io for later.
*/
if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
- unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
up_read(&md->io_lock);
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
if (dm_request_based(md))
generic_make_request(c);
else {
- if (bio_rw_flagged(c, BIO_RW_BARRIER))
+ if (c->bi_rw & REQ_HARDBARRIER)
process_barrier(md, c);
else
__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7e0e057db9a7..ba19060bcf3f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
dev_info_t *tmp_dev;
sector_t start_sector;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb20d0b0555a..1893af678779 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -353,7 +353,7 @@ static void md_submit_barrier(struct work_struct *ws)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
- bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ bio->bi_rw &= ~REQ_HARDBARRIER;
if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +675,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* if zero is reached.
* If an error occurred, call md_error
*
- * As we might need to resubmit the request if BIO_RW_BARRIER
+ * As we might need to resubmit the request if REQ_HARDBARRIER
* causes ENOTSUPP, we allocate a spare bio...
*/
struct bio *bio = bio_alloc(GFP_NOIO, 1);
- int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+ int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
@@ -691,7 +691,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
atomic_inc(&mddev->pending_writes);
if (!test_bit(BarriersNotsupp, &rdev->flags)) {
struct bio *rbio;
- rw |= (1<<BIO_RW_BARRIER);
+ rw |= REQ_HARDBARRIER;
rbio = bio_clone(bio, GFP_NOIO);
rbio->bi_private = bio;
rbio->bi_end_io = super_written_barrier;
@@ -736,7 +736,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct completion event;
int ret;
- rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
bio->bi_bdev = bdev;
bio->bi_sector = sector;
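
Both dm-io's retry loop and md_super_write()'s spare bio above implement the same fallback: try the write with REQ_HARDBARRIER first, and if the device fails it with -EOPNOTSUPP, strip the flag and resubmit. The pattern reduced to a sketch — demo_write_with_barrier() and demo_submit are hypothetical stand-ins, not kernel symbols:

    #include <linux/bio.h>
    #include <linux/errno.h>

    /* sketch of the retry-without-barrier pattern */
    static int demo_write_with_barrier(int (*demo_submit)(int rw))
    {
        int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_HARDBARRIER;
        int err;

    retry:
        err = demo_submit(rw);
        if (err == -EOPNOTSUPP && (rw & REQ_HARDBARRIER)) {
            /* device rejects barriers: strip the flag and resubmit */
            rw &= ~REQ_HARDBARRIER;
            goto retry;
        }
        return err;
    }
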
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10597bfec000..fc56e0f21c80 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -67,7 +67,7 @@ struct mdk_rdev_s
#define Faulty 1 /* device is known to have a fault */
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
-#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
+#define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */
#define AllReserved 6 /* If whole device is reserved for
* one array */
#define AutoDetected 7 /* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
* fails. Only supported
*/
struct bio *biolist; /* bios that need to be retried
- * because BIO_RW_BARRIER is not supported
+ * because REQ_HARDBARRIER is not supported
*/
atomic_t recovery_active; /* blocks scheduled, but not written */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 410fb60699ac..0307d217e7a4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
- else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+ else if (!(bio->bi_rw & REQ_RAHEAD)) {
/*
* oops, IO error:
*/
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
mp_bh->bio = *bio;
mp_bh->bio.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+ mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
*bio = *(mp_bh->master_bio);
bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+ bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 563abed5a2cb..6f7af46d623c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a948da8012de..73cc74ffc26b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
- const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- bool do_barriers;
+ unsigned long do_barriers;
mdk_rdev_t *blocked_rdev;
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
finish_wait(&conf->wait_barrier, &w);
}
if (unlikely(!mddev->barriers_work &&
- bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ (bio->bi_rw & REQ_HARDBARRIER))) {
if (rw == WRITE)
md_write_end(mddev);
bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r1_bio;
generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_set(&r1_bio->remaining, 0);
atomic_set(&r1_bio->behind_remaining, 0);
- do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+ do_barriers = bio->bi_rw & REQ_HARDBARRIER;
if (do_barriers)
set_bit(R1BIO_Barrier, &r1_bio->state);
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
- (do_sync << BIO_RW_SYNCIO);
+ mbio->bi_rw = WRITE | do_barriers | do_sync;
mbio->bi_private = r1_bio;
if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
sync_request_write(mddev, r1_bio);
unplug = 1;
} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
- /* some requests in the r1bio were BIO_RW_BARRIER
+ /* some requests in the r1bio were REQ_HARDBARRIER
* requests which failed with -EOPNOTSUPP. Hohumm..
* Better resubmit without the barrier.
* We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
* We already have a nr_pending reference on these rdevs.
*/
int i;
- const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
clear_bit(R1BIO_Barrier, &r1_bio->state);
for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
bio->bi_end_io = raid1_end_write_request;
- bio->bi_rw = WRITE |
- (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = WRITE | do_sync;
bio->bi_private = r1_bio;
r1_bio->bios[i] = bio;
generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
- const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r1_bio;
unplug = 1;
generic_make_request(bio);
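
One subtlety in the raid1 conversion above (and the raid10 one below): do_sync and do_barriers now hold the masked flag value itself, so they need an integer type wide enough to keep it. A bool would collapse any set flag to 1, and 1 is the write bit in the new scheme, so READ | do_sync would silently turn a read into a write. A user-space demo with illustrative flag values (the DEMO_* constants are stand-ins, not the kernel's):

    #include <assert.h>
    #include <stdbool.h>

    #define DEMO_REQ_WRITE (1u << 0)
    #define DEMO_REQ_SYNC  (1u << 4)
    #define DEMO_READ      0u

    int main(void)
    {
        unsigned long bi_rw = DEMO_READ | DEMO_REQ_SYNC;

        const unsigned long do_sync = bi_rw & DEMO_REQ_SYNC;  /* keeps the mask */
        const bool do_sync_bool = bi_rw & DEMO_REQ_SYNC;      /* collapses to 1 */

        assert((DEMO_READ | do_sync) == DEMO_REQ_SYNC);       /* still a read */
        assert((DEMO_READ | do_sync_bool) == DEMO_REQ_WRITE); /* became a write */
        return 0;
    }
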
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 42e64e4e5e25..62ecb6650fd0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
- const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
@@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
generic_make_request(read_bio);
@@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
conf->mirrors[d].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+ mbio->bi_rw = WRITE | do_sync;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev)
raid_end_bio_io(r10_bio);
bio_put(bio);
} else {
- const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
@@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev)
bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
unplug = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96c690279fc6..20ac2f14376a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
const int rw = bio_data_dir(bi);
int remaining;
- if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+ if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
/* Drain all pending writes. We only really need
* to ensure they have been submitted, but this is
* easier.