Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c | 13
-rw-r--r--  block/blk-core.c      |  4
-rw-r--r--  block/blk-merge.c     | 19
-rw-r--r--  block/blk-mq-tag.c    | 28
-rw-r--r--  block/blk-mq.c        | 43
-rw-r--r--  block/ioprio.c        | 14
-rw-r--r--  block/scsi_ioctl.c    | 10
7 files changed, 97 insertions, 34 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0984232e429f..5cbd5d9ea61d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_iter iter;
- struct bio_vec *bv;
+ struct bvec_iter bviter;
+ struct bio_vec bv;
struct bio_integrity_payload *bip = bio_integrity(bio);
- unsigned int i, ret = 0;
+ unsigned int ret = 0;
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
iter.seed = bip_get_seed(bip);
iter.prot_buf = prot_buf;
- bio_for_each_segment_all(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
+ bio_for_each_segment(bv, bio, bviter) {
+ void *kaddr = kmap_atomic(bv.bv_page);
- iter.data_buf = kaddr + bv->bv_offset;
- iter.data_size = bv->bv_len;
+ iter.data_buf = kaddr + bv.bv_offset;
+ iter.data_size = bv.bv_len;
ret = proc_fn(&iter);
if (ret) {
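
The switch from bio_for_each_segment_all() to bio_for_each_segment() means the loop now walks a by-value struct bio_vec driven by a struct bvec_iter, which also honors bi_iter for cloned or partially-advanced bios. A minimal sketch of that iteration pattern, outside the integrity code (count_bio_bytes() is a hypothetical helper for illustration only):

/*
 * Sketch: walk each segment of a bio by value using a bvec_iter.
 * count_bio_bytes() is not part of this patch.
 */
static unsigned int count_bio_bytes(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;		/* copied per segment, not a pointer */
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;	/* bv.bv_page / bv.bv_offset also valid */

	return bytes;
}
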
diff --git a/block/blk-core.c b/block/blk-core.c
index 0421b53e6431..2e7424b42947 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1266,7 +1266,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
- if (blk_rq_tagged(rq))
+ if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
@@ -2554,7 +2554,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
*/
void blk_finish_request(struct request *req, int error)
{
- if (blk_rq_tagged(req))
+ if (req->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(req->q, req);
BUG_ON(blk_queued_rq(req));
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3ac40aef46b..89b97b5e0881 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -97,19 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
- &q->queue_flags);
- bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
+ unsigned short seg_cnt;
+
+ /* estimate segment number by bi_vcnt for non-cloned bio */
+ if (bio_flagged(bio, BIO_CLONED))
+ seg_cnt = bio_segments(bio);
+ else
+ seg_cnt = bio->bi_vcnt;
- if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
- merge_not_need)
- bio->bi_phys_segments = bio->bi_vcnt;
+ if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+ (seg_cnt < queue_max_segments(q)))
+ bio->bi_phys_segments = seg_cnt;
else {
struct bio *nxt = bio->bi_next;
bio->bi_next = NULL;
- bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
- no_sg_merge && merge_not_need);
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
bio->bi_next = nxt;
}
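
The comment in the hunk explains the estimate: bi_vcnt is exact only for a bio that owns its bvec array, while a clone shares the array but may cover a sub-range via bi_iter, so bio_segments() has to walk the iterator. A small sketch of that distinction (seg_count_hint() is a hypothetical helper, not part of the patch):

/*
 * Sketch: why bi_vcnt is only a valid estimate for non-cloned bios.
 */
static unsigned short seg_count_hint(struct bio *bio)
{
	if (bio_flagged(bio, BIO_CLONED))
		return bio_segments(bio);	/* walk the actual bi_iter range */
	return bio->bi_vcnt;			/* owner: vector count is exact */
}
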
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 8317175a3009..728b9a4d5f56 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -584,6 +584,34 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
return 0;
}
+/**
+ * blk_mq_unique_tag() - return a tag that is unique queue-wide
+ * @rq: request for which to compute a unique tag
+ *
+ * The tag field in struct request is unique per hardware queue but not over
+ * all hardware queues. Hence this function that returns a tag with the
+ * hardware context index in the upper bits and the per hardware queue tag in
+ * the lower bits.
+ *
+ * Note: When called for a request that is queued on a non-multiqueue request
+ * queue, the hardware context index is set to zero.
+ */
+u32 blk_mq_unique_tag(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ int hwq = 0;
+
+ if (q->mq_ops) {
+ hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ hwq = hctx->queue_num;
+ }
+
+ return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
+ (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
+}
+EXPORT_SYMBOL(blk_mq_unique_tag);
+
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
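
As the kerneldoc above says, the returned value packs the hardware queue index into the upper bits and the per-queue tag into the lower bits. A sketch of how a driver might decode it, assuming the companion helpers blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() from <linux/blk-mq.h>; the pr_debug() is illustrative only:

/*
 * Sketch: splitting the queue-wide tag back into its two parts.
 */
static void show_unique_tag(struct request *rq)
{
	u32 unique = blk_mq_unique_tag(rq);

	pr_debug("hwq %u, per-queue tag %u\n",
		 blk_mq_unique_tag_to_hwq(unique),	/* upper bits */
		 blk_mq_unique_tag_to_tag(unique));	/* lower bits */
}
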
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68929bad9a6a..92ceef0d2ab9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
{
bool freeze;
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
percpu_ref_kill(&q->mq_usage_counter);
blk_mq_run_queues(q, false);
}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+ blk_mq_freeze_queue_start(q);
+ blk_mq_freeze_queue_wait(q);
+}
+
static void blk_mq_unfreeze_queue(struct request_queue *q)
{
bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
- blk_mq_freeze_queue(q);
+ WARN_ON_ONCE(!q->mq_freeze_depth);
blk_mq_sysfs_unregister(q);
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
blk_mq_map_swqueue(q);
blk_mq_sysfs_register(q);
-
- blk_mq_unfreeze_queue(q);
}
static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
mutex_lock(&all_q_mutex);
+
+ /*
+ * We need to freeze and reinit all existing queues. Freezing
+ * involves synchronous wait for an RCU grace period and doing it
+ * one by one may take a long time. Start freezing all queues in
+ * one swoop and then wait for the completions so that freezing can
+ * take place in parallel.
+ */
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_freeze_queue_start(q);
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_freeze_queue_wait(q);
+
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_queue_reinit(q);
+
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_unfreeze_queue(q);
+
mutex_unlock(&all_q_mutex);
return NOTIFY_OK;
}
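
The comment in the hunk carries the reasoning: each blk_mq_freeze_queue_start() kills a percpu ref, which implies a synchronous RCU grace period, so starting every freeze before waiting lets those grace periods overlap rather than serialize per queue. The same start-then-wait shape, pulled into a hypothetical helper (freeze_all_queues() is illustrative only; the caller is assumed to hold all_q_mutex):

/*
 * Sketch: batched freeze as used by the hotplug notifier above.
 */
static void freeze_all_queues(struct list_head *queues)
{
	struct request_queue *q;

	list_for_each_entry(q, queues, all_q_node)
		blk_mq_freeze_queue_start(q);	/* non-blocking kick-off */

	list_for_each_entry(q, queues, all_q_node)
		blk_mq_freeze_queue_wait(q);	/* block until refs drain */
}
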
@@ -2024,6 +2049,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
+ BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
+
if (!set->nr_hw_queues)
return -EINVAL;
if (!set->queue_depth)
diff --git a/block/ioprio.c b/block/ioprio.c
index e50170ca7c33..31666c92b46a 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -157,14 +157,16 @@ out:
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
- unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
- unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+ unsigned short aclass;
+ unsigned short bclass;
- if (aclass == IOPRIO_CLASS_NONE)
- aclass = IOPRIO_CLASS_BE;
- if (bclass == IOPRIO_CLASS_NONE)
- bclass = IOPRIO_CLASS_BE;
+ if (!ioprio_valid(aprio))
+ aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+ if (!ioprio_valid(bprio))
+ bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+ aclass = IOPRIO_PRIO_CLASS(aprio);
+ bclass = IOPRIO_PRIO_CLASS(bprio);
if (aclass == bclass)
return min(aprio, bprio);
if (aclass > bclass)
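
With this change an unset priority is normalized to a full best-effort value (class and level) before the class comparison, instead of only patching the class and then comparing raw values. A small illustration of the resulting behavior, assuming the standard IOPRIO_* macros from the kernel headers; the WARN_ON checks are purely illustrative:

/*
 * Sketch: expected results from ioprio_best() after the fix.
 */
static void ioprio_best_examples(void)
{
	unsigned short none = 0;	/* IOPRIO_CLASS_NONE, i.e. unset */
	unsigned short be7 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 7);
	unsigned short rt0 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);

	/* unset is normalized to BE/IOPRIO_NORM, which outranks BE/7 */
	WARN_ON(ioprio_best(none, be7) !=
		IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM));
	/* the RT class still beats any best-effort priority */
	WARN_ON(ioprio_best(rt0, be7) != rt0);
}
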
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 1e053d911240..28163fad3c5d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -142,7 +142,7 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_VERIFY_10, filter->read_ok);
__set_bit(VERIFY_16, filter->read_ok);
__set_bit(REPORT_LUNS, filter->read_ok);
- __set_bit(SERVICE_ACTION_IN, filter->read_ok);
+ __set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
__set_bit(MAINTENANCE_IN, filter->read_ok);
__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto error;
+ goto error_free_buffer;
}
blk_rq_set_block_pc(rq);
@@ -531,9 +531,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
}
error:
+ blk_put_request(rq);
+
+error_free_buffer:
kfree(buffer);
- if (rq)
- blk_put_request(rq);
+
return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
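
The reordered labels make the unwind mirror the acquisition order: once blk_get_request() can return an ERR_PTR, jumping to the old label would have handed that ERR_PTR to blk_put_request(), so failures before a request exists now branch to error_free_buffer and free only the buffer. A generic sketch of that label layout (example_ioctl() and do_work() are hypothetical names, not part of the patch):

/*
 * Sketch: cleanup labels in reverse order of acquisition.
 */
static int example_ioctl(struct request_queue *q, void __user *arg,
			 unsigned int bytes)
{
	struct request *rq;
	char *buffer;
	int err;

	buffer = kzalloc(bytes, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto error_free_buffer;	/* no request to put yet */
	}

	if (copy_from_user(buffer, arg, bytes)) {
		err = -EFAULT;
		goto error;		/* request exists, unwind both */
	}

	err = do_work(rq, buffer);	/* hypothetical payload */

error:
	blk_put_request(rq);
error_free_buffer:
	kfree(buffer);
	return err;
}
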