author     Jens Axboe <axboe@fb.com>  2014-04-24 08:51:47 -0600
committer  Jens Axboe <axboe@fb.com>  2014-04-24 08:51:47 -0600
commit     87ee7b112193bd081ba1a171fa5f6f39c429ef56
tree       fbf88d2de279209467dee4d6cd9007f5daa8a678 /block
parent     70ab0b2d51f84fc7d9eb6ed81c3986595efaa33d
blk-mq: fix race with timeouts and requeue events
If a requeue event races with a timeout, we can get into the
situation where we attempt to complete a request from the
timeout handler when it's no longer started. This causes a crash.
So have the timeout handler check that REQ_ATOM_STARTED is still
set on the request - if not, we ignore the event. When that
happens, the timeout path has already marked the request as
complete. As a consequence, we need to ensure we clear
REQ_ATOM_COMPLETE in blk_mq_start_request(), so as to maintain
proper request state.
Signed-off-by: Jens Axboe <axboe@fb.com>
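The race and the fix can be walked through in plain userspace C. The sketch below is a simplified model, not kernel code: the bitops helpers are single-threaded stand-ins for the kernel's atomic test_bit()/set_bit() family, and the three functions reduce blk_mq_start_request(), the requeue path, and the timeout handler to just their flag handling.

```c
/*
 * Userspace sketch (not kernel code) of the request-state race this
 * commit fixes. Flag names mirror REQ_ATOM_STARTED/REQ_ATOM_COMPLETE;
 * the helpers are simplified, single-threaded stand-ins for the
 * kernel's atomic bitops, used only to walk through the ordering.
 */
#include <stdio.h>

#define REQ_ATOM_STARTED	0
#define REQ_ATOM_COMPLETE	1

struct request { unsigned long atomic_flags; };

static int test_bit(int nr, unsigned long *addr) { return (*addr >> nr) & 1; }
static void set_bit(int nr, unsigned long *addr) { *addr |= 1UL << nr; }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static int test_and_set_bit(int nr, unsigned long *addr)
{
	int old = test_bit(nr, addr);
	set_bit(nr, addr);
	return old;
}

/* Timeout path: mark the request complete, then decide what to do. */
static void timeout_fires(struct request *rq)
{
	if (test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		return;	/* someone else already completed it */
	/*
	 * The fix: re-check STARTED after taking COMPLETE. A requeue may
	 * have cleared STARTED after the timer last observed it set, in
	 * which case this "timeout" is for a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		printf("timeout ignored: request not started\n");
		return;
	}
	printf("timing out an active request\n");
}

/* Requeue path: the request goes back on the queue, no longer started. */
static void requeue(struct request *rq)
{
	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}

/* (Re)start path: per the fix, COMPLETE must be cleared again here. */
static void start_request(struct request *rq)
{
	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

int main(void)
{
	struct request rq = { 0 };

	start_request(&rq);
	requeue(&rq);		/* requeue races with the timer... */
	timeout_fires(&rq);	/* ...so the timeout sees STARTED clear */
	start_request(&rq);	/* clears the stale COMPLETE bit */
	timeout_fires(&rq);	/* a later timeout now works normally */
	return 0;
}
```

Run top to bottom, the first timeout is ignored because the requeue cleared STARTED; without the clear_bit(REQ_ATOM_COMPLETE) in start_request(), the stale COMPLETE bit left behind by that ignored timeout would swallow the request's next real completion.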
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c       39
-rw-r--r--  block/blk-mq.h        2
-rw-r--r--  block/blk-timeout.c  16
-rw-r--r--  block/blk.h           3

4 files changed, 42 insertions(+), 18 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d5650d75aef..a84112c94e74 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -378,7 +378,15 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	 * REQ_ATOMIC_STARTED is seen.
 	 */
 	rq->deadline = jiffies + q->rq_timeout;
+
+	/*
+	 * Mark us as started and clear complete. Complete might have been
+	 * set if requeue raced with timeout, which then marked it as
+	 * complete. So be sure to clear complete again when we start
+	 * the request, otherwise we'll ignore the completion event.
+	 */
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -485,6 +493,28 @@ static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
 	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
 }
 
+static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	/*
+	 * We know that complete is set at this point. If STARTED isn't set
+	 * anymore, then the request isn't active and the "timeout" should
+	 * just be ignored. This can happen due to the bitflag ordering.
+	 * Timeout first checks if STARTED is set, and if it is, assumes
+	 * the request is active. But if we race with completion, then
+	 * we both flags will get cleared. So check here again, and ignore
+	 * a timeout event with a request that isn't active.
+	 */
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		return BLK_EH_NOT_HANDLED;
+
+	if (!q->mq_ops->timeout)
+		return BLK_EH_RESET_TIMER;
+
+	return q->mq_ops->timeout(rq);
+}
+
 static void blk_mq_rq_timer(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *) data;
@@ -538,11 +568,6 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 	return false;
 }
 
-void blk_mq_add_timer(struct request *rq)
-{
-	__blk_add_timer(rq, NULL);
-}
-
 /*
  * Run this hardware queue, pulling any software queues mapped to it in.
  * Note that this function currently has various problems around ordering
@@ -799,7 +824,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	/*
 	 * We do this early, to ensure we are on the right CPU.
 	 */
-	blk_mq_add_timer(rq);
+	blk_add_timer(rq);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -1400,7 +1425,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	q->sg_reserved_size = INT_MAX;
 
 	blk_queue_make_request(q, blk_mq_make_request);
-	blk_queue_rq_timed_out(q, set->ops->timeout);
+	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
 	if (set->timeout)
 		blk_queue_rq_timeout(q, set->timeout);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5fa14f19f752..b41a784de50d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -51,6 +51,4 @@ void blk_mq_disable_hotplug(void);
 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
 extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
 
-void blk_mq_add_timer(struct request *rq);
-
 #endif
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a09e8af8186c..49988a3ca85c 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req)
 		__blk_complete_request(req);
 		break;
 	case BLK_EH_RESET_TIMER:
-		if (q->mq_ops)
-			blk_mq_add_timer(req);
-		else
-			blk_add_timer(req);
-
+		blk_add_timer(req);
 		blk_clear_rq_complete(req);
 		break;
 	case BLK_EH_NOT_HANDLED:
@@ -170,7 +166,8 @@ void blk_abort_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
-void __blk_add_timer(struct request *req, struct list_head *timeout_list)
+static void __blk_add_timer(struct request *req,
+			    struct list_head *timeout_list)
 {
 	struct request_queue *q = req->q;
 	unsigned long expiry;
@@ -225,6 +222,11 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
  */
 void blk_add_timer(struct request *req)
 {
-	__blk_add_timer(req, &req->q->timeout_list);
+	struct request_queue *q = req->q;
+
+	if (q->mq_ops)
+		__blk_add_timer(req, NULL);
+	else
+		__blk_add_timer(req, &req->q->timeout_list);
 }
diff --git a/block/blk.h b/block/blk.h
index 1d880f1f957f..79be2cbce7fd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -37,9 +37,8 @@ bool __blk_end_bidi_request(struct request *rq, int error,
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
 			  unsigned int *next_set);
-void __blk_add_timer(struct request *req, struct list_head *timeout_list);
+void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);
-void blk_add_timer(struct request *);
 
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
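The dispatch installed via blk_queue_rq_timed_out() can be modeled the same way. The sketch below is a simplified userspace model, not kernel code: the structures are pared down to the fields the function actually consults. The point is that blk_mq_init_queue() now registers one queue-wide handler, so the STARTED re-check always runs before any driver ->timeout hook, and a driver that supplies no hook simply has its timer re-armed.

```c
/*
 * Userspace model of the blk_mq_rq_timed_out() dispatch added above.
 * Types are pared-down stand-ins for the kernel structures; the
 * 'started' field stands in for REQ_ATOM_STARTED.
 */
#include <stdio.h>

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,	/* stale event, ignore it */
	BLK_EH_HANDLED,		/* driver completed the request */
	BLK_EH_RESET_TIMER,	/* re-arm the timeout timer */
};

struct request;

struct blk_mq_ops {
	enum blk_eh_timer_return (*timeout)(struct request *rq);
};

struct request_queue {
	struct blk_mq_ops *mq_ops;
};

struct request {
	struct request_queue *q;
	int started;
};

static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
{
	struct request_queue *q = rq->q;

	/* A requeue may have cleared STARTED after the timer fired. */
	if (!rq->started)
		return BLK_EH_NOT_HANDLED;

	/* No driver hook: just give the request more time. */
	if (!q->mq_ops->timeout)
		return BLK_EH_RESET_TIMER;

	return q->mq_ops->timeout(rq);
}

int main(void)
{
	struct blk_mq_ops ops = { .timeout = NULL };
	struct request_queue q = { .mq_ops = &ops };
	struct request rq = { .q = &q, .started = 0 };

	/* Requeued request: the timeout is recognized as stale. */
	printf("%d\n", blk_mq_rq_timed_out(&rq));	/* 0: NOT_HANDLED */

	/* Active request, driver without a ->timeout: timer re-armed. */
	rq.started = 1;
	printf("%d\n", blk_mq_rq_timed_out(&rq));	/* 2: RESET_TIMER */
	return 0;
}
```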