author | Ming Lei <ming.lei@redhat.com> | 2020-06-30 22:03:55 +0800
committer | Jens Axboe <axboe@kernel.dk> | 2020-06-30 12:57:59 -0600
commit | 570e9b73b0af2e5381ca5343759779b8c1ed20e3 (patch)
tree | 31d45357234c3313e48a34b6a44c2874b1644df7 /block/blk-mq-tag.h
parent | 6e6fcbc27e7788af54139c53537395d95560f2ef (diff)
blk-mq: move blk_mq_get_driver_tag into blk-mq.c
blk_mq_get_driver_tag() is only used by blk-mq.c and is supposed to
stay in blk-mq.c, so move it there in preparation for cleaning up the
get/put driver tag code.
Meanwhile, hctx_may_queue() is moved to the header file, which is fine
since it is always defined as inline.
No functional change.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq-tag.h')
-rw-r--r-- | block/blk-mq-tag.h | 39
1 file changed, 31 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 2e4ef51cdb32..3945c7f5b944 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -51,14 +51,6 @@ enum {
 	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
 };
 
-bool __blk_mq_get_driver_tag(struct request *rq);
-static inline bool blk_mq_get_driver_tag(struct request *rq)
-{
-	if (rq->tag != BLK_MQ_NO_TAG)
-		return true;
-	return __blk_mq_get_driver_tag(rq);
-}
-
 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
 
@@ -79,6 +71,37 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+				  struct sbitmap_queue *bt)
+{
+	unsigned int depth, users;
+
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+		return true;
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return true;
+
+	/*
+	 * Don't try dividing an ant
+	 */
+	if (bt->sb.depth == 1)
+		return true;
+
+	users = atomic_read(&hctx->tags->active_queues);
+	if (!users)
+		return true;
+
+	/*
+	 * Allow at least some tags
+	 */
+	depth = max((bt->sb.depth + users - 1) / users, 4U);
+	return atomic_read(&hctx->nr_active) < depth;
+}
+
+/*
  * This helper should only be used for flush request to share tag
  * with the request cloned from, and both the two requests can't be
  * in flight at the same time. The caller has to make sure the tag
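
For reference, below is a minimal, standalone sketch of the fair-share arithmetic the moved hctx_may_queue() performs: each active queue gets roughly ceil(tag depth / active queues) tags, but never fewer than 4. The function and parameter names (may_queue, tag_depth, active_queues, nr_active) are illustrative only; the kernel code reads these values from the hctx and sbitmap_queue structures under the BLK_MQ_F_TAG_SHARED / BLK_MQ_S_TAG_ACTIVE checks shown in the diff above.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the fair-share check in hctx_may_queue(). */
static bool may_queue(unsigned int tag_depth, unsigned int active_queues,
		      unsigned int nr_active)
{
	unsigned int depth;

	/* A single-tag queue cannot be divided fairly; no users means no sharing. */
	if (tag_depth == 1 || active_queues == 0)
		return true;

	/* ceil(tag_depth / active_queues), but allow at least 4 tags per queue. */
	depth = (tag_depth + active_queues - 1) / active_queues;
	if (depth < 4)
		depth = 4;

	return nr_active < depth;
}

int main(void)
{
	/* 128 tags shared by 8 active queues -> each queue may use up to 16. */
	printf("%d\n", may_queue(128, 8, 15));	/* 1: still below its share */
	printf("%d\n", may_queue(128, 8, 16));	/* 0: share exhausted */
	return 0;
}
```

With 128 tags and 8 active queues the per-queue budget is 16, so a queue with 15 requests in flight may take another tag while one with 16 may not; this is the throttling behavior the shared-tag comment in the diff describes.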