author	Kemeng Shi <shikemeng@huaweicloud.com>	2023-01-18 17:37:18 +0800
committer	Jens Axboe <axboe@kernel.dk>	2023-02-06 09:22:28 -0700
commit	3e368fb023ffab83404f628d02789550d79eca9c (patch)
tree	f285f36499d3f8b4261dd4f5bc3c97c77c754d35 /block
parent	08e3599e7401a7eae5e68f5e2601cc4a4e53951b (diff)
blk-mq: remove unnecessary from_schedule parameter in blk_mq_plug_issue_direct
blk_mq_plug_issue_direct() tries to issue the batched requests on the plug list directly to the driver. Plugged requests are only issued to the driver when we are not coming from the scheduler, so the from_schedule parameter of blk_mq_plug_issue_direct() is always false. Remove the unnecessary from_schedule parameter.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
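For context on why the parameter can be dropped: blk_mq_plug_issue_direct() is only reached from blk_mq_flush_plug_list(), and that caller only takes the direct-issue path when it is not flushing on behalf of the scheduler, i.e. when its own from_schedule argument is false. The following stand-alone C sketch models that control flow; the struct and helper names are illustrative stand-ins rather than kernel code, and the guard condition is simplified to the part the commit message relies on (!from_schedule).

/*
 * Stand-alone sketch (stub types and helpers, not kernel code) of the
 * control flow that makes from_schedule always false by the time
 * blk_mq_plug_issue_direct() runs: the direct-issue branch in the
 * caller is guarded by !from_schedule.
 */
#include <stdbool.h>
#include <stdio.h>

struct plug_sketch {
	int rq_count;		/* number of plugged requests */
	bool multiple_queues;	/* plugged requests span several queues */
};

/* Models post-patch blk_mq_plug_issue_direct(): no from_schedule
 * parameter; commits are unconditionally done with false. */
static void plug_issue_direct_sketch(struct plug_sketch *plug)
{
	printf("direct issue of %d request(s), committing with from_schedule=false\n",
	       plug->rq_count);
}

/* Models blk_mq_flush_plug_list(): the direct-issue path is only taken
 * when from_schedule is false (guard simplified for illustration), so
 * forwarding from_schedule into the direct-issue helper was redundant. */
static void flush_plug_list_sketch(struct plug_sketch *plug, bool from_schedule)
{
	if (!plug->multiple_queues && !from_schedule) {
		plug_issue_direct_sketch(plug);	/* from_schedule is known false here */
		return;
	}
	printf("deferred flush (from_schedule=%d), no direct issue\n", from_schedule);
}

int main(void)
{
	struct plug_sketch plug = { .rq_count = 4, .multiple_queues = false };

	flush_plug_list_sketch(&plug, false);	/* hits the direct-issue branch */
	flush_plug_list_sketch(&plug, true);	/* scheduler flush never issues directly */
	return 0;
}

Running the sketch prints the direct-issue line for the first call and the deferred line for the second, mirroring why the patch can simply pass false to blk_mq_commit_rqs() inside the helper.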
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	10
1 file changed, 5 insertions, 5 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b764bdd6fd81..e35637915531 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2688,7 +2688,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
}
-static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
+static void blk_mq_plug_issue_direct(struct blk_plug *plug)
{
struct blk_mq_hw_ctx *hctx = NULL;
struct request *rq;
@@ -2701,7 +2701,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
if (hctx != rq->mq_hctx) {
if (hctx)
- blk_mq_commit_rqs(hctx, &queued, from_schedule);
+ blk_mq_commit_rqs(hctx, &queued, false);
hctx = rq->mq_hctx;
}
@@ -2713,7 +2713,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
blk_mq_request_bypass_insert(rq, false, true);
- blk_mq_commit_rqs(hctx, &queued, from_schedule);
+ blk_mq_commit_rqs(hctx, &queued, false);
return;
default:
blk_mq_end_request(rq, ret);
@@ -2727,7 +2727,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
* there was more coming, but that turned out to be a lie.
*/
if (errors)
- blk_mq_commit_rqs(hctx, &queued, from_schedule);
+ blk_mq_commit_rqs(hctx, &queued, false);
}
static void __blk_mq_flush_plug_list(struct request_queue *q,
@@ -2798,7 +2798,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
}
blk_mq_run_dispatch_ops(q,
- blk_mq_plug_issue_direct(plug, false));
+ blk_mq_plug_issue_direct(plug));
if (rq_list_empty(plug->mq_list))
return;
}