author		Suwan Kim <suwan.kim027@gmail.com>	2022-12-21 23:54:56 +0900
committer	Michael S. Tsirkin <mst@redhat.com>	2023-02-20 19:26:57 -0500
commit		07b679f70d73483930e8d3c293942416d9cd5c13
tree		d7116ba3e0b9b2968b16423daa8acd255f3c44ef /drivers/block/virtio_blk.c
parent		489e18f3d73282f6bf6203324b3b17d459e2e750
virtio-blk: support completion batching for the IRQ path
This patch adds completion batching to the IRQ path by reusing the batch
completion code of virtblk_poll(). Completed requests are collected into an
io_comp_batch and processed all at once, which improves performance by about 2%.
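In blk-mq terms, the batching pattern used here looks like the sketch below.
This is a minimal illustration, not the patch itself: the mydrv_* names are
hypothetical placeholders, while DEFINE_IO_COMP_BATCH(), blk_mq_add_to_batch(),
rq_list_for_each() and blk_mq_end_request_batch() are the real blk-mq helpers
the patch relies on.

#include <linux/blk-mq.h>

/* Batch completion callback: per-request teardown, then one batched end. */
static void mydrv_complete_batch(struct io_comp_batch *iob)
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req)
		mydrv_cleanup(blk_mq_rq_to_pdu(req));	/* hypothetical teardown */

	blk_mq_end_request_batch(iob);
}

/* IRQ-path completion loop sharing the poll path's batching logic. */
static void mydrv_handle_completions(struct mydrv_queue *q)
{
	DEFINE_IO_COMP_BATCH(iob);
	struct request *req;

	while ((req = mydrv_next_completed(q)) != NULL) {	/* hypothetical */
		/*
		 * Try to queue the request on the batch; complete it
		 * directly if it cannot be batched or must complete on
		 * a remote CPU.
		 */
		if (!blk_mq_complete_request_remote(req) &&
		    !blk_mq_add_to_batch(req, &iob, mydrv_status(req),
					 mydrv_complete_batch))
			mydrv_request_done(req);	/* hypothetical */
	}

	/* Flush everything collected on the batch in one call. */
	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
}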
To validate the performance improvement and stability, I ran fio tests on a
4 vCPU VM and a 12 vCPU VM. Both VMs have 8GB of RAM and the same number of
HW queues as vCPUs.
I ran the fio command below 5 times and averaged the IOPS.
(io_uring, randread, direct=1, bs=512, iodepth=64, numjobs=2,4)
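For reference, a fio command line matching these parameters could look roughly
like the one below; the job name, target device, and runtime are assumptions,
as they are not stated in the message.

fio --name=randread --ioengine=io_uring --rw=randread --direct=1 \
    --bs=512 --iodepth=64 --numjobs=2 --filename=/dev/vda \
    --runtime=60 --time_based --group_reporting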
The test results show about a 2% improvement.
  4 vcpu VM          |   numjobs=2   |   numjobs=4
---------------------+---------------+---------------
  fio without patch  |  367.2K IOPS  |  397.6K IOPS
---------------------+---------------+---------------
  fio with patch     |  372.8K IOPS  |  407.7K IOPS

  12 vcpu VM         |   numjobs=2   |   numjobs=4
---------------------+---------------+---------------
  fio without patch  |  363.6K IOPS  |  374.8K IOPS
---------------------+---------------+---------------
  fio with patch     |  373.8K IOPS  |  385.3K IOPS
Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Message-Id: <20221221145456.281218-3-suwan.kim027@gmail.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r--	drivers/block/virtio_blk.c	| 82
1 file changed, 45 insertions(+), 37 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index f2fb49aadb1e..23d4fa3252a4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -334,33 +334,63 @@ static inline void virtblk_request_done(struct request *req)
 	blk_mq_end_request(req, status);
 }
 
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+	struct request *req;
+
+	rq_list_for_each(&iob->req_list, req) {
+		virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
+		virtblk_cleanup_cmd(req);
+	}
+	blk_mq_end_request_batch(iob);
+}
+
+static int virtblk_handle_req(struct virtio_blk_vq *vq,
+			      struct io_comp_batch *iob)
+{
+	struct virtblk_req *vbr;
+	int req_done = 0;
+	unsigned int len;
+
+	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+		struct request *req = blk_mq_rq_from_pdu(vbr);
+
+		if (likely(!blk_should_fake_timeout(req->q)) &&
+		    !blk_mq_complete_request_remote(req) &&
+		    !blk_mq_add_to_batch(req, iob, vbr->status,
+					 virtblk_complete_batch))
+			virtblk_request_done(req);
+		req_done++;
+	}
+
+	return req_done;
+}
+
 static void virtblk_done(struct virtqueue *vq)
 {
 	struct virtio_blk *vblk = vq->vdev->priv;
-	bool req_done = false;
-	int qid = vq->index;
-	struct virtblk_req *vbr;
+	struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index];
+	int req_done = 0;
 	unsigned long flags;
-	unsigned int len;
+	DEFINE_IO_COMP_BATCH(iob);
 
-	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
+	spin_lock_irqsave(&vblk_vq->lock, flags);
 	do {
 		virtqueue_disable_cb(vq);
-		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
-			struct request *req = blk_mq_rq_from_pdu(vbr);
+		req_done += virtblk_handle_req(vblk_vq, &iob);
 
-			if (likely(!blk_should_fake_timeout(req->q)))
-				blk_mq_complete_request(req);
-			req_done = true;
-		}
 		if (unlikely(virtqueue_is_broken(vq)))
 			break;
 	} while (!virtqueue_enable_cb(vq));
 
-	/* In case queue is stopped waiting for more buffers. */
-	if (req_done)
+	if (req_done) {
+		if (!rq_list_empty(iob.req_list))
+			iob.complete(&iob);
+
+		/* In case queue is stopped waiting for more buffers. */
 		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
-	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
+	}
+	spin_unlock_irqrestore(&vblk_vq->lock, flags);
 }
 
 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@ -1176,37 +1206,15 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
 	}
 }
 
-static void virtblk_complete_batch(struct io_comp_batch *iob)
-{
-	struct request *req;
-
-	rq_list_for_each(&iob->req_list, req) {
-		virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
-		virtblk_cleanup_cmd(req);
-	}
-	blk_mq_end_request_batch(iob);
-}
-
 static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
 	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
-	struct virtblk_req *vbr;
 	unsigned long flags;
-	unsigned int len;
 	int found = 0;
 
 	spin_lock_irqsave(&vq->lock, flags);
-
-	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
-		struct request *req = blk_mq_rq_from_pdu(vbr);
-
-		found++;
-		if (!blk_mq_complete_request_remote(req) &&
-		    !blk_mq_add_to_batch(req, iob, vbr->status,
-					 virtblk_complete_batch))
-			virtblk_request_done(req);
-	}
+	found = virtblk_handle_req(vq, iob);
 
 	if (found)
 		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);