diff options
author    Christoph Hellwig <hch@lst.de>  2021-10-12 13:12:20 +0200
committer Jens Axboe <axboe@kernel.dk>    2021-10-18 06:17:36 -0600
commit    d729cf9acb9311956c8a37113dcfa0160a2d9665 (patch)
tree      db98a1f9ce4a4ddf9e454bc8660a87dacb616061
parent    ef99b2d37666b7a600baab9e1c4944436652b0a2 (diff)
io_uring: don't sleep when polling for I/O
There is no point in sleeping for the expected I/O completion timeout
in the io_uring async polling model as we never poll for a specific
I/O.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-11-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 block/blk-mq.c         | 3 ++-
 fs/io_uring.c          | 2 +-
 include/linux/blkdev.h | 2 ++
 3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6609e10657a8..97c24e461d0a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4103,7 +4103,8 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+	if (!(flags & BLK_POLL_NOSLEEP) &&
+	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
 		if (blk_mq_poll_hybrid(q, cookie))
 			return 1;
 	}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 541fec2bd49a..c5066146b8de 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2457,7 +2457,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			long min)
 {
 	struct io_kiocb *req, *tmp;
-	unsigned int poll_flags = 0;
+	unsigned int poll_flags = BLK_POLL_NOSLEEP;
 	LIST_HEAD(done);
 
 	/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e177346bc020..2b80c98fc373 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -566,6 +566,8 @@ blk_status_t errno_to_blk_status(int errno);
 
 /* only poll the hardware once, don't continue until a completion was found */
 #define BLK_POLL_ONESHOT		(1 << 0)
+/* do not sleep to wait for the expected completion time */
+#define BLK_POLL_NOSLEEP		(1 << 1)
 int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)