Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/kbuf.c   9 ---------
-rw-r--r--  io_uring/kbuf.h  20 +++++---------------
-rw-r--r--  io_uring/net.c   12 ++++++------
-rw-r--r--  io_uring/rw.c     4 ++--
4 files changed, 13 insertions, 32 deletions
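
This patch drops the REQ_F_PARTIAL_IO checks from the two buffer-recycle helpers and instead gates recycling once, at the common io_kbuf_recycle() entry point, on a new REQ_F_BL_NO_RECYCLE flag that the short-transfer retry paths in net.c and rw.c now set. A sketch of the entry point as it reads with both kbuf.h hunks applied; the tail call into io_kbuf_recycle_ring() and the final return are assumptions, since the hunk below ends at the REQ_F_BUFFER_RING test:

	static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
	{
		/* a retry already did IO to this buffer: never hand it back */
		if (req->flags & REQ_F_BL_NO_RECYCLE)
			return false;
		/* legacy provided buffer: return it to the free list */
		if (req->flags & REQ_F_BUFFER_SELECTED)
			return io_kbuf_recycle_legacy(req, issue_flags);
		/* ring-mapped buffer: recycled by leaving the ring head alone */
		if (req->flags & REQ_F_BUFFER_RING)
			return io_kbuf_recycle_ring(req);	/* assumed tail call */
		return false;
	}
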
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 3d257ed9031b..9be42bff936b 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -81,15 +81,6 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	struct io_buffer_list *bl;
 	struct io_buffer *buf;
 
-	/*
-	 * For legacy provided buffer mode, don't recycle if we already did
-	 * IO to this buffer. For ring-mapped provided buffer mode, we should
-	 * increment ring->head to explicitly monopolize the buffer to avoid
-	 * multiple use.
-	 */
-	if (req->flags & REQ_F_PARTIAL_IO)
-		return false;
-
 	io_ring_submit_lock(ctx, issue_flags);
 
 	buf = req->kbuf;
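
The comment removed above encoded the rule that a legacy provided buffer must not be recycled once any IO has been done to it. A minimal stand-alone model of that rule (hypothetical user-space code with made-up fake_* names, not kernel code), showing why hoisting the check into the common caller preserves the behaviour:

	#include <stdbool.h>
	#include <stdio.h>

	#define REQ_F_BUFFER_SELECTED	(1U << 0)	/* legacy provided buffer */
	#define REQ_F_BL_NO_RECYCLE	(1U << 1)	/* partial IO consumed it */

	struct fake_req { unsigned int flags; };

	/* After this patch the "dirty buffer" check lives in the common
	 * entry point, so the legacy helper can recycle unconditionally. */
	static bool fake_kbuf_recycle(struct fake_req *req)
	{
		if (req->flags & REQ_F_BL_NO_RECYCLE)
			return false;
		return !!(req->flags & REQ_F_BUFFER_SELECTED);
	}

	int main(void)
	{
		struct fake_req req = { .flags = REQ_F_BUFFER_SELECTED };

		printf("clean buffer recycled: %d\n", fake_kbuf_recycle(&req));
		req.flags |= REQ_F_BL_NO_RECYCLE;	/* e.g. a short send */
		printf("dirty buffer recycled: %d\n", fake_kbuf_recycle(&req));
		return 0;
	}
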
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index f74c910b83f4..5218bfd79e87 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -73,21 +73,9 @@ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
 	 * to monopolize the buffer.
 	 */
 	if (req->buf_list) {
-		if (req->flags & REQ_F_PARTIAL_IO) {
-			/*
-			 * If we end up here, then the io_uring_lock has
-			 * been kept held since we retrieved the buffer.
-			 * For the io-wq case, we already cleared
-			 * req->buf_list when the buffer was retrieved,
-			 * hence it cannot be set here for that case.
-			 */
-			req->buf_list->head++;
-			req->buf_list = NULL;
-		} else {
-			req->buf_index = req->buf_list->bgid;
-			req->flags &= ~REQ_F_BUFFER_RING;
-			return true;
-		}
+		req->buf_index = req->buf_list->bgid;
+		req->flags &= ~REQ_F_BUFFER_RING;
+		return true;
 	}
 	return false;
 }
@@ -101,6 +89,8 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
 
 static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
+	if (req->flags & REQ_F_BL_NO_RECYCLE)
+		return false;
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return io_kbuf_recycle_legacy(req, issue_flags);
 	if (req->flags & REQ_F_BUFFER_RING)
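
With the earlier hunk applied, the whole of io_kbuf_recycle_ring() reduces to the following (reconstructed from the context lines above; the explanatory comment block at the top of the function is elided here):

	static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
	{
		if (req->buf_list) {
			/* clearing REQ_F_BUFFER_RING recycles: bl->head is never bumped */
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
			return true;
		}
		return false;
	}
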
diff --git a/io_uring/net.c b/io_uring/net.c
index b832c586be36..b97d70f905a8 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -456,7 +456,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 			kmsg->msg.msg_controllen = 0;
 			kmsg->msg.msg_control = NULL;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -535,7 +535,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_addr(req, &__address, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -907,7 +907,7 @@ retry_multishot:
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -1007,7 +1007,7 @@ retry_multishot:
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
@@ -1250,7 +1250,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			zc->len -= ret;
 			zc->buf += ret;
 			zc->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_addr(req, &__address, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -1320,7 +1320,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
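
All six net.c hunks make the same one-line substitution on the partial-transfer retry paths: whenever io_net_retry() elects to retry after a short send or receive, the request is flagged so the half-consumed buffer is never recycled. The common shape, following the io_send() hunk above (a sketch; the surrounding error handling is elided):

	if (ret > 0 && io_net_retry(sock, flags)) {
		sr->len -= ret;				/* account bytes already moved */
		sr->buf += ret;
		sr->done_io += ret;
		req->flags |= REQ_F_BL_NO_RECYCLE;	/* buffer is partially used now */
		return io_setup_async_addr(req, &__address, issue_flags);
	}
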
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 7733449271f2..5651a5ad4e11 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -275,7 +275,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 			 * current cycle.
 			 */
 			io_req_io_end(req);
-			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
 			return true;
 		}
 		req_set_fail(req);
@@ -342,7 +342,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 		io_req_end_write(req);
 	if (unlikely(res != req->cqe.res)) {
 		if (res == -EAGAIN && io_rw_should_reissue(req)) {
-			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
 			return;
 		}
 		req->cqe.res = res;
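
Both rw.c hunks pair REQ_F_REISSUE with the new flag, so a read or write that gets requeued holds on to the buffer it may already have dirtied. The iopoll completion branch as it reads with the hunk applied (reconstructed from the context lines above; the closing brace is an assumption):

	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			/* requeue and keep the (possibly dirtied) buffer */
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return;
		}
		req->cqe.res = res;
	}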