author	Linus Torvalds <torvalds@linux-foundation.org>	2024-07-15 13:49:10 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-07-15 13:49:10 -0700
commit	3a56e241732975c2c1247047ddbfc0ac6f6a4905 (patch)
tree	c365b9a90548e6f60e2773a486402c6384ea3ac1 /io_uring/net.c
parent	4f5e249ec0ea8872e1644df23cffffbe28007188 (diff)
parent	ad00e629145b2b9f0d78aa46e204a9df7d628978 (diff)
Merge tag 'for-6.11/io_uring-20240714' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:
 "Here are the io_uring updates queued up for 6.11. Nothing major this
  time around, various minor improvements and cleanups/fixes. This
  contains:

   - Add bind/listen opcodes. Main motivation is to support direct
     descriptors, to avoid needing a regular fd just for doing these
     two operations (Gabriel)

   - Probe fixes (Gabriel)

   - Treat io-wq work flags as atomics. Not fixing a real issue, but
     may as well, and it silences a KCSAN warning (me)

   - Cleanup of rsrc __set_current_state() usage (me)

   - Add 64-bit support for the {m,f}advise operations (me)

   - Improve performance of data ring messages (me)

   - Fix for ring message overflow posting (Pavel)

   - Fix for freezer interaction with TWA_NOTIFY_SIGNAL. Not strictly
     an io_uring thing, but since TWA_NOTIFY_SIGNAL was originally
     added for faster task_work signaling for io_uring, bundling it
     with this pull (Pavel)

   - Add Pavel as a co-maintainer

   - Various cleanups (me, Thorsten)"

* tag 'for-6.11/io_uring-20240714' of git://git.kernel.dk/linux: (28 commits)
  io_uring/net: check socket is valid in io_bind()/io_listen()
  kernel: rerun task_work while freezing in get_signal()
  io_uring/io-wq: limit retrying worker initialisation
  io_uring/napi: Remove unnecessary s64 cast
  io_uring/net: cleanup io_recv_finish() bundle handling
  io_uring/msg_ring: fix overflow posting
  MAINTAINERS: change Pavel Begunkov from io_uring reviewer to maintainer
  io_uring/msg_ring: use kmem_cache_free() to free request
  io_uring/msg_ring: check for dead submitter task
  io_uring/msg_ring: add an alloc cache for io_kiocb entries
  io_uring/msg_ring: improve handling of target CQE posting
  io_uring: add io_add_aux_cqe() helper
  io_uring: add remote task_work execution helper
  io_uring/msg_ring: tighten requirement for remote posting
  io_uring: Allocate only necessary memory in io_probe
  io_uring: Fix probe of disabled operations
  io_uring: Introduce IORING_OP_LISTEN
  io_uring: Introduce IORING_OP_BIND
  net: Split a __sys_listen helper for io_uring
  net: Split a __sys_bind helper for io_uring
  ...
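As a rough userspace illustration of the new bind/listen opcodes (the headline addition, implemented in the diff below), here is a minimal sketch. It assumes a liburing version that ships the matching io_uring_prep_bind()/io_uring_prep_listen() helpers; the port, backlog, and error handling are illustrative only.

/* Sketch: bind + listen through io_uring rather than direct syscalls. */
#include <liburing.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),	/* illustrative port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int fd, i;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;
	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return 1;

	/* Queue IORING_OP_BIND and IORING_OP_LISTEN back to back;
	 * IOSQE_IO_LINK makes listen run only if bind succeeded. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_bind(sqe, fd, (struct sockaddr *)&addr, sizeof(addr));
	sqe->flags |= IOSQE_IO_LINK;
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_listen(sqe, fd, 128);

	io_uring_submit(&ring);
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		if (cqe->res < 0)
			fprintf(stderr, "op %d failed: %d\n", i, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

With direct descriptors (the stated motivation for the opcodes), the socket would instead live in the ring's fixed file table and the two SQEs would be flagged with IOSQE_FIXED_FILE, so no regular fd is needed at all; the regular-fd form above just keeps the sketch short.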
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--	io_uring/net.c	94
1 file changed, 84 insertions(+), 10 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index cf742bdd2a93..7b75da2e7826 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -51,6 +51,16 @@ struct io_connect {
bool seen_econnaborted;
};

+struct io_bind {
+ struct file *file;
+ int addr_len;
+};
+
+struct io_listen {
+ struct file *file;
+ int backlog;
+};
+
struct io_sr_msg {
struct file *file;
union {
@@ -817,20 +827,20 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
bool mshot_finished, unsigned issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- unsigned int cflags;
-
- if (sr->flags & IORING_RECVSEND_BUNDLE)
- cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
- issue_flags);
- else
- cflags = io_put_kbuf(req, issue_flags);
+ unsigned int cflags = 0;

if (kmsg->msg.msg_inq > 0)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;

- /* bundle with no more immediate buffers, we're done */
- if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY)
- goto finish;
+ if (sr->flags & IORING_RECVSEND_BUNDLE) {
+ cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+ issue_flags);
+ /* bundle with no more immediate buffers, we're done */
+ if (req->flags & REQ_F_BL_EMPTY)
+ goto finish;
+ } else {
+ cflags |= io_put_kbuf(req, issue_flags);
+ }

/*
* Fill CQE for this receive and see if we should keep trying to
@@ -1717,6 +1727,70 @@ out:
return IOU_OK;
}
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+ struct sockaddr __user *uaddr;
+ struct io_async_msghdr *io;
+
+ if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
+ return -EINVAL;
+
+ uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ bind->addr_len = READ_ONCE(sqe->addr2);
+
+ io = io_msg_alloc_async(req);
+ if (unlikely(!io))
+ return -ENOMEM;
+ return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
+}
+
+int io_bind(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+ struct io_async_msghdr *io = req->async_data;
+ struct socket *sock;
+ int ret;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return 0;
+}
+
+int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+
+ if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
+ return -EINVAL;
+
+ listen->backlog = READ_ONCE(sqe->len);
+ return 0;
+}
+
+int io_listen(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+ struct socket *sock;
+ int ret;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ ret = __sys_listen_socket(sock, listen->backlog);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return 0;
+}
+
void io_netmsg_cache_free(const void *entry)
{
struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
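The io_recv_finish() hunk above folds IORING_CQE_F_SOCK_NONEMPTY into cflags before the bundle buffer accounting instead of after it. For context, here is a hedged sketch of what a bundle receive looks like from userspace: a provided-buffer ring plus one recv SQE carrying IORING_RECVSEND_BUNDLE. It assumes liburing's buffer-ring helpers; the group id, buffer count, and sizes are illustrative only.

/* Sketch: a bundle receive against a provided-buffer ring.
 * IORING_RECVSEND_BUNDLE lets a single recv complete with data spread
 * across several ring buffers; the kernel-side io_put_kbufs() call in
 * the hunk above is what accounts for those consumed buffers.
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID	7	/* illustrative buffer-group id */
#define NBUFS	8
#define BUFSZ	4096

static int queue_bundle_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	int i, err;

	/* Register a ring of NBUFS provided buffers under group BGID. */
	br = io_uring_setup_buf_ring(ring, NBUFS, BGID, 0, &err);
	if (!br)
		return err;
	for (i = 0; i < NBUFS; i++)
		io_uring_buf_ring_add(br, malloc(BUFSZ), BUFSZ, i,
				      io_uring_buf_ring_mask(NBUFS), i);
	io_uring_buf_ring_advance(br, NBUFS);

	/* One recv SQE that may consume several of the buffers at once. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;
	return io_uring_submit(ring);
}

On completion, cqe->res holds the total byte count across the bundle, and IORING_CQE_F_BUFFER in cqe->flags carries the id of the first consumed buffer; IORING_CQE_F_SOCK_NONEMPTY, set by the code above, signals that more data is already waiting on the socket.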