author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-05 12:31:59 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-05 12:31:59 -0700
commit     5518b69b76680a4f2df96b1deca260059db0c2de
tree       f33cd1519c8efb4590500f2f9617400be233238c  /net/rxrpc/sendmsg.c
parent     8ad06e56dcbc1984ef0ff8f6e3c19982c5809f73
parent     0e72582270c07850b92cac351c8b97d4f9c123b9
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Reasonably busy this cycle, but perhaps not as busy as in the 4.12
merge window:
1) Several optimizations for UDP processing under high load from
Paolo Abeni.
2) Support pacing internally in TCP for cases where using the sch_fq
packet scheduler is not practical. From Eric Dumazet.
3) Support multiple filter chains per qdisc, from Jiri Pirko.
4) Move to 1ms TCP timestamp clock, from Eric Dumazet.
5) Add batch dequeueing to vhost_net, from Jason Wang.
6) Flesh out more completely SCTP checksum offload support, from
Davide Caratti.
7) More plumbing of extended netlink ACKs, from David Ahern, Pablo
Neira Ayuso, and Matthias Schiffer.
8) Add devlink support to nfp driver, from Simon Horman.
9) Add RTM_F_FIB_MATCH flag to RTM_GETROUTE queries, from Roopa
Prabhu.
10) Add stack depth tracking to BPF verifier and use this information
in the various eBPF JITs. From Alexei Starovoitov.
11) Support XDP on qed device VFs, from Yuval Mintz.
12) Introduce BPF PROG ID for better introspection of installed BPF
programs. From Martin KaFai Lau.
13) Add bpf_set_hash helper for TC bpf programs, from Daniel Borkmann.
14) For loads, allow narrower accesses in bpf verifier checking, from
Yonghong Song.
15) Support MIPS in the BPF selftests and samples infrastructure, the
MIPS eBPF JIT will be merged in via the MIPS GIT tree. From David
Daney.
16) Support kernel based TLS, from Dave Watson and others.
17) Remove completely DST garbage collection, from Wei Wang.
18) Allow installing TCP MD5 rules using prefixes, from Ivan
Delalande.
19) Add XDP support to Intel i40e driver, from Björn Töpel.
20) Add support for TC flower offload in nfp driver, from Simon
Horman, Pieter Jansen van Vuuren, Benjamin LaHaise, Jakub
Kicinski, and Bert van Leeuwen.
21) IPSEC offloading support in mlx5, from Ilan Tayari.
22) Add HW PTP support to macb driver, from Rafal Ozieblo.
23) Networking refcount_t conversions, From Elena Reshetova.
24) Add sock_ops support to BPF, from Lawrence Brakmo. This is useful
for tuning the TCP sockopt settings of a group of applications,
currently via CGROUPs" (a hedged sock_ops sketch follows below)
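To make item 24 concrete, here is a minimal illustrative sock_ops program in the style of the samples/bpf/tcp_*_kern.c examples added by that series; it is a sketch, not code from this merge. It assumes the 4.13-era sample header "bpf_helpers.h" (which carries the bpf_setsockopt() helper declaration); the program name and the clamp value of 100 are arbitrary. The object would be attached to a cgroup with the BPF_CGROUP_SOCK_OPS attach type and clamps the send congestion window once a TCP connection is established:

/* Illustrative sketch only: clamp snd_cwnd for TCP sockets in a cgroup. */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

#ifndef SOL_TCP
#define SOL_TCP 6			/* as defined in <linux/socket.h> */
#endif

SEC("sockops")
int bpf_cwnd_clamp(struct bpf_sock_ops *skops)
{
	int clamp = 100;		/* arbitrary clamp, in packets */
	int rv = -1;

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* bpf_setsockopt() helper added in the same series */
		rv = bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SNDCWND_CLAMP,
				    &clamp, sizeof(clamp));
		break;
	default:
		break;
	}

	skops->reply = rv;
	return 1;
}

char _license[] SEC("license") = "GPL";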
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1899 commits)
net: phy: dp83867: add workaround for incorrect RX_CTRL pin strap
dt-bindings: phy: dp83867: provide a workaround for incorrect RX_CTRL pin strap
cxgb4: Support for get_ts_info ethtool method
cxgb4: Add PTP Hardware Clock (PHC) support
cxgb4: time stamping interface for PTP
nfp: default to chained metadata prepend format
nfp: remove legacy MAC address lookup
nfp: improve order of interfaces in breakout mode
net: macb: remove extraneous return when MACB_EXT_DESC is defined
bpf: add missing break in for the TCP_BPF_SNDCWND_CLAMP case
bpf: fix return in load_bpf_file
mpls: fix rtm policy in mpls_getroute
net, ax25: convert ax25_cb.refcount from atomic_t to refcount_t
net, ax25: convert ax25_route.refcount from atomic_t to refcount_t
net, ax25: convert ax25_uid_assoc.refcount from atomic_t to refcount_t
net, sctp: convert sctp_ep_common.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_transport.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_chunk.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_datamsg.refcnt from atomic_t to refcount_t
net, sctp: convert sctp_auth_bytes.refcnt from atomic_t to refcount_t
...
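The run of "convert ... from atomic_t to refcount_t" commits above all follow the same mechanical pattern. As an illustration only (the struct below is hypothetical, not taken from those patches), a conversion swaps the atomic_* reference-count operations for the refcount_* API from <linux/refcount.h>, which saturates instead of wrapping on overflow:

/* Illustrative sketch of an atomic_t -> refcount_t conversion. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;			/* was: atomic_t refcnt; */
	/* ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);	/* was: atomic_set(..., 1) */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refcnt);		/* was: atomic_inc(...) */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))	/* was: atomic_dec_and_test() */
		kfree(f);
}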
Diffstat (limited to 'net/rxrpc/sendmsg.c')
-rw-r--r--  net/rxrpc/sendmsg.c  138
1 file changed, 99 insertions(+), 39 deletions(-)
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 96ffa5d5733b..b0d2cda6ec0a 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -28,6 +28,15 @@ enum rxrpc_command {
 	RXRPC_CMD_REJECT_BUSY,	/* [server] reject a call as busy */
 };
 
+struct rxrpc_send_params {
+	s64 tx_total_len;		/* Total Tx data length (if send data) */
+	unsigned long user_call_ID;	/* User's call ID */
+	u32 abort_code;			/* Abort code to Tx (if abort) */
+	enum rxrpc_command command : 8;	/* The command to implement */
+	bool exclusive;			/* Shared or exclusive call */
+	bool upgrade;			/* If the connection is upgradeable */
+};
+
 /*
  * wait for space to appear in the transmit/ACK window
  * - caller holds the socket locked
@@ -199,6 +208,13 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
 	more = msg->msg_flags & MSG_MORE;
 
+	if (call->tx_total_len != -1) {
+		if (len > call->tx_total_len)
+			return -EMSGSIZE;
+		if (!more && len != call->tx_total_len)
+			return -EMSGSIZE;
+	}
+
 	skb = call->tx_pending;
 	call->tx_pending = NULL;
 	rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
@@ -291,6 +307,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 			sp->remain -= copy;
 			skb->mark += copy;
 			copied += copy;
+			if (call->tx_total_len != -1)
+				call->tx_total_len -= copy;
 		}
 
 		/* check for the far side aborting the call or a network error
@@ -312,7 +330,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 			pad &= conn->size_align - 1;
 			_debug("pad %zu", pad);
 			if (pad)
-				memset(skb_put(skb, pad), 0, pad);
+				skb_put_zero(skb, pad);
 		}
 
 		seq = call->tx_top + 1;
@@ -362,18 +380,12 @@ efault:
 /*
  * extract control messages from the sendmsg() control buffer
  */
-static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
-			      unsigned long *user_call_ID,
-			      enum rxrpc_command *command,
-			      u32 *abort_code,
-			      bool *_exclusive)
+static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
 {
 	struct cmsghdr *cmsg;
 	bool got_user_ID = false;
 	int len;
 
-	*command = RXRPC_CMD_SEND_DATA;
-
 	if (msg->msg_controllen == 0)
 		return -EINVAL;
 
@@ -393,42 +405,55 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
 			if (msg->msg_flags & MSG_CMSG_COMPAT) {
 				if (len != sizeof(u32))
 					return -EINVAL;
-				*user_call_ID = *(u32 *) CMSG_DATA(cmsg);
+				p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
 			} else {
 				if (len != sizeof(unsigned long))
 					return -EINVAL;
-				*user_call_ID = *(unsigned long *)
+				p->user_call_ID = *(unsigned long *)
 					CMSG_DATA(cmsg);
 			}
-			_debug("User Call ID %lx", *user_call_ID);
 			got_user_ID = true;
 			break;
 
 		case RXRPC_ABORT:
-			if (*command != RXRPC_CMD_SEND_DATA)
+			if (p->command != RXRPC_CMD_SEND_DATA)
 				return -EINVAL;
-			*command = RXRPC_CMD_SEND_ABORT;
-			if (len != sizeof(*abort_code))
+			p->command = RXRPC_CMD_SEND_ABORT;
+			if (len != sizeof(p->abort_code))
 				return -EINVAL;
-			*abort_code = *(unsigned int *) CMSG_DATA(cmsg);
-			_debug("Abort %x", *abort_code);
-			if (*abort_code == 0)
+			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
+			if (p->abort_code == 0)
 				return -EINVAL;
 			break;
 
 		case RXRPC_ACCEPT:
-			if (*command != RXRPC_CMD_SEND_DATA)
+			if (p->command != RXRPC_CMD_SEND_DATA)
 				return -EINVAL;
-			*command = RXRPC_CMD_ACCEPT;
+			p->command = RXRPC_CMD_ACCEPT;
 			if (len != 0)
 				return -EINVAL;
 			break;
 
 		case RXRPC_EXCLUSIVE_CALL:
-			*_exclusive = true;
+			p->exclusive = true;
+			if (len != 0)
+				return -EINVAL;
+			break;
+
+		case RXRPC_UPGRADE_SERVICE:
+			p->upgrade = true;
 			if (len != 0)
 				return -EINVAL;
 			break;
+
+		case RXRPC_TX_LENGTH:
+			if (p->tx_total_len != -1 || len != sizeof(__s64))
+				return -EINVAL;
+			p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
+			if (p->tx_total_len < 0)
+				return -EINVAL;
+			break;
+
 		default:
 			return -EINVAL;
 		}
@@ -436,6 +461,8 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
 	if (!got_user_ID)
 		return -EINVAL;
 
+	if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
+		return -EINVAL;
 	_leave(" = 0");
 	return 0;
 }
@@ -447,7 +474,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
  */
 static struct rxrpc_call *
 rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
-				  unsigned long user_call_ID, bool exclusive)
+				  struct rxrpc_send_params *p)
 	__releases(&rx->sk.sk_lock.slock)
 {
 	struct rxrpc_conn_parameters cp;
@@ -471,9 +498,11 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 	cp.local = rx->local;
 	cp.key = rx->key;
 	cp.security_level = rx->min_sec_level;
-	cp.exclusive = rx->exclusive | exclusive;
+	cp.exclusive = rx->exclusive | p->exclusive;
+	cp.upgrade = p->upgrade;
 	cp.service_id = srx->srx_service;
-	call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
+	call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
+				     p->tx_total_len, GFP_KERNEL);
 	/* The socket is now unlocked */
 
 	_leave(" = %p\n", call);
@@ -489,25 +518,29 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	__releases(&rx->sk.sk_lock.slock)
 {
 	enum rxrpc_call_state state;
-	enum rxrpc_command cmd;
 	struct rxrpc_call *call;
-	unsigned long user_call_ID = 0;
-	bool exclusive = false;
-	u32 abort_code = 0;
 	int ret;
 
+	struct rxrpc_send_params p = {
+		.tx_total_len = -1,
+		.user_call_ID = 0,
+		.abort_code = 0,
+		.command = RXRPC_CMD_SEND_DATA,
+		.exclusive = false,
+		.upgrade = true,
+	};
+
 	_enter("");
 
-	ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
-				 &exclusive);
+	ret = rxrpc_sendmsg_cmsg(msg, &p);
 	if (ret < 0)
 		goto error_release_sock;
 
-	if (cmd == RXRPC_CMD_ACCEPT) {
+	if (p.command == RXRPC_CMD_ACCEPT) {
 		ret = -EINVAL;
 		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
 			goto error_release_sock;
-		call = rxrpc_accept_call(rx, user_call_ID, NULL);
+		call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
 		/* The socket is now unlocked. */
 		if (IS_ERR(call))
 			return PTR_ERR(call);
@@ -515,13 +548,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 		return 0;
 	}
 
-	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
+	call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
 	if (!call) {
 		ret = -EBADSLT;
-		if (cmd != RXRPC_CMD_SEND_DATA)
+		if (p.command != RXRPC_CMD_SEND_DATA)
 			goto error_release_sock;
-		call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
-							 exclusive);
+		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
 		/* The socket is now unlocked... */
 		if (IS_ERR(call))
 			return PTR_ERR(call);
@@ -545,6 +577,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 			ret = -ERESTARTSYS;
 			goto error_put;
 		}
+
+		if (p.tx_total_len != -1) {
+			ret = -EINVAL;
+			if (call->tx_total_len != -1 ||
+			    call->tx_pending ||
+			    call->tx_top != 0)
+				goto error_put;
+			call->tx_total_len = p.tx_total_len;
+		}
 	}
 
 	state = READ_ONCE(call->state);
@@ -554,11 +595,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	if (state >= RXRPC_CALL_COMPLETE) {
 		/* it's too late for this call */
 		ret = -ESHUTDOWN;
-	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
+	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
 		ret = 0;
-		if (rxrpc_abort_call("CMD", call, 0, abort_code, -ECONNABORTED))
+		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
 			ret = rxrpc_send_abort_packet(call);
-	} else if (cmd != RXRPC_CMD_SEND_DATA) {
+	} else if (p.command != RXRPC_CMD_SEND_DATA) {
 		ret = -EINVAL;
 	} else if (rxrpc_is_client_call(call) &&
 		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
@@ -662,5 +703,24 @@ bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
 	mutex_unlock(&call->user_mutex);
 	return aborted;
 }
-
 EXPORT_SYMBOL(rxrpc_kernel_abort_call);
+
+/**
+ * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
+ * @sock: The socket the call is on
+ * @call: The call to be informed
+ * @tx_total_len: The amount of data to be transmitted for this call
+ *
+ * Allow a kernel service to set the total transmit length on a call.  This
+ * allows buffer-to-packet encrypt-and-copy to be performed.
+ *
+ * This function is primarily for use for setting the reply length since the
+ * request length can be set when beginning the call.
+ */
+void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
+				s64 tx_total_len)
+{
+	WARN_ON(call->tx_total_len != -1);
+	call->tx_total_len = tx_total_len;
+}
+EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
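For context on the new RXRPC_TX_LENGTH control message handled above, here is a hedged userspace sketch (not part of the patch) of how a client might declare the total request length when starting a call. It assumes SOL_RXRPC, RXRPC_USER_CALL_ID, RXRPC_TX_LENGTH and struct sockaddr_rxrpc are visible to userspace (e.g. via <linux/rxrpc.h>); the function name and call ID are hypothetical, and target-address setup and error handling are elided.

/* Hedged sketch: begin an AF_RXRPC client call with the total request
 * length declared up front via RXRPC_TX_LENGTH, as parsed by
 * rxrpc_sendmsg_cmsg() above.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>

static ssize_t send_whole_request(int fd, struct sockaddr_rxrpc *srx,
				  const void *buf, size_t len)
{
	unsigned long user_call_id = 1;		/* arbitrary user call ID */
	int64_t tx_total_len = len;		/* kernel reads this as s64 */
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	union {
		char buf[CMSG_SPACE(sizeof(user_call_id)) +
			 CMSG_SPACE(sizeof(tx_total_len))];
		struct cmsghdr align;		/* force cmsg alignment */
	} control;
	struct msghdr msg = {
		.msg_name	= srx,
		.msg_namelen	= sizeof(*srx),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control.buf,
		.msg_controllen	= sizeof(control.buf),
	};
	struct cmsghdr *cmsg;

	memset(control.buf, 0, sizeof(control.buf));

	/* RXRPC_USER_CALL_ID: mandatory, identifies the call to userspace */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));

	/* RXRPC_TX_LENGTH: total Tx data for the call; rxrpc_send_data()
	 * rejects a short or overlong send with EMSGSIZE */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_TX_LENGTH;
	cmsg->cmsg_len = CMSG_LEN(sizeof(tx_total_len));
	memcpy(CMSG_DATA(cmsg), &tx_total_len, sizeof(tx_total_len));

	/* No MSG_MORE: len must equal tx_total_len or the send fails */
	return sendmsg(fd, &msg, 0);
}

Per the kernel-doc above, in-kernel services instead set the request length when beginning the call and can set the reply length later through the new rxrpc_kernel_set_tx_length() export.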