Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  77
1 file changed, 48 insertions(+), 29 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 156350745700..7f2ab72f321a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -67,6 +67,7 @@
#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
+#include <linux/net_tstamp.h>
struct cgroup;
struct cgroup_subsys;
@@ -181,7 +182,8 @@ struct sock_common {
unsigned short skc_family;
volatile unsigned char skc_state;
unsigned char skc_reuse:4;
- unsigned char skc_reuseport:4;
+ unsigned char skc_reuseport:1;
+ unsigned char skc_ipv6only:1;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
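
skc_reuseport only ever holds a boolean, so shrinking it to one bit makes room for skc_ipv6only without growing sock_common. A userspace sketch of what feeds the new bit, assuming the usual IPV6_V6ONLY plumbing: setting that option on an AF_INET6 socket is what the kernel records here (reachable through the sk_ipv6only alias added further down). Error handling trimmed for brevity.

#include <netinet/in.h>
#include <sys/socket.h>

/* Open a TCP socket that refuses IPv4-mapped addresses; the kernel
 * records the option value in sk->sk_ipv6only for this socket.
 */
static int make_v6only_socket(void)
{
        int one = 1;
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd >= 0)
                setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one));
        return fd;
}
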
@@ -272,10 +274,13 @@ struct cg_proto;
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
* @sk_rxhash: flow hash received from netif layer
+ * @sk_txhash: computed flow hash for use on transmit
* @sk_filter: socket filtering instructions
* @sk_protinfo: private area, net family specific, when not using slab
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
+ * @sk_tsflags: SO_TIMESTAMPING socket options
+ * @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_socket: Identd and reporting IO signals
* @sk_user_data: RPC layer private data
* @sk_frag: cached page frag
@@ -317,6 +322,7 @@ struct sock {
#define sk_state __sk_common.skc_state
#define sk_reuse __sk_common.skc_reuse
#define sk_reuseport __sk_common.skc_reuseport
+#define sk_ipv6only __sk_common.skc_ipv6only
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
@@ -345,6 +351,7 @@ struct sock {
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
+ __u32 sk_txhash;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id;
unsigned int sk_ll_usec;
@@ -407,6 +414,8 @@ struct sock {
void *sk_protinfo;
struct timer_list sk_timer;
ktime_t sk_stamp;
+ u16 sk_tsflags;
+ u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
struct page_frag sk_frag;
@@ -656,6 +665,20 @@ static inline void sk_add_bind_node(struct sock *sk,
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
+/**
+ * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @offset: offset of hlist_node within the struct.
+ *
+ */
+#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset) \
+ for (pos = (head)->first; \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
+ pos = pos->next)
+
static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
/* Careful only use this in a context where these parameters
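
The new iterator exists because the chaining node is not always at the same offset inside struct sock, so the offset is supplied per call site instead of being baked into the macro. A hedged usage sketch; the chain head, the chosen node, and the counting are illustrative, not taken from this patch.

#include <net/sock.h>

/* Count IPv4 sockets on one nulls chain.  The offset of the
 * hlist_nulls_node inside struct sock is passed explicitly, which is
 * the whole point of the _offset variant.
 */
static unsigned int count_ipv4_socks(struct hlist_nulls_head *head)
{
        size_t offset = offsetof(struct sock, __sk_common.skc_nulls_node);
        struct hlist_nulls_node *node;
        unsigned int n = 0;
        struct sock *sk;

        sk_nulls_for_each_entry_offset(sk, node, head, offset)
                if (sk->sk_family == AF_INET)
                        n++;
        return n;
}
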
@@ -683,13 +706,7 @@ enum sock_flags {
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
SOCK_MEMALLOC, /* VM depends on this socket for swapping */
- SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */
- SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */
- SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */
SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
- SOCK_TIMESTAMPING_SOFTWARE, /* %SOF_TIMESTAMPING_SOFTWARE */
- SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
- SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
SOCK_FASYNC, /* fasync() active */
SOCK_RXQ_OVFL,
SOCK_ZEROCOPY, /* buffers from userspace */
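
Most of the per-mode sock flags go away because the full SOF_TIMESTAMPING_* mask now lives in sk->sk_tsflags (added above); only SOCK_TIMESTAMPING_RX_SOFTWARE remains a sock flag. An illustrative helper, not part of this patch, showing how such a check reads after the conversion:

#include <net/sock.h>
#include <linux/net_tstamp.h>

/* Illustrative: a check that used to be
 * sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE) now tests the
 * corresponding SOF_TIMESTAMPING_* bit in sk_tsflags directly.
 */
static inline bool sk_wants_tx_hwtstamp(const struct sock *sk)
{
        return sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE;
}
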
@@ -792,8 +809,7 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
* Do not take into account this skb truesize,
* to allow even a single big packet to come.
*/
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
- unsigned int limit)
+static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
@@ -804,7 +820,7 @@ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
unsigned int limit)
{
- if (sk_rcvqueues_full(sk, skb, limit))
+ if (sk_rcvqueues_full(sk, limit))
return -ENOBUFS;
__sk_add_backlog(sk, skb);
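
The dropped skb argument was unused (the comment above already notes that its truesize is deliberately ignored), so callers now pass only the socket and the limit. A simplified, illustrative caller in the style of a datagram receive path, not code from this patch:

#include <net/sock.h>

/* Illustrative early-drop check: refuse the packet once backlog plus
 * receive queue already exceed the socket receive buffer.
 */
static bool rx_should_drop(const struct sock *sk)
{
        return sk_rcvqueues_full(sk, sk->sk_rcvbuf);
}
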
@@ -971,7 +987,6 @@ struct proto {
struct sk_buff *skb);
void (*release_cb)(struct sock *sk);
- void (*mtu_reduced)(struct sock *sk);
/* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk);
@@ -1978,6 +1993,14 @@ static inline void sock_poll_wait(struct file *filp,
}
}
+static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk->sk_txhash) {
+ skb->l4_hash = 1;
+ skb->hash = sk->sk_txhash;
+ }
+}
+
/*
* Queue a received datagram if it will fit. Stream and sequenced
* protocols can't normally use this as they need to fit buffers in
@@ -1992,6 +2015,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_wfree;
+ skb_set_hash_from_sk(skb, sk);
/*
* We used to take a refcount on sk, but following operation
* is enough to guarantee sk_free() wont free this sock until
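
With the hook above, every skb charged to the socket through skb_set_owner_w() inherits sk_txhash as an L4 hash, so the stack can steer all of a connection's traffic consistently. How sk_txhash gets seeded is up to the protocol; below is a hedged sketch of a connect-time seeding. The struct flow_keys layout and flow_hash_from_keys() are assumptions about the rest of the tree in this era, and the call site is invented for illustration.

#include <net/sock.h>
#include <net/flow_keys.h>

/* Illustrative seeding of the transmit hash from the 4-tuple at
 * connect() time; not part of this header change.
 */
static void example_set_txhash(struct sock *sk, __be32 saddr, __be32 daddr,
                               __be16 sport, __be16 dport)
{
        struct flow_keys keys = {};

        keys.src = saddr;
        keys.dst = daddr;
        keys.port16[0] = sport;
        keys.port16[1] = dport;

        sk->sk_txhash = flow_hash_from_keys(&keys);
}
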
@@ -2135,21 +2159,17 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
/*
* generate control messages if
- * - receive time stamping in software requested (SOCK_RCVTSTAMP
- * or SOCK_TIMESTAMPING_RX_SOFTWARE)
+ * - receive time stamping in software requested
* - software time stamp available and wanted
- * (SOCK_TIMESTAMPING_SOFTWARE)
* - hardware time stamps available and wanted
- * (SOCK_TIMESTAMPING_SYS_HARDWARE or
- * SOCK_TIMESTAMPING_RAW_HARDWARE)
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
- sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
- (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
+ (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+ (kt.tv64 &&
+ (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
+ skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
(hwtstamps->hwtstamp.tv64 &&
- sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
- (hwtstamps->syststamp.tv64 &&
- sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
+ (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sk->sk_stamp = kt;
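
The branch now keys off sk->sk_tsflags, which is whatever userspace passed to SO_TIMESTAMPING. A minimal userspace sketch requesting software receive timestamps; the kernel stores this value in sk_tsflags, and the test above then routes the stamp into a control message:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_rx_sw_timestamps(int fd)
{
        int val = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

        /* Received packets will then carry an SCM_TIMESTAMPING cmsg. */
        if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)) < 0) {
                perror("setsockopt(SO_TIMESTAMPING)");
                return -1;
        }
        return 0;
}
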
@@ -2165,12 +2185,11 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
- (1UL << SOCK_RCVTSTAMP) | \
- (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
- (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
- (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
+ (1UL << SOCK_RCVTSTAMP))
+#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
+ SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_TS_OR_DROPS)
+ if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
__sock_recv_ts_and_drops(msg, sk, skb);
else
sk->sk_stamp = skb->tstamp;
@@ -2179,11 +2198,11 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
- * @tx_flags: filled with instructions for time stamping
+ * @tx_flags: completed with instructions for time stamping
*
- * Currently only depends on SOCK_TIMESTAMPING* flags.
+ * Note : callers should take care of initial *tx_flags value (usually 0)
*/
-void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
/**
* sk_eat_skb - Release a skb if it is no longer needed
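
Because sock_tx_timestamp() now only adds bits to *tx_flags, an illustrative transmit path (not from this patch) would zero the flags first and then hand them to the skb:

#include <net/sock.h>
#include <linux/skbuff.h>

/* Illustrative: gather the socket's timestamping requests for one
 * outgoing skb.  The caller owns the initial value of tx_flags now.
 */
static void example_tx_tstamp_setup(const struct sock *sk, struct sk_buff *skb)
{
        __u8 tx_flags = 0;

        sock_tx_timestamp(sk, &tx_flags);
        skb_shinfo(skb)->tx_flags = tx_flags;
}
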