Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 320
1 file changed, 139 insertions(+), 181 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c2d89335f637..215b5ea1cb30 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -318,9 +318,13 @@ enum {
 
 	SKB_GSO_GRE = 1 << 6,
 
-	SKB_GSO_UDP_TUNNEL = 1 << 7,
+	SKB_GSO_IPIP = 1 << 7,
 
-	SKB_GSO_MPLS = 1 << 8,
+	SKB_GSO_SIT = 1 << 8,
+
+	SKB_GSO_UDP_TUNNEL = 1 << 9,
+
+	SKB_GSO_MPLS = 1 << 10,
 };
 
 #if BITS_PER_LONG > 32
@@ -333,11 +337,6 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
-    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
-#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
-#endif
-
 /**
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
@@ -370,7 +369,6 @@ typedef unsigned char *sk_buff_data_t;
  *	@protocol: Packet protocol from driver
  *	@destructor: Destruct function
  *	@nfct: Associated connection, if any
- *	@nfct_reasm: netfilter conntrack re-assembly pointer
  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  *	@skb_iif: ifindex of device we arrived on
  *	@tc_index: Traffic control index
@@ -459,9 +457,6 @@ struct sk_buff {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	struct sk_buff		*nfct_reasm;
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
@@ -585,8 +580,8 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
 	skb->_skb_refdst = (unsigned long)dst;
 }
 
-extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
-				bool force);
+void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+			 bool force);
 
 /**
  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +629,20 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 	return (struct rtable *)skb_dst(skb);
 }
 
-extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
-extern void skb_tx_error(struct sk_buff *skb);
-extern void consume_skb(struct sk_buff *skb);
-extern void __kfree_skb(struct sk_buff *skb);
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
-extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
-extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
-			     bool *fragstolen, int *delta_truesize);
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+		      bool *fragstolen, int *delta_truesize);
 
-extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int flags, int node);
-extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+			    int node);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
@@ -660,41 +655,33 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
-extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 {
 	return __alloc_skb_head(priority, -1);
 }
 
-extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
-extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
-				 gfp_t priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-				gfp_t priority);
-extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
-				   int headroom, gfp_t gfp_mask);
-
-extern int pskb_expand_head(struct sk_buff *skb,
-			    int nhead, int ntail,
-			    gfp_t gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
-					    unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				       int newheadroom, int newtailroom,
-				       gfp_t priority);
-extern int skb_to_sgvec(struct sk_buff *skb,
-			struct scatterlist *sg, int offset,
-			int len);
-extern int skb_cow_data(struct sk_buff *skb, int tailbits,
-			struct sk_buff **trailer);
-extern int skb_pad(struct sk_buff *skb, int pad);
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+				     unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+				int newtailroom, gfp_t priority);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+		 int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)	consume_skb(a)
 
-extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-			int getfrag(void *from, char *to, int offset,
-				    int len,int odd, struct sk_buff *skb),
-			void *from, int length);
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+			    int getfrag(void *from, char *to, int offset,
+					int len, int odd, struct sk_buff *skb),
+			    void *from, int length);
 
 struct skb_seq_state {
 	__u32		lower_offset;
@@ -706,18 +693,17 @@ struct skb_seq_state {
 	__u8		*frag_data;
 };
 
-extern void skb_prepare_seq_read(struct sk_buff *skb,
-				 unsigned int from, unsigned int to,
-				 struct skb_seq_state *st);
-extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
-				 struct skb_seq_state *st);
-extern void skb_abort_seq_read(struct skb_seq_state *st);
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+			  unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+			  struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
 
-extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
-				  unsigned int to, struct ts_config *config,
-				  struct ts_state *state);
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+			   unsigned int to, struct ts_config *config,
+			   struct ts_state *state);
 
-extern void __skb_get_rxhash(struct sk_buff *skb);
+void __skb_get_rxhash(struct sk_buff *skb);
 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 {
 	if (!skb->l4_rxhash)
@@ -1095,7 +1081,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
  *	The "__skb_xxxx()" functions are the non-atomic ones that
  *	can only be called with interrupts disabled.
 */
-extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+		struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
@@ -1201,8 +1188,8 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
 	__skb_insert(newsk, prev, prev->next, list);
 }
 
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
-		       struct sk_buff_head *list);
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+		struct sk_buff_head *list);
 
 static inline void __skb_queue_before(struct sk_buff_head *list,
 				      struct sk_buff *next,
@@ -1221,7 +1208,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
 				    struct sk_buff *newsk)
 {
@@ -1238,7 +1225,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
 				   struct sk_buff *newsk)
 {
@@ -1249,7 +1236,7 @@
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
-extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev;
@@ -1270,7 +1257,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1274,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
@@ -1361,7 +1348,7 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
- * @skb to point to &size bytes at offset @off within @page. In
+ * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
@@ -1373,8 +1360,11 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
 	skb_shinfo(skb)->nr_frags = i + 1;
 }
 
-extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
-			    int off, int size, unsigned int truesize);
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+		     int size, unsigned int truesize);
+
+void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
+			  unsigned int truesize);
 
 #define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1408,8 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 /*
  *	Add data to an sk_buff
  */
-extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
 	unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1419,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 	return tmp;
 }
 
-extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
 	skb->data -= len;
@@ -1436,7 +1427,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 	return skb->data;
 }
 
-extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len -= len;
@@ -1449,7 +1440,7 @@ static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int l
 	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
 }
 
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
@@ -1753,7 +1744,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 #define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif
 
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1765,7 +1756,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 	skb_set_tail_pointer(skb, len);
 }
 
-extern void skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1838,7 +1829,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
-extern void skb_queue_purge(struct sk_buff_head *list);
+void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
@@ -1850,11 +1841,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 #define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
 #define NETDEV_PAGECNT_MAX_BIAS	NETDEV_FRAG_PAGE_MAX_SIZE
 
-extern void *netdev_alloc_frag(unsigned int fragsz);
+void *netdev_alloc_frag(unsigned int fragsz);
 
-extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-					  unsigned int length,
-					  gfp_t gfp_mask);
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+				   gfp_t gfp_mask);
 
 /**
  *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2071,6 +2061,8 @@ static inline void skb_frag_set_page(struct sk_buff *skb, int f,
 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
 }
 
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
 /**
  * skb_frag_dma_map - maps a paged fragment via the DMA API
  * @dev: the device to map the fragment to
@@ -2342,60 +2334,49 @@ static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
 #define skb_walk_frags(skb, iter)	\
 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
-extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-					   int *peeked, int *off, int *err);
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-					 int noblock, int *err);
-extern unsigned int datagram_poll(struct file *file, struct socket *sock,
-				  struct poll_table_struct *wait);
-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
-				   int offset, struct iovec *to,
-				   int size);
-extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-					    int hlen,
-					    struct iovec *iov);
-extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
-					int offset,
-					const struct iovec *from,
-					int from_offset,
-					int len);
-extern int zerocopy_sg_from_iovec(struct sk_buff *skb,
-				  const struct iovec *frm,
-				  int offset,
-				  size_t count);
-extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
-					 int offset,
-					 const struct iovec *to,
-					 int to_offset,
-					 int size);
-extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern void skb_free_datagram_locked(struct sock *sk,
-				     struct sk_buff *skb);
-extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
-			     unsigned int flags);
-extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
-			   int len, __wsum csum);
-extern int skb_copy_bits(const struct sk_buff *skb, int offset,
-			 void *to, int len);
-extern int skb_store_bits(struct sk_buff *skb, int offset,
-			  const void *from, int len);
-extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
-				     int offset, u8 *to, int len,
-				     __wsum csum);
-extern int skb_splice_bits(struct sk_buff *skb,
-			   unsigned int offset,
-			   struct pipe_inode_info *pipe,
-			   unsigned int len,
-			   unsigned int flags);
-extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void skb_split(struct sk_buff *skb,
-		      struct sk_buff *skb1, const u32 len);
-extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
-		     int shiftlen);
-extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
-extern struct sk_buff *skb_segment(struct sk_buff *skb,
-				   netdev_features_t features);
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+				    int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+				  int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+			   struct poll_table_struct *wait);
+int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+			    struct iovec *to, int size);
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+				     struct iovec *iov);
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+				 const struct iovec *from, int from_offset,
+				 int len);
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+			   int offset, size_t count);
+int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+				  const struct iovec *to, int to_offset,
+				  int size);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+			      int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+		    struct pipe_inode_info *pipe, unsigned int len,
+		    unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+
+struct skb_checksum_ops {
+	__wsum (*update)(const void *mem, int len, __wsum wsum);
+	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+};
+
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+		      __wsum csum, const struct skb_checksum_ops *ops);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+		    __wsum csum);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
@@ -2440,7 +2421,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
 	memcpy(skb->data + offset, from, len);
 }
 
-extern void skb_init(void);
+void skb_init(void);
 
 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
 {
@@ -2483,12 +2464,12 @@ static inline ktime_t net_invalid_timestamp(void)
 	return ktime_set(0, 0);
 }
 
-extern void skb_timestamping_init(void);
+void skb_timestamping_init(void);
 
 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
 
-extern void skb_clone_tx_timestamp(struct sk_buff *skb);
-extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
 
 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
 
@@ -2529,8 +2510,8 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket. Errors are silently ignored.
 */
-extern void skb_tstamp_tx(struct sk_buff *orig_skb,
-			  struct skb_shared_hwtstamps *hwtstamps);
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+		   struct skb_shared_hwtstamps *hwtstamps);
 
 static inline void sw_tx_timestamp(struct sk_buff *skb)
 {
@@ -2562,8 +2543,8 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
 */
 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
 
-extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
-extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
@@ -2593,7 +2574,7 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
 }
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
 	if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2605,18 +2586,6 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 		atomic_inc(&nfct->use);
 }
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
-{
-	if (skb)
-		atomic_inc(&skb->users);
-}
-static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
-{
-	if (skb)
-		kfree_skb(skb);
-}
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
 {
@@ -2635,10 +2604,6 @@ static inline void nf_reset(struct sk_buff *skb)
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	nf_conntrack_put_reasm(skb->nfct_reasm);
-	skb->nfct_reasm = NULL;
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(skb->nf_bridge);
 	skb->nf_bridge = NULL;
@@ -2660,10 +2625,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 	nf_conntrack_get(src->nfct);
 	dst->nfctinfo = src->nfctinfo;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	dst->nfct_reasm = src->nfct_reasm;
-	nf_conntrack_get_reasm(src->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	dst->nf_bridge = src->nf_bridge;
 	nf_bridge_get(src->nf_bridge);
@@ -2675,9 +2636,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(dst->nfct);
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	nf_conntrack_put_reasm(dst->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(dst->nf_bridge);
 #endif
@@ -2732,28 +2690,27 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
 	return skb->queue_mapping != 0;
 }
 
-extern u16 __skb_tx_hash(const struct net_device *dev,
-			 const struct sk_buff *skb,
-			 unsigned int num_tx_queues);
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+		  unsigned int num_tx_queues);
 
-#ifdef CONFIG_XFRM
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
 	return skb->sp;
-}
 #else
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
 	return NULL;
-}
 #endif
+}
 
 /* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header. */
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
 struct skb_gso_cb {
-	int mac_offset;
+	int	mac_offset;
+	int	encap_level;
 };
 
 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
@@ -2783,12 +2740,13 @@ static inline bool skb_is_gso(const struct sk_buff *skb)
 {
 	return skb_shinfo(skb)->gso_size;
 }
+/* Note: Should be called only if skb_is_gso(skb) is true */
 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
 {
 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
 
 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 {
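
The first hunk renumbers the GSO type enum: SKB_GSO_IPIP and SKB_GSO_SIT take bits 7 and 8, while SKB_GSO_UDP_TUNNEL and SKB_GSO_MPLS move up to bits 9 and 10. A minimal sketch of how a caller might test for the new IP-in-IP tunnel types follows; the helper name is hypothetical and not part of this commit.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/skbuff.h>

static bool example_skb_is_ip_tunnel_gso(const struct sk_buff *skb)
{
	/* SKB_GSO_IPIP and SKB_GSO_SIT are the bits added above;
	 * SKB_GSO_UDP_TUNNEL and SKB_GSO_MPLS merely moved to bits 9 and 10.
	 */
	return skb_is_gso(skb) &&
	       (skb_shinfo(skb)->gso_type & (SKB_GSO_IPIP | SKB_GSO_SIT));
}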
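
The new struct skb_checksum_ops and __skb_checksum() declared in this diff let a caller supply the fold/combine primitives used while walking an skb's linear and paged data. A minimal sketch, assuming the stock csum_partial() and csum_block_add() helpers from <net/checksum.h>; the example_* names are hypothetical.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/skbuff.h>
#include <net/checksum.h>

static __wsum example_csum_combine(__wsum csum, __wsum csum2,
				   int offset, int len)
{
	/* combine() is also handed the length, but the generic block add
	 * only needs the offset of the second partial sum.
	 */
	return csum_block_add(csum, csum2, offset);
}

static const struct skb_checksum_ops example_csum_ops = {
	.update  = csum_partial,
	.combine = example_csum_combine,
};

/* Checksum 'len' bytes of 'skb' starting at 'offset', folding everything
 * through the caller-supplied ops table.
 */
static __wsum example_skb_checksum(const struct sk_buff *skb, int offset,
				   int len, __wsum csum)
{
	return __skb_checksum(skb, offset, len, csum, &example_csum_ops);
}

skb_checksum() itself remains available for the common case; __skb_checksum() is the hook for callers that need a different combine step.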
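
skb_page_frag_refill(), also declared here, (re)fills a struct page_frag so the caller can copy at least the requested number of bytes into page memory. A rough sketch of a caller, with a hypothetical helper name and GFP_KERNEL chosen as an assumption about the calling context.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/skbuff.h>
#include <linux/gfp.h>

static bool example_reserve_bytes(struct page_frag *pfrag, unsigned int bytes)
{
	/* On success, pfrag->page has at least 'bytes' of free space
	 * starting at pfrag->offset; on failure the caller should back off.
	 */
	return skb_page_frag_refill(bytes, pfrag, GFP_KERNEL);
}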