author | David S. Miller <davem@davemloft.net> | 2014-06-11 15:46:17 -0700 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-06-11 15:46:17 -0700 |
commit | f3591fd4c9881889dfa9203328a89316fcc834e1 (patch) | |
tree | 9eac0e7a5cf38fd37f0cb364ffb6a3e90cce3c88 /include | |
parent | 1054cc150cf760f7b58ec8d2983ee7c85fb1df0a (diff) | |
parent | 6bae1d4cc395ad46613e40c9e865ee171dc9de5c (diff) | |
Merge branch 'inet_csums'
Tom Herbert says:
====================
net: Checksum offload changes - Part IV
I am working on overhauling RX checksum offload. Goals of this effort
are:
- Specify what exactly it means when driver returns CHECKSUM_UNNECESSARY
- Preserve CHECKSUM_COMPLETE through encapsulation layers
- Don't do skb_checksum more than once per packet
- Unify GRO and non-GRO csum verification as much as possible
- Unify the checksum functions (checksum_init)
- Simplify code
What is in this fourth patch set:
- Preserve CHECKSUM_COMPLETE instead of changing it to
CHECKSUM_UNNECESSARY. This allows it to be reused correctly when
validating multiple csums in a packet (a usage sketch follows the
sign-off below).
- When SW needs to compute the packet checksum, save it as
CHECKSUM_COMPLETE. Also mark that the checksum was computed by SW.
- Add skb_gro_postpull_rcsum to udp and vxlan to make GRO work with
CHECKSUM_COMPLETE (see the GRO sketch after the diff below).
v2: Removed patch setting skb_encapsulation when validating checksum
in tcp_gro_receive
Please review carefully and test if possible; mucking with basic
checksum functions is always a little precarious :-)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
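To make the new semantics concrete, here is a minimal sketch that is not part of this series: a hypothetical protocol receive function (example_l4_rcv is an invented name) modeled on the kernel's TCP/UDP receive paths, showing how the skb_checksum_init() helper touched below is consumed now that success sets skb->csum_valid instead of overwriting skb->ip_summed. inet_compute_pseudo() is assumed to be available from the earlier parts of the series.

```c
/*
 * Illustrative sketch only, not taken from this patch set.
 * On success the validation helpers now set skb->csum_valid rather
 * than rewriting skb->ip_summed, so skb_csum_unnecessary() still
 * returns true later while a CHECKSUM_COMPLETE value (and skb->csum)
 * is preserved for any inner checksums.
 */
#include <linux/skbuff.h>
#include <net/ip.h>	/* inet_compute_pseudo(), assumed from earlier parts */

static int example_l4_rcv(struct sk_buff *skb)
{
	/* Validate the L4 checksum against the pseudo header. A
	 * non-zero return is the folded checksum of a bad packet;
	 * zero means the packet is good and skb->csum_valid is set.
	 */
	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		return -EINVAL;

	/* From here on skb_csum_unnecessary(skb) is true, and a
	 * device-supplied CHECKSUM_COMPLETE value is still intact for
	 * an encapsulated payload to reuse.
	 */
	return 0;
}
```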
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/skbuff.h | 24 |
1 file changed, 15 insertions, 9 deletions
```diff
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1f50bfe2243d..5b5cd3189c98 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -572,7 +572,9 @@ struct sk_buff {
 	 */
 	__u8			encapsulation:1;
 	__u8			encap_hdr_csum:1;
-	/* 5/7 bit hole (depending on ndisc_nodetype presence) */
+	__u8			csum_valid:1;
+	__u8			csum_complete_sw:1;
+	/* 3/5 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -2735,7 +2737,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
-	return skb->ip_summed & CHECKSUM_UNNECESSARY;
+	return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid);
 }
 
 /**
@@ -2769,10 +2771,8 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
 						  bool zero_okay,
 						  __sum16 check)
 {
-	if (skb_csum_unnecessary(skb)) {
-		return false;
-	} else if (zero_okay && !check) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
+		skb->csum_valid = 1;
 		return false;
 	}
 
@@ -2799,15 +2799,20 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		if (!csum_fold(csum_add(psum, skb->csum))) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->csum_valid = 1;
 			return 0;
 		}
 	}
 
 	skb->csum = psum;
 
-	if (complete || skb->len <= CHECKSUM_BREAK)
-		return __skb_checksum_complete(skb);
+	if (complete || skb->len <= CHECKSUM_BREAK) {
+		__sum16 csum;
+
+		csum = __skb_checksum_complete(skb);
+		skb->csum_valid = !csum;
+		return csum;
+	}
 
 	return 0;
 }
@@ -2831,6 +2836,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
 	zero_okay, check, compute_pseudo)			\
 ({								\
 	__sum16 __ret = 0;					\
+	skb->csum_valid = 0;					\
 	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
 		__ret = __skb_checksum_validate_complete(skb,		\
 				complete, compute_pseudo(skb, proto));	\
```
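For the GRO side of the series, the sketch below is hypothetical and only loosely modeled on the vxlan/udp GRO receive paths the cover letter mentions; example_gro_receive and struct example_encap_hdr are invented names. It shows where skb_gro_postpull_rcsum() fits: after pulling an outer header, the CHECKSUM_COMPLETE value tracked by GRO is adjusted so it still covers the remaining data instead of being thrown away.

```c
/*
 * Hypothetical GRO receive callback illustrating the
 * skb_gro_postpull_rcsum() calls this series adds to udp/vxlan.
 * The encapsulation header layout and function names are made up
 * for the example.
 */
#include <linux/netdevice.h>

struct example_encap_hdr {
	__be16 flags;
	__be16 vni;
};

static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	struct example_encap_hdr *eh;
	unsigned int hlen = sizeof(*eh);
	unsigned int off = skb_gro_offset(skb);

	/* Map the outer header, falling back to the slow path if it
	 * is not linear in the GRO header area.
	 */
	eh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + hlen)) {
		eh = skb_gro_header_slow(skb, off + hlen, off);
		if (unlikely(!eh))
			return NULL;
	}

	/* Advance past the outer header ... */
	skb_gro_pull(skb, hlen);
	/* ... and subtract it from the CHECKSUM_COMPLETE value so the
	 * preserved checksum still matches the data that remains.
	 */
	skb_gro_postpull_rcsum(skb, eh, hlen);

	/* The inner protocol's GRO receive would be invoked here. */
	return NULL;
}
```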