author		Linus Torvalds <torvalds@linux-foundation.org>	2012-04-06 10:37:38 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-04-06 10:37:38 -0700
commit		23f347ef63aa36b5a001b6791f657cd0e2a04de3
tree		ce06ebdccd16b99265b3e74f8e9b7bd1e29cf465 /arch/x86
parent		314489bd4c7780fde6a069783d5128f6cef52919
parent		110c43304db6f06490961529536c362d9ac5732f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:
1) Fix inaccuracies in network driver interface documentation, from Ben
Hutchings.
2) Fix handling of negative offsets in BPF JITs, from Jan Seiffert. (A
user-space sketch that exercises this path appears after the diff below.)
3) Compile warning, locking, and refcounting fixes in netfilter's
xt_CT, from Pablo Neira Ayuso.
4) phonet sendmsg needs to validate user length just like any other
datagram protocol, fix from Sasha Levin.
5) IPv6 multicast code uses the wrong loop index, from RongQing Li.
6) Link handling and firmware fixes in bnx2x driver from Yaniv Rosner
and Yuval Mintz.
7) mlx4 erroneously allocates 4 pages at a time, regardless of page
size, fix from Thadeu Lima de Souza Cascardo.
8) SCTP socket option wasn't extended in a backwards compatible way,
fix from Thomas Graf.
9) Add missing address change event emissions to bonding, from Shlomo
Pongratz.
10) /proc/net/dev regressed because it uses a private offset to track
where we are in the hash table, but this doesn't track the offset
pullback that the seq_file code does, resulting in some entries being
missed in large dumps.
Fix from Eric Dumazet.
11) do_tcp_sendpage() unloads the send queue way too fast, because it
invokes tcp_push() when it shouldn't. Let the natural sequence
generated by the splice paths, and the associated MSG_MORE
settings, guide the tcp_push() calls.
Otherwise what goes out of TCP is spaghetti and doesn't batch
effectively into GSO/TSO clusters.
From Eric Dumazet. (A short MSG_MORE sketch follows this list.)
12) Once we put an SKB into either the netlink receiver's queue or a
socket error queue, it can be consumed and freed, so we cannot
touch it after queueing it like that.
Fixes from Eric Dumazet. (A minimal illustration follows this list.)
13) PPP has the annoying behavior that, for every transmit call, it
immediately stops the TX queue, then calls down into the next layer
to transmit the PPP frame.
But if that next layer can take it immediately, it just un-stops the
TX queue right before returning from the transmit method.
Besides being useless work, this makes several facilities unusable,
in particular things like the equalizers. Well-behaved devices should
only stop the TX queue when they really are full, which in PPP's case
means when it gets backlogged to the downstream device.
David Woodhouse therefore fixed PPP to not stop the TX queue until
its downstream can't take data any more. (A sketch of the well-behaved
pattern follows this list.)
14) IFF_UNICAST_FLT got accidentally lost in some recent stmmac driver
changes; re-add it. From Marc Kleine-Budde.
15) Fix link flaps in ixgbe, from Eric W. Multanen.
16) Descriptor writeback fixes in e1000e from Matthew Vick.
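
A side note on item 11: MSG_MORE is the user-visible half of this
batching contract. A sender that sets it tells TCP not to push a
partial segment because more data follows; the splice/sendpage paths
signal the same thing internally. A minimal sketch, assuming fd is an
already-connected TCP socket (the header/body split is hypothetical):

#include <sys/socket.h>
#include <stddef.h>

static void send_batched(int fd, const void *hdr, size_t hlen,
			 const void *body, size_t blen)
{
	/* MSG_MORE: hold off pushing, more data follows */
	send(fd, hdr, hlen, MSG_MORE);
	/* the last piece without MSG_MORE lets TCP push the accumulated
	 * data, so it can coalesce into GSO/TSO-friendly segments */
	send(fd, body, blen, 0);
}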
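
On item 12, the underlying rule is that queueing an skb transfers
ownership: a concurrent reader may dequeue and free it the instant it
becomes visible. A hedged sketch of the safe shape (simplified, not
the exact kernel fix):

	/* capture everything needed from the skb *before* queueing... */
	int len = skb->len;

	skb_queue_tail(&sk->sk_error_queue, skb);

	/* ...because from here on the skb may already be freed */
	if (len)
		sk->sk_error_report(sk);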
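
And for item 13, the well-behaved transmit pattern looks roughly like
this in a driver's ndo_start_xmit (a sketch with a hypothetical foo_
driver, not the actual PPP code):

#include <linux/netdevice.h>

struct foo_priv;					/* hypothetical driver state */
static void foo_queue_frame(struct foo_priv *priv, struct sk_buff *skb);
static bool foo_downstream_full(struct foo_priv *priv);

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	foo_queue_frame(priv, skb);	/* hand the frame downstream */

	/* stop the queue only when the downstream really is backlogged,
	 * not unconditionally on every packet */
	if (foo_downstream_full(priv))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}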
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
net: fix a race in sock_queue_err_skb()
netlink: fix races after skb queueing
doc, net: Update ndo_start_xmit return type and values
doc, net: Remove instruction to set net_device::trans_start
doc, net: Update netdev operation names
doc, net: Update documentation of synchronisation for TX multiqueue
doc, net: Remove obsolete reference to dev->poll
ethtool: Remove exception to the requirement of holding RTNL lock
MAINTAINERS: update for Marvell Ethernet drivers
bonding: properly unset current_arp_slave on slave link up
phonet: Check input from user before allocating
tcp: tcp_sendpages() should call tcp_push() once
ipv6: fix array index in ip6_mc_add_src()
mlx4: allocate just enough pages instead of always 4 pages
stmmac: re-add IFF_UNICAST_FLT for dwmac1000
bnx2x: Clear MDC/MDIO warning message
bnx2x: Fix BCM57711+BCM84823 link issue
bnx2x: Clear BCM84833 LED after fan failure
bnx2x: Fix BCM84833 PHY FW version presentation
bnx2x: Fix link issue for BCM8727 boards.
...
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/net/bpf_jit.S		122
-rw-r--r--	arch/x86/net/bpf_jit_comp.c	 41
2 files changed, 115 insertions, 48 deletions
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 66870223f8c5..877b9a1b2152 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -18,17 +18,17 @@
  * r9d : hlen = skb->len - skb->data_len
  */
 #define SKBDATA	%r8
-
-sk_load_word_ind:
-	.globl	sk_load_word_ind
-
-	add	%ebx,%esi	/* offset += X */
-#	test	%esi,%esi	/* if (offset < 0) goto bpf_error; */
-	js	bpf_error
+#define SKF_MAX_NEG_OFF	$(-0x200000) /* SKF_LL_OFF from filter.h */
 
 sk_load_word:
 	.globl	sk_load_word
 
+	test	%esi,%esi
+	js	bpf_slow_path_word_neg
+
+sk_load_word_positive_offset:
+	.globl	sk_load_word_positive_offset
+
 	mov	%r9d,%eax		# hlen
 	sub	%esi,%eax		# hlen - offset
 	cmp	$3,%eax
@@ -37,16 +37,15 @@ sk_load_word:
 	bswap	%eax			/* ntohl() */
 	ret
 
-
-sk_load_half_ind:
-	.globl	sk_load_half_ind
-
-	add	%ebx,%esi	/* offset += X */
-	js	bpf_error
-
 sk_load_half:
 	.globl	sk_load_half
 
+	test	%esi,%esi
+	js	bpf_slow_path_half_neg
+
+sk_load_half_positive_offset:
+	.globl	sk_load_half_positive_offset
+
 	mov	%r9d,%eax
 	sub	%esi,%eax		# hlen - offset
 	cmp	$1,%eax
@@ -55,14 +54,15 @@ sk_load_half:
 	rol	$8,%ax			# ntohs()
 	ret
 
-sk_load_byte_ind:
-	.globl	sk_load_byte_ind
-	add	%ebx,%esi	/* offset += X */
-	js	bpf_error
-
 sk_load_byte:
 	.globl	sk_load_byte
 
+	test	%esi,%esi
+	js	bpf_slow_path_byte_neg
+
+sk_load_byte_positive_offset:
+	.globl	sk_load_byte_positive_offset
+
 	cmp	%esi,%r9d	/* if (offset >= hlen) goto bpf_slow_path_byte */
 	jle	bpf_slow_path_byte
 	movzbl	(SKBDATA,%rsi),%eax
@@ -73,25 +73,21 @@ sk_load_byte:
  *
  * Implements BPF_S_LDX_B_MSH : ldxb  4*([offset]&0xf)
  * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value, already known positive
+ * Inputs : %esi is the offset value
  */
-ENTRY(sk_load_byte_msh)
-	CFI_STARTPROC
+sk_load_byte_msh:
+	.globl	sk_load_byte_msh
+	test	%esi,%esi
+	js	bpf_slow_path_byte_msh_neg
+
+sk_load_byte_msh_positive_offset:
+	.globl	sk_load_byte_msh_positive_offset
 	cmp	%esi,%r9d	/* if (offset >= hlen) goto bpf_slow_path_byte_msh */
 	jle	bpf_slow_path_byte_msh
 	movzbl	(SKBDATA,%rsi),%ebx
 	and	$15,%bl
 	shl	$2,%bl
 	ret
-	CFI_ENDPROC
-ENDPROC(sk_load_byte_msh)
-
-bpf_error:
-# force a return 0 from jit handler
-	xor	%eax,%eax
-	mov	-8(%rbp),%rbx
-	leaveq
-	ret
 
 /* rsi contains offset and can be scratched */
 #define bpf_slow_path_common(LEN)		\
@@ -138,3 +134,67 @@ bpf_slow_path_byte_msh:
 	shl	$2,%al
 	xchg	%eax,%ebx
 	ret
+
+#define sk_negative_common(SIZE)				\
+	push	%rdi;	/* save skb */				\
+	push	%r9;						\
+	push	SKBDATA;					\
+/* rsi already has offset */					\
+	mov	$SIZE,%ecx;	/* size */			\
+	call	bpf_internal_load_pointer_neg_helper;		\
+	test	%rax,%rax;					\
+	pop	SKBDATA;					\
+	pop	%r9;						\
+	pop	%rdi;						\
+	jz	bpf_error
+
+
+bpf_slow_path_word_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi	/* test range */
+	jl	bpf_error	/* offset lower -> error */
+sk_load_word_negative_offset:
+	.globl	sk_load_word_negative_offset
+	sk_negative_common(4)
+	mov	(%rax), %eax
+	bswap	%eax
+	ret
+
+bpf_slow_path_half_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_half_negative_offset:
+	.globl	sk_load_half_negative_offset
+	sk_negative_common(2)
+	mov	(%rax),%ax
+	rol	$8,%ax
+	movzwl	%ax,%eax
+	ret
+
+bpf_slow_path_byte_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_byte_negative_offset:
+	.globl	sk_load_byte_negative_offset
+	sk_negative_common(1)
+	movzbl	(%rax), %eax
+	ret
+
+bpf_slow_path_byte_msh_neg:
+	cmp	SKF_MAX_NEG_OFF, %esi
+	jl	bpf_error
+sk_load_byte_msh_negative_offset:
+	.globl	sk_load_byte_msh_negative_offset
+	xchg	%eax,%ebx /* dont lose A , X is about to be scratched */
+	sk_negative_common(1)
+	movzbl	(%rax),%eax
+	and	$15,%al
+	shl	$2,%al
+	xchg	%eax,%ebx
+	ret
+
+bpf_error:
+# force a return 0 from jit handler
+	xor	%eax,%eax
+	mov	-8(%rbp),%rbx
+	leaveq
+	ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5a5b6e4dd738..0597f95b6da6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -30,7 +30,10 @@ int bpf_jit_enable __read_mostly;
  * assembly code in arch/x86/net/bpf_jit.S
  */
 extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
-extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
+extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
 
 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@ -117,6 +120,8 @@ static inline void bpf_flush_icache(void *start, void *end)
 	set_fs(old_fs);
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 void bpf_jit_compile(struct sk_filter *fp)
 {
@@ -473,44 +478,46 @@ void bpf_jit_compile(struct sk_filter *fp)
 #endif
 			break;
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 common_load:		seen |= SEEN_DATAREF;
-			if ((int)K < 0) {
-				/* Abort the JIT because __load_pointer() is needed. */
-				goto out;
-			}
 			t_offset = func - (image + addrs[i]);
 			EMIT1_off32(0xbe, K); /* mov imm32,%esi */
 			EMIT1_off32(0xe8, t_offset); /* call */
 			break;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 			goto common_load;
 		case BPF_S_LDX_B_MSH:
-			if ((int)K < 0) {
-				/* Abort the JIT because __load_pointer() is needed. */
-				goto out;
-			}
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			seen |= SEEN_DATAREF | SEEN_XREG;
-			t_offset = sk_load_byte_msh - (image + addrs[i]);
+			t_offset = func - (image + addrs[i]);
 			EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
 			EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
 			break;
 		case BPF_S_LD_W_IND:
-			func = sk_load_word_ind;
+			func = sk_load_word;
 common_load_ind:	seen |= SEEN_DATAREF | SEEN_XREG;
 			t_offset = func - (image + addrs[i]);
-			EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
+			if (K) {
+				if (is_imm8(K)) {
+					EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
+				} else {
+					EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
+					EMIT(K, 4);
+				}
+			} else {
+				EMIT2(0x89,0xde); /* mov %ebx,%esi */
+			}
 			EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
 			break;
 		case BPF_S_LD_H_IND:
-			func = sk_load_half_ind;
+			func = sk_load_half;
 			goto common_load_ind;
 		case BPF_S_LD_B_IND:
-			func = sk_load_byte_ind;
+			func = sk_load_byte;
 			goto common_load_ind;
 		case BPF_S_JMP_JA:
 			t_offset = addrs[i + K] - addrs[i];
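
For context on the negative-offset paths added above: classic BPF
reserves large negative offsets, SKF_LL_OFF (-0x200000) and
SKF_NET_OFF (-0x100000) from linux/filter.h, as bases for loads
relative to the link-layer and network headers. The x86 JIT previously
refused to compile such loads; with this change they are routed through
bpf_internal_load_pointer_neg_helper. A user-space sketch of a filter
that exercises the new path, assuming fd is a PF_PACKET socket created
elsewhere:

#include <linux/filter.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int attach_neg_off_filter(int fd)
{
	struct sock_filter insns[] = {
		/* A = byte at network header + 9 (IPv4 protocol field),
		 * i.e. a load with a negative base offset */
		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
		/* accept the packet if TCP, otherwise drop it */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}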