author		Matt Evans <matt@ozlabs.org>		2011-07-20 15:51:00 +0000
committer	David S. Miller <davem@davemloft.net>	2011-07-21 12:38:32 -0700
commit		0ca87f05ba8bdc6791c14878464efc901ad71e99
tree		c37e25b30c22adae633dcacb2236e83086887418 /arch/powerpc/include
parent		3aeb7d2243e55ddcad3c0b402e7b09619a67f5da
net: filter: BPF 'JIT' compiler for PPC64
An implementation of a code generator for BPF programs to speed up packet
filtering on PPC64, inspired by Eric Dumazet's x86-64 version.
Filter code is generated as an ABI-compliant function in module_alloc()'d memory,
with a stack frame and prologue/epilogue emitted only when required (simple filters
need nothing more than an li/blr). The filter's local variables, M[], live in
registers. All BPF opcodes are supported, although "complicated" loads from negative
packet offsets (e.g. SKF_LL_OFF) are not yet handled.
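For illustration only, a heavily simplified sketch of the compile-and-install flow
described above; this is not the patch's code (the real implementation lives under
arch/powerpc/net/), and the size estimate and elided emission step are assumptions:

/* Illustration only -- a simplified sketch, not the patch's implementation. */
#include <linux/types.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>

static void bpf_jit_compile_sketch(struct sk_filter *fp)
{
	/* Rough worst-case image size: prologue/epilogue plus a few
	 * instructions per BPF insn (assumed bound, not the real one). */
	unsigned int alloclen = 128 + fp->len * 16;
	u32 *image = module_alloc(alloclen);

	if (!image)
		return;		/* fall back to the sk_run_filter() interpreter */

	/* ... emit prologue, filter body and epilogue here; a filter that
	 * just returns a constant collapses to "li r3,K; blr" ... */

	flush_icache_range((unsigned long)image,
			   (unsigned long)image + alloclen);
	fp->bpf_func = (void *)image;	/* dispatch to native code from now on */
}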
A couple of further optimisations are left for future work: many-pass assembly with
branch-reach reduction, and a register allocator that pushes M[] variables into
volatile registers, would both improve the generated code.
This currently supports big-endian 64-bit PowerPC only (but is fairly simple
to port to PPC32 or LE!).
Enabled in the same way as x86-64:
echo 1 > /proc/sys/net/core/bpf_jit_enable
Or, enabled with extra debug output:
echo 2 > /proc/sys/net/core/bpf_jit_enable
Signed-off-by: Matt Evans <matt@ozlabs.org>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--	arch/powerpc/include/asm/ppc-opcode.h	40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e472659d906c..e980faae4225 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -71,6 +71,42 @@
 #define PPC_INST_ERATSX			0x7c000126
 #define PPC_INST_ERATSX_DOT		0x7c000127
 
+/* Misc instructions for BPF compiler */
+#define PPC_INST_LD			0xe8000000
+#define PPC_INST_LHZ			0xa0000000
+#define PPC_INST_LWZ			0x80000000
+#define PPC_INST_STD			0xf8000000
+#define PPC_INST_STDU			0xf8000001
+#define PPC_INST_MFLR			0x7c0802a6
+#define PPC_INST_MTLR			0x7c0803a6
+#define PPC_INST_CMPWI			0x2c000000
+#define PPC_INST_CMPDI			0x2c200000
+#define PPC_INST_CMPLW			0x7c000040
+#define PPC_INST_CMPLWI			0x28000000
+#define PPC_INST_ADDI			0x38000000
+#define PPC_INST_ADDIS			0x3c000000
+#define PPC_INST_ADD			0x7c000214
+#define PPC_INST_SUB			0x7c000050
+#define PPC_INST_BLR			0x4e800020
+#define PPC_INST_BLRL			0x4e800021
+#define PPC_INST_MULLW			0x7c0001d6
+#define PPC_INST_MULHWU			0x7c000016
+#define PPC_INST_MULLI			0x1c000000
+#define PPC_INST_DIVWU			0x7c0003d6
+#define PPC_INST_RLWINM			0x54000000
+#define PPC_INST_RLDICR			0x78000004
+#define PPC_INST_SLW			0x7c000030
+#define PPC_INST_SRW			0x7c000430
+#define PPC_INST_AND			0x7c000038
+#define PPC_INST_ANDDOT			0x7c000039
+#define PPC_INST_OR			0x7c000378
+#define PPC_INST_ANDI			0x70000000
+#define PPC_INST_ORI			0x60000000
+#define PPC_INST_ORIS			0x64000000
+#define PPC_INST_NEG			0x7c0000d0
+#define PPC_INST_BRANCH			0x48000000
+#define PPC_INST_BRANCH_COND		0x40800000
+
 /* macros to insert fields into opcodes */
 #define __PPC_RA(a)	(((a) & 0x1f) << 16)
 #define __PPC_RB(b)	(((b) & 0x1f) << 11)
@@ -83,6 +119,10 @@
 #define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
 #define __PPC_WC(w)	(((w) & 0x3) << 21)
 #define __PPC_WS(w)	(((w) & 0x1f) << 11)
+#define __PPC_SH(s)	__PPC_WS(s)
+#define __PPC_MB(s)	(((s) & 0x1f) << 6)
+#define __PPC_ME(s)	(((s) & 0x1f) << 1)
+#define __PPC_BI(s)	(((s) & 0x1f) << 16)
 
 /*
  * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
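As an illustration of how these definitions combine (my own example, not code from
the patch; the JIT's actual emitter macros live under arch/powerpc/net/): an
instruction word is just a PPC_INST_* base opcode OR'd with the relevant field
macros. EX_PPC_RT() below is a hypothetical stand-in for the rD/rT field, which is
not visible in the hunk above.

/* Illustration only: how a base opcode and the field macros compose into
 * a 32-bit instruction word.  EX_PPC_RT() is an assumed helper for the
 * rD/rT field (bits 6-10); only __PPC_RA/__PPC_RB appear in this hunk.
 */
#define EX_PPC_RT(t)	(((t) & 0x1f) << 21)

static inline unsigned int ex_ppc_add(int rd, int ra, int rb)
{
	return PPC_INST_ADD | EX_PPC_RT(rd) | __PPC_RA(ra) | __PPC_RB(rb);
}

/* ex_ppc_add(3, 4, 5) == 0x7c642a14, i.e. "add r3,r4,r5".  Likewise,
 * __PPC_SH/__PPC_MB/__PPC_ME fill the shift and mask fields of an rlwinm
 * encoding, and __PPC_BI selects the CR bit tested by a conditional
 * branch built from PPC_INST_BRANCH_COND.
 */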