Diffstat (limited to 'arch/arm64/net/bpf_jit_comp.c')
-rw-r--r-- | arch/arm64/net/bpf_jit_comp.c | 90
1 file changed, 46 insertions(+), 44 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b405bbb54431..49ba37e4bfc0 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -31,8 +31,8 @@
 
 int bpf_jit_enable __read_mostly;
 
-#define TMP_REG_1 (MAX_BPF_REG + 0)
-#define TMP_REG_2 (MAX_BPF_REG + 1)
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -51,15 +51,16 @@ static const int bpf2a64[] = {
 	[BPF_REG_9] = A64_R(22),
 	/* read-only frame pointer to access stack */
 	[BPF_REG_FP] = A64_R(25),
-	/* temporary register for internal BPF JIT */
-	[TMP_REG_1] = A64_R(23),
-	[TMP_REG_2] = A64_R(24),
+	/* temporary registers for internal BPF JIT */
+	[TMP_REG_1] = A64_R(10),
+	[TMP_REG_2] = A64_R(11),
+	/* temporary register for blinding constants */
+	[BPF_REG_AX] = A64_R(9),
 };
 
 struct jit_ctx {
 	const struct bpf_prog *prog;
 	int idx;
-	int tmp_used;
 	int epilogue_offset;
 	int *offset;
 	u32 *image;
@@ -152,8 +153,6 @@ static void build_prologue(struct jit_ctx *ctx)
 	const u8 r8 = bpf2a64[BPF_REG_8];
 	const u8 r9 = bpf2a64[BPF_REG_9];
 	const u8 fp = bpf2a64[BPF_REG_FP];
-	const u8 tmp1 = bpf2a64[TMP_REG_1];
-	const u8 tmp2 = bpf2a64[TMP_REG_2];
 
 	/*
 	 * BPF prog stack layout
@@ -165,7 +164,7 @@ static void build_prologue(struct jit_ctx *ctx)
 	 *                        | ... | callee saved registers
 	 *                        +-----+
 	 *                        |     | x25/x26
-	 * BPF fp register => -80:+-----+ <= (BPF_FP)
+	 * BPF fp register => -64:+-----+ <= (BPF_FP)
 	 *                        |     |
 	 *                        | ... | BPF prog stack
 	 *                        |     |
@@ -187,8 +186,6 @@ static void build_prologue(struct jit_ctx *ctx)
 	/* Save callee-saved register */
 	emit(A64_PUSH(r6, r7, A64_SP), ctx);
 	emit(A64_PUSH(r8, r9, A64_SP), ctx);
-	if (ctx->tmp_used)
-		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
 
 	/* Save fp (x25) and x26. SP requires 16 bytes alignment */
 	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
@@ -208,8 +205,6 @@ static void build_epilogue(struct jit_ctx *ctx)
 	const u8 r8 = bpf2a64[BPF_REG_8];
 	const u8 r9 = bpf2a64[BPF_REG_9];
 	const u8 fp = bpf2a64[BPF_REG_FP];
-	const u8 tmp1 = bpf2a64[TMP_REG_1];
-	const u8 tmp2 = bpf2a64[TMP_REG_2];
 
 	/* We're done with BPF stack */
 	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
@@ -218,8 +213,6 @@
 	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
 
 	/* Restore callee-saved register */
-	if (ctx->tmp_used)
-		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
 	emit(A64_POP(r8, r9, A64_SP), ctx);
 	emit(A64_POP(r6, r7, A64_SP), ctx);
 
@@ -315,7 +308,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		emit(A64_UDIV(is64, dst, dst, src), ctx);
 		break;
 	case BPF_MOD:
-		ctx->tmp_used = 1;
 		emit(A64_UDIV(is64, tmp, dst, src), ctx);
 		emit(A64_MUL(is64, tmp, tmp, src), ctx);
 		emit(A64_SUB(is64, dst, dst, tmp), ctx);
@@ -388,49 +380,41 @@ emit_bswap_uxt:
 	/* dst = dst OP imm */
 	case BPF_ALU | BPF_ADD | BPF_K:
 	case BPF_ALU64 | BPF_ADD | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_ADD(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K:
 	case BPF_ALU64 | BPF_SUB | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_SUB(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_AND | BPF_K:
 	case BPF_ALU64 | BPF_AND | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_AND(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_OR | BPF_K:
 	case BPF_ALU64 | BPF_OR | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_ORR(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_XOR | BPF_K:
 	case BPF_ALU64 | BPF_XOR | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_EOR(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_MUL | BPF_K:
 	case BPF_ALU64 | BPF_MUL | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_MUL(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_DIV | BPF_K:
 	case BPF_ALU64 | BPF_DIV | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_MOD | BPF_K:
 	case BPF_ALU64 | BPF_MOD | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp2, imm, ctx);
 		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
 		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
@@ -501,12 +485,10 @@ emit_cond_jmp:
 	case BPF_JMP | BPF_JNE | BPF_K:
 	case BPF_JMP | BPF_JSGT | BPF_K:
 	case BPF_JMP | BPF_JSGE | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, imm, ctx);
 		emit(A64_CMP(1, dst, tmp), ctx);
 		goto emit_cond_jmp;
 	case BPF_JMP | BPF_JSET | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, imm, ctx);
 		emit(A64_TST(1, dst, tmp), ctx);
 		goto emit_cond_jmp;
@@ -516,7 +498,6 @@ emit_cond_jmp:
 		const u8 r0 = bpf2a64[BPF_REG_0];
 		const u64 func = (u64)__bpf_call_base + imm;
 
-		ctx->tmp_used = 1;
 		emit_a64_mov_i64(tmp, func, ctx);
 		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
 		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
@@ -562,7 +543,6 @@ emit_cond_jmp:
 	case BPF_LDX | BPF_MEM | BPF_H:
 	case BPF_LDX | BPF_MEM | BPF_B:
 	case BPF_LDX | BPF_MEM | BPF_DW:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, off, ctx);
 		switch (BPF_SIZE(code)) {
 		case BPF_W:
@@ -586,7 +566,6 @@ emit_cond_jmp:
 	case BPF_ST | BPF_MEM | BPF_B:
 	case BPF_ST | BPF_MEM | BPF_DW:
 		/* Load imm to a register then store it */
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp2, off, ctx);
 		emit_a64_mov_i(1, tmp, imm, ctx);
 		switch (BPF_SIZE(code)) {
@@ -610,7 +589,6 @@ emit_cond_jmp:
 	case BPF_STX | BPF_MEM | BPF_H:
 	case BPF_STX | BPF_MEM | BPF_B:
 	case BPF_STX | BPF_MEM | BPF_DW:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, off, ctx);
 		switch (BPF_SIZE(code)) {
 		case BPF_W:
@@ -762,31 +740,45 @@ void bpf_jit_compile(struct bpf_prog *prog)
 	/* Nothing to do here. We support Internal BPF. */
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
+	bool tmp_blinded = false;
 	struct jit_ctx ctx;
 	int image_size;
 	u8 *image_ptr;
 
 	if (!bpf_jit_enable)
-		return;
+		return orig_prog;
 
-	if (!prog || !prog->len)
-		return;
+	tmp = bpf_jit_blind_constants(prog);
+	/* If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
 
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
-	if (ctx.offset == NULL)
-		return;
+	if (ctx.offset == NULL) {
+		prog = orig_prog;
+		goto out;
+	}
 
 	/* 1. Initial fake pass to compute ctx->idx. */
 
-	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
-	if (build_body(&ctx))
-		goto out;
+	/* Fake pass to fill in ctx->offset. */
+	if (build_body(&ctx)) {
+		prog = orig_prog;
+		goto out_off;
+	}
 
 	build_prologue(&ctx);
 
@@ -797,8 +789,10 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	image_size = sizeof(u32) * ctx.idx;
 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
 				      sizeof(u32), jit_fill_hole);
-	if (header == NULL)
-		goto out;
+	if (header == NULL) {
+		prog = orig_prog;
+		goto out_off;
+	}
 
 	/* 2. Now, the actual pass. */
 
@@ -809,7 +803,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (build_body(&ctx)) {
 		bpf_jit_binary_free(header);
-		goto out;
+		prog = orig_prog;
+		goto out_off;
 	}
 
 	build_epilogue(&ctx);
@@ -817,7 +812,8 @@
 	/* 3. Extra pass to validate JITed code. */
 	if (validate_code(&ctx)) {
 		bpf_jit_binary_free(header);
-		goto out;
+		prog = orig_prog;
+		goto out_off;
 	}
 
 	/* And we're done. */
@@ -829,8 +825,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
-out:
+
+out_off:
 	kfree(ctx.offset);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
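Note on the register move: shifting TMP_REG_1/TMP_REG_2 from the callee-saved pair x23/x24 to the caller-saved x10/x11 is what lets all of the ctx->tmp_used bookkeeping above disappear, since caller-saved scratch needs no spill in the prologue/epilogue. It also shrinks the callee-saved area by one 16-byte pair, which is exactly the -80 to -64 change in the BPF_FP offset. A sketch of the resulting frame math, assuming the FP/LR pair saved at the top of the frame as in the file's full layout comment (the enum and its names are illustrative, not part of the patch):

/* Illustrative only: offsets from the A64_SP value captured at prologue
 * entry, one 16-byte slot per A64_PUSH pair emitted by build_prologue().
 */
enum a64_bpf_frame_off {
	OFF_FP_LR  = -16,	/* A64_PUSH(A64_FP, A64_LR, A64_SP) */
	OFF_R6_R7  = -32,	/* A64_PUSH(r6, r7, A64_SP): x19/x20 */
	OFF_R8_R9  = -48,	/* A64_PUSH(r8, r9, A64_SP): x21/x22 */
	OFF_BPF_FP = -64,	/* A64_PUSH(fp, A64_R(26), A64_SP): x25/x26 */
	/* before this patch, a conditionally pushed tmp1/tmp2 pair
	 * (x23/x24) occupied the -64 slot, pushing BPF_FP down to -80
	 */
};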
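Note on BPF_REG_AX: the new mapping to x9 gives the JIT a scratch register that the core constant-blinding pass can rely on. As a rough illustration of why it is needed, here is a minimal sketch of the rewrite applied to one immediate-form instruction before the (blinded) program ever reaches this JIT; blind_add_k_sketch() is a hypothetical helper, and the real rewrite lives in the core's bpf_jit_blind_constants()/bpf_jit_blind_insn() (kernel/bpf/core.c), not in this arm64 patch:

#include <linux/filter.h>
#include <linux/random.h>

/* Hypothetical sketch: blinding BPF_ALU64 | BPF_ADD | BPF_K. The
 * three-instruction shape is a simplified assumption of what the core
 * pass emits.
 */
static void blind_add_k_sketch(struct bpf_insn *out, u8 dst_reg, s32 imm)
{
	u32 rnd = prandom_u32();

	/* AX = imm ^ rnd: the raw constant never lands in the image */
	out[0] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ imm);
	/* AX ^= rnd: recover the original imm at run time */
	out[1] = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd);
	/* dst += AX: the original operation, now in register form */
	out[2] = BPF_ALU64_REG(BPF_ADD, dst_reg, BPF_REG_AX);
}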
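Note on the new signature: bpf_int_jit_compile() now returns a prog instead of mutating one in place. On success the returned prog may be the blinded clone obtained from bpf_jit_blind_constants(), and bpf_jit_prog_release_other() frees whichever of the clone/original pair is not returned, so a caller must keep using only the returned pointer. A minimal caller-side sketch of that contract; jit_or_fall_back() is hypothetical (in the kernel the actual caller is bpf_prog_select_runtime() in kernel/bpf/core.c, whose body differs):

#include <linux/filter.h>
#include <linux/printk.h>

/* Hypothetical sketch: consuming the return-value contract of
 * bpf_int_jit_compile(). Not the actual kernel/bpf/core.c code.
 */
static struct bpf_prog *jit_or_fall_back(struct bpf_prog *prog)
{
	/* May return a blinded clone; the prog passed in must not be
	 * touched again in that case, because its unused twin has
	 * already been released inside bpf_int_jit_compile().
	 */
	prog = bpf_int_jit_compile(prog);
	if (!prog->jited)
		pr_debug("bpf: JIT unavailable, using the interpreter\n");

	return prog;
}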