From 163541e6ba3426b21884af5a2f498c9d2ce77f00 Mon Sep 17 00:00:00 2001
From: Jiong Wang
Date: Fri, 24 May 2019 23:25:22 +0100
Subject: [PATCH] arm: bpf: eliminate zero extension code-gen

Cc: Shubham Bansal
Signed-off-by: Jiong Wang
Signed-off-by: Alexei Starovoitov
---
 arch/arm/net/bpf_jit_32.c | 42 +++++++++++++++++++++++++++++++-----------
 1 file changed, 31 insertions(+), 11 deletions(-)

diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c8bfbbfdfcc3..97a6b4b2a115 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -736,7 +736,8 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
 
 		/* ALU operation */
 		emit_alu_r(rd[1], rs, true, false, op, ctx);
-		emit_a32_mov_i(rd[0], 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			emit_a32_mov_i(rd[0], 0, ctx);
 	}
 
 	arm_bpf_put_reg64(dst, rd, ctx);
@@ -758,8 +759,9 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
 				  struct jit_ctx *ctx) {
 	if (!is64) {
 		emit_a32_mov_r(dst_lo, src_lo, ctx);
-		/* Zero out high 4 bytes */
-		emit_a32_mov_i(dst_hi, 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			/* Zero out high 4 bytes */
+			emit_a32_mov_i(dst_hi, 0, ctx);
 	} else if (__LINUX_ARM_ARCH__ < 6 &&
 		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
 		/* complete 8 byte move */
@@ -1060,17 +1062,20 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src,
 	case BPF_B:
 		/* Load a Byte */
 		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
-		emit_a32_mov_i(rd[0], 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			emit_a32_mov_i(rd[0], 0, ctx);
 		break;
 	case BPF_H:
 		/* Load a HalfWord */
 		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
-		emit_a32_mov_i(rd[0], 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			emit_a32_mov_i(rd[0], 0, ctx);
 		break;
 	case BPF_W:
 		/* Load a Word */
 		emit(ARM_LDR_I(rd[1], rm, off), ctx);
-		emit_a32_mov_i(rd[0], 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			emit_a32_mov_i(rd[0], 0, ctx);
 		break;
 	case BPF_DW:
 		/* Load a Double Word */
@@ -1359,6 +1364,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_ALU64 | BPF_MOV | BPF_X:
 		switch (BPF_SRC(code)) {
 		case BPF_X:
+			if (imm == 1) {
+				/* Special mov32 for zext */
+				emit_a32_mov_i(dst_hi, 0, ctx);
+				break;
+			}
 			emit_a32_mov_r64(is64, dst, src, ctx);
 			break;
 		case BPF_K:
@@ -1438,7 +1448,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		}
 		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
 		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
-		emit_a32_mov_i(dst_hi, 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			emit_a32_mov_i(dst_hi, 0, ctx);
 		break;
 	case BPF_ALU64 | BPF_DIV | BPF_K:
 	case BPF_ALU64 | BPF_DIV | BPF_X:
@@ -1453,7 +1464,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 			return -EINVAL;
 		if (imm)
 			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
-		emit_a32_mov_i(dst_hi, 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			emit_a32_mov_i(dst_hi, 0, ctx);
 		break;
 	/* dst = dst << imm */
 	case BPF_ALU64 | BPF_LSH | BPF_K:
@@ -1488,7 +1500,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	/* dst = ~dst */
 	case BPF_ALU | BPF_NEG:
 		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
-		emit_a32_mov_i(dst_hi, 0, ctx);
+		if (!ctx->prog->aux->verifier_zext)
+			emit_a32_mov_i(dst_hi, 0, ctx);
 		break;
 	/* dst = ~dst (64 bit) */
 	case BPF_ALU64 | BPF_NEG:
@@ -1544,11 +1557,13 @@ emit_bswap_uxt:
 #else /* ARMv6+ */
 			emit(ARM_UXTH(rd[1], rd[1]), ctx);
 #endif
-			emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
+			if (!ctx->prog->aux->verifier_zext)
+				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
 			break;
 		case 32:
 			/* zero-extend 32 bits into 64 bits */
-			emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
+			if (!ctx->prog->aux->verifier_zext)
+				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
 			break;
 		case 64:
 			/* nop */
@@ -1838,6 +1853,11 @@ void bpf_jit_compile(struct bpf_prog *prog)
 	/* Nothing to do here. We support Internal BPF. */
 }
 
+bool bpf_jit_needs_zext(void)
+{
+	return true;
+}
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;