/* bpf_jit.S: Packet/header access helper functions
 * for PPC64 BPF compiler.
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <asm/ppc_asm.h>
#include "bpf_jit.h"

/*
 * All of these routines are called directly from generated code,
 * whose register usage is:
 *
 * r3		skb
 * r4,r5	A,X
 * r6		*** address parameter to helper ***
 * r7-r10	scratch
 * r14		skb->data
 * r15		skb headlen
 * r16-31	M[]
 */
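
/*
 * For reference, the call site the JIT (bpf_jit_comp.c) emits for an
 * absolute load is roughly the following -- a sketch, not the exact
 * emission:
 *
 *	<r_scratch1 = address of sk_load_*>
 *	mtlr	r_scratch1
 *	<r_addr = K, the constant offset>
 *	blrl
 *	blt	<epilogue>	(cr0 = LT signals failure; the filter
 *				 then returns 0)
 */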

/*
 * To consider: These helpers are so small it could be better to just
 * generate them inline.  Inline code can do the simple headlen check
 * then branch directly to slow_path_XXX if required.  (In fact, could
 * load a spare GPR with the address of slow_path_generic and pass size
 * as an argument, making the call site a mtlr, li and blrl.)
 */
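
/*
 * A sketch of that hypothetical call site (slow_path_generic and the
 * register choices below are illustrative only; nothing emits this
 * today):
 *
 *	<spare GPR preloaded with the address of slow_path_generic>
 *	mtlr	r_scratch2
 *	li	r_scratch1, 4	(size argument)
 *	blrl
 */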
	.globl	sk_load_word
sk_load_word:
	cmpdi	r_addr, 0
	blt	bpf_slow_path_word_neg
	.globl	sk_load_word_positive_offset
sk_load_word_positive_offset:
	/* Are we accessing past headlen? */
	subi	r_scratch1, r_HL, 4
	cmpd	r_scratch1, r_addr
	blt	bpf_slow_path_word
	/* Nope, just hitting the header.  cr0 here is eq or gt! */
#ifdef __LITTLE_ENDIAN__
	lwbrx	r_A, r_D, r_addr
#else
	lwzx	r_A, r_D, r_addr
#endif
	blr	/* Return success, cr0 != LT */

	.globl	sk_load_half
sk_load_half:
	cmpdi	r_addr, 0
	blt	bpf_slow_path_half_neg
	.globl	sk_load_half_positive_offset
sk_load_half_positive_offset:
	subi	r_scratch1, r_HL, 2
	cmpd	r_scratch1, r_addr
	blt	bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
	lhbrx	r_A, r_D, r_addr
#else
	lhzx	r_A, r_D, r_addr
#endif
	blr

	.globl	sk_load_byte
sk_load_byte:
	cmpdi	r_addr, 0
	blt	bpf_slow_path_byte_neg
	.globl	sk_load_byte_positive_offset
sk_load_byte_positive_offset:
	cmpd	r_HL, r_addr
	ble	bpf_slow_path_byte
	lbzx	r_A, r_D, r_addr
	blr

/*
 * BPF_LDX | BPF_B | BPF_MSH: ldxb  4*([offset]&0xf)
 * r_addr is the offset value
 */
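/*
 * E.g. the classic use is grabbing the IPv4 header length: a
 * version/IHL octet of 0x45 yields X = 4 * (0x45 & 0xf) = 20 bytes.
 */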
	.globl	sk_load_byte_msh
sk_load_byte_msh:
	cmpdi	r_addr, 0
	blt	bpf_slow_path_byte_msh_neg
	.globl	sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
	cmpd	r_HL, r_addr
	ble	bpf_slow_path_byte_msh
	lbzx	r_X, r_D, r_addr
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2	/* X = 4 * (X & 0xf) */
	blr

/* Call out to skb_copy_bits:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
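/*
 * For reference, the C prototype is:
 *	int skb_copy_bits(const struct sk_buff *skb, int offset,
 *			  void *to, int len);
 * so r3 = skb (already in place), r4 = offset, r5 = buffer on our
 * stack, r6 = length, and r3 = 0 on return means success.
 */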
#define bpf_slow_path_common(SIZE) \
	mflr	r0; \
	std	r0, 16(r1); \
	/* R3 goes in parameter space of caller's frame */ \
	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
	addi	r5, r1, BPF_PPC_STACK_BASIC+(2*8); \
	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
	/* R3 = r_skb, as passed */ \
	mr	r4, r_addr; \
	li	r6, SIZE; \
	bl	skb_copy_bits; \
	nop; /* the linker may patch this into a TOC restore */ \
	/* R3 = 0 on success */ \
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME; \
	ld	r0, 16(r1); \
	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
	mtlr	r0; \
	cmpdi	r3, 0; \
	blt	bpf_error; /* cr0 = LT */ \
	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
	/* Great success! */

bpf_slow_path_word:
	bpf_slow_path_common(4)
	/* Data value is on stack, and cr0 != LT */
	lwz	r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
	blr

bpf_slow_path_half:
	bpf_slow_path_common(2)
	lhz	r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
	blr

bpf_slow_path_byte:
	bpf_slow_path_common(1)
	lbz	r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
	blr

bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2	/* X = 4 * (X & 0xf) */
	blr

/* Call out to bpf_internal_load_pointer_neg_helper:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
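/*
 * For reference, the helper's C prototype (net/core/filter.c) is:
 *	void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *						   int k, unsigned int size);
 * it returns a pointer to the data, or NULL on failure, hence the
 * cmpldi/beq below.
 */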
#define sk_negative_common(SIZE) \
	mflr	r0; \
	std	r0, 16(r1); \
	/* R3 goes in parameter space of caller's frame */ \
	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
	/* R3 = r_skb, as passed */ \
	mr	r4, r_addr; \
	li	r5, SIZE; \
	bl	bpf_internal_load_pointer_neg_helper; \
	nop; /* the linker may patch this into a TOC restore */ \
	/* R3 != 0 on success */ \
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME; \
	ld	r0, 16(r1); \
	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
	mtlr	r0; \
	cmpldi	r3, 0; \
	beq	bpf_error_slow; /* cr0 = EQ */ \
	mr	r_addr, r3; \
	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
	/* Great success! */

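/*
 * Negative offsets: sanity-check the range first.  lis r_scratch1,-32
 * builds -32 << 16 = -0x200000, which is SKF_LL_OFF; anything below
 * that is not a valid negative offset, so fail with cr0 = LT.
 */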
bpf_slow_path_word_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_word_negative_offset
sk_load_word_negative_offset:
	sk_negative_common(4)
	lwz	r_A, 0(r_addr)
	blr

bpf_slow_path_half_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_half_negative_offset
sk_load_half_negative_offset:
	sk_negative_common(2)
	lhz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_negative_offset
sk_load_byte_negative_offset:
	sk_negative_common(1)
	lbz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_msh_neg:
	lis	r_scratch1,-32	/* SKF_LL_OFF */
	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
	sk_negative_common(1)
	lbz	r_X, 0(r_addr)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2	/* X = 4 * (X & 0xf) */
	blr

bpf_error_slow:
	/* fabricate a cr0 = lt */
	li	r_scratch1, -1
	cmpdi	r_scratch1, 0
bpf_error:
	/* Entered with cr0 = lt */
	li	r3, 0
	/* Generated code will 'blt epilogue', returning 0. */
	blr