2019-06-01 16:08:55 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2016-06-23 00:25:07 +08:00
|
|
|
/*
|
|
|
|
* bpf_jit64.h: BPF JIT compiler for PPC64
|
|
|
|
*
|
|
|
|
* Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
|
|
|
|
* IBM Corporation
|
|
|
|
*/
|
|
|
|
#ifndef _BPF_JIT64_H
|
|
|
|
#define _BPF_JIT64_H
|
|
|
|
|
|
|
|
#include "bpf_jit.h"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Stack layout:
|
2016-09-24 04:35:00 +08:00
|
|
|
* Ensure the top half (up to local_tmp_var) stays consistent
|
|
|
|
* with our redzone usage.
|
2016-06-23 00:25:07 +08:00
|
|
|
*
|
|
|
|
* [ prev sp ] <-------------
|
2024-06-12 13:13:20 +08:00
|
|
|
* [ nv gpr save area ] 5*8 |
|
2016-09-24 04:35:00 +08:00
|
|
|
* [ tail_call_cnt ] 8 |
|
2024-06-12 13:13:20 +08:00
|
|
|
* [ local_tmp_var ] 16 |
|
2017-09-02 02:53:01 +08:00
|
|
|
* fp (r31) --> [ ebpf stack space ] up to 512 |
|
2016-06-23 00:25:07 +08:00
|
|
|
* [ frame header ] 32/112 |
|
|
|
|
* sp (r1) ---> [ stack pointer ] --------------
|
|
|
|
*/
|
|
|
|
|
2018-05-04 07:08:21 +08:00
|
|
|
/* for gpr non volatile registers BPF_REG_6 to 10 */
|
2024-06-12 13:13:20 +08:00
|
|
|
#define BPF_PPC_STACK_SAVE (5*8)
|
2016-09-24 04:35:00 +08:00
|
|
|
/* for bpf JIT code internal usage */
|
2024-06-12 13:13:20 +08:00
|
|
|
#define BPF_PPC_STACK_LOCALS 24
|
2017-09-02 02:53:01 +08:00
|
|
|
/* stack frame excluding BPF stack, ensure this is quadword aligned */
|
|
|
|
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
|
2016-09-24 04:35:00 +08:00
|
|
|
BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
|
2016-06-23 00:25:07 +08:00
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
|
|
|
/* BPF register usage */
|
2018-05-04 07:08:21 +08:00
|
|
|
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
|
|
|
|
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
|
2016-06-23 00:25:07 +08:00
|
|
|
|
|
|
|
/* BPF to ppc register mappings */
|
|
|
|
static const int b2p[] = {
|
|
|
|
/* function return value */
|
|
|
|
[BPF_REG_0] = 8,
|
|
|
|
/* function arguments */
|
|
|
|
[BPF_REG_1] = 3,
|
|
|
|
[BPF_REG_2] = 4,
|
|
|
|
[BPF_REG_3] = 5,
|
|
|
|
[BPF_REG_4] = 6,
|
|
|
|
[BPF_REG_5] = 7,
|
|
|
|
/* non volatile registers */
|
|
|
|
[BPF_REG_6] = 27,
|
|
|
|
[BPF_REG_7] = 28,
|
|
|
|
[BPF_REG_8] = 29,
|
|
|
|
[BPF_REG_9] = 30,
|
|
|
|
/* frame pointer aka BPF_REG_10 */
|
|
|
|
[BPF_REG_FP] = 31,
|
|
|
|
/* eBPF jit internal registers */
|
powerpc/bpf: Add support for bpf constant blinding
In line with similar support for other architectures by Daniel Borkmann.
'MOD Default X' from test_bpf without constant blinding:
84 bytes emitted from JIT compiler (pass:3, flen:7)
d0000000058a4688 + <x>:
0: nop
4: nop
8: std r27,-40(r1)
c: std r28,-32(r1)
10: xor r8,r8,r8
14: xor r28,r28,r28
18: mr r27,r3
1c: li r8,66
20: cmpwi r28,0
24: bne 0x0000000000000030
28: li r8,0
2c: b 0x0000000000000044
30: divwu r9,r8,r28
34: mullw r9,r28,r9
38: subf r8,r9,r8
3c: rotlwi r8,r8,0
40: li r8,66
44: ld r27,-40(r1)
48: ld r28,-32(r1)
4c: mr r3,r8
50: blr
... and with constant blinding:
140 bytes emitted from JIT compiler (pass:3, flen:11)
d00000000bd6ab24 + <x>:
0: nop
4: nop
8: std r27,-40(r1)
c: std r28,-32(r1)
10: xor r8,r8,r8
14: xor r28,r28,r28
18: mr r27,r3
1c: lis r2,-22834
20: ori r2,r2,36083
24: rotlwi r2,r2,0
28: xori r2,r2,36017
2c: xoris r2,r2,42702
30: rotlwi r2,r2,0
34: mr r8,r2
38: rotlwi r8,r8,0
3c: cmpwi r28,0
40: bne 0x000000000000004c
44: li r8,0
48: b 0x000000000000007c
4c: divwu r9,r8,r28
50: mullw r9,r28,r9
54: subf r8,r9,r8
58: rotlwi r8,r8,0
5c: lis r2,-17137
60: ori r2,r2,39065
64: rotlwi r2,r2,0
68: xori r2,r2,39131
6c: xoris r2,r2,48399
70: rotlwi r2,r2,0
74: mr r8,r2
78: rotlwi r8,r8,0
7c: ld r27,-40(r1)
80: ld r28,-32(r1)
84: mr r3,r8
88: blr
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-09-24 04:35:02 +08:00
|
|
|
[BPF_REG_AX] = 2,
|
2016-06-23 00:25:07 +08:00
|
|
|
[TMP_REG_1] = 9,
|
|
|
|
[TMP_REG_2] = 10
|
|
|
|
};
|
|
|
|
|
2018-05-04 07:08:21 +08:00
|
|
|
/* PPC NVR range -- update this if we ever use NVRs below r27 */
|
|
|
|
#define BPF_PPC_NVR_MIN 27
|
2016-06-23 00:25:07 +08:00
|
|
|
|
2019-03-15 22:51:19 +08:00
|
|
|
/*
|
|
|
|
* WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
|
|
|
|
* so ensure that it isn't in use already.
|
|
|
|
*/
|
|
|
|
#define PPC_BPF_LL(r, base, i) do { \
|
|
|
|
if ((i) % 4) { \
|
|
|
|
PPC_LI(b2p[TMP_REG_2], (i)); \
|
|
|
|
PPC_LDX(r, base, b2p[TMP_REG_2]); \
|
|
|
|
} else \
|
|
|
|
PPC_LD(r, base, i); \
|
|
|
|
} while(0)
|
|
|
|
#define PPC_BPF_STL(r, base, i) do { \
|
|
|
|
if ((i) % 4) { \
|
|
|
|
PPC_LI(b2p[TMP_REG_2], (i)); \
|
|
|
|
PPC_STDX(r, base, b2p[TMP_REG_2]); \
|
|
|
|
} else \
|
|
|
|
PPC_STD(r, base, i); \
|
|
|
|
} while(0)
|
|
|
|
#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
|
|
|
|
|
2016-06-23 00:25:07 +08:00
|
|
|
#define SEEN_FUNC 0x1000 /* might call external helpers */
|
|
|
|
#define SEEN_STACK 0x2000 /* uses BPF stack */
|
2018-05-04 07:08:21 +08:00
|
|
|
#define SEEN_TAILCALL 0x4000 /* uses tail calls */
|
2016-06-23 00:25:07 +08:00
|
|
|
|
|
|
|
struct codegen_context {
|
|
|
|
/*
|
|
|
|
* This is used to track register usage as well
|
|
|
|
* as calls to external helpers.
|
|
|
|
* - register usage is tracked with corresponding
|
2018-05-04 07:08:21 +08:00
|
|
|
* bits (r3-r10 and r27-r31)
|
2016-06-23 00:25:07 +08:00
|
|
|
* - rest of the bits can be used to track other
|
|
|
|
* things -- for now, we use bits 16 to 23
|
|
|
|
* encoded in SEEN_* macros above
|
|
|
|
*/
|
|
|
|
unsigned int seen;
|
|
|
|
unsigned int idx;
|
2017-09-02 02:53:01 +08:00
|
|
|
unsigned int stack_size;
|
2016-06-23 00:25:07 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
|
|
|
|
#endif
|