selftests: bpf: adjust several test_verifier helpers for insn insertion
- bpf_fill_ld_abs_vlan_push_pop:
  Prevent zext from happening inside the PUSH_CNT loop. It could be
  triggered by BPF_LD_ABS (32-bit def) + BPF_JMP (64-bit use), or by
  BPF_LD_ABS + EXIT (64-bit use of R0). So, change BPF_JMP to BPF_JMP32
  and redefine R0 on the exit path to cut off the data flow from inside
  the loop.

- bpf_fill_jump_around_ld_abs:
  The jump range is limited to 16 bits. Every ld_abs is replaced by 6
  insns, but on arches like arm, ppc etc. there will be one BPF_ZEXT
  inserted to extend the error value of the inlined ld_abs sequence,
  which then contains 7 insns. So, set the divisor to 7 so the test case
  works on all arches (see the illustrative arithmetic below).

- bpf_fill_scale1/bpf_fill_scale2:
  Both contain ~1M BPF_ALU32_IMM insns, which would trigger ~1M insn
  patcher calls because of the later hi32 randomization when
  BPF_F_TEST_RND_HI32 is set for bpf selftests. The insn patcher is not
  efficient enough for 1M calls and would hang the machine. So, change to
  BPF_ALU64_IMM to avoid hi32 randomization.

Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
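For reference, a minimal arithmetic sketch (not part of the patch) of why the divisor moves from 6 to 7: the BPF jump offset field is a signed 16-bit value, so a forward jump can skip at most 2^15 - 1 = 32767 instructions. Assuming each ld_abs block may expand to 7 insns once the extra zero-extension is inserted, the old divisor can overshoot that range while the new one stays exactly within it:

#include <stdio.h>

/* Illustrative only: worst-case expansion of the ld_abs test program
 * under the old and new divisors, assuming each ld_abs block may expand
 * to 7 insns (6 for the inlined sequence + 1 BPF_ZEXT on arm/ppc etc.).
 */
int main(void)
{
	const unsigned int max_jmp = (1 << 15) - 1;	/* signed 16-bit jump offset */
	unsigned int len_old = (1 << 15) / 6;		/* 5461 ld_abs blocks */
	unsigned int len_new = (1 << 15) / 7;		/* 4681 ld_abs blocks */

	printf("old: %u * 7 = %u insns (%s)\n", len_old, len_old * 7,
	       len_old * 7 > max_jmp ? "exceeds 16-bit jump range" : "fits");
	printf("new: %u * 7 = %u insns (%s)\n", len_new, len_new * 7,
	       len_new * 7 > max_jmp ? "exceeds 16-bit jump range" : "fits");
	return 0;
}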
commit f3b55abb6d (parent 046561981b)
@@ -138,32 +138,36 @@ static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
 loop:
 	for (j = 0; j < PUSH_CNT; j++) {
 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
+		/* jump to error label */
+		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
 		i++;
 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 					 BPF_FUNC_skb_vlan_push),
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
 		i++;
 	}
 
 	for (j = 0; j < PUSH_CNT; j++) {
 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
+		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
 		i++;
 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 					 BPF_FUNC_skb_vlan_pop),
-		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
 		i++;
 	}
 	if (++k < 5)
 		goto loop;
 
-	for (; i < len - 1; i++)
-		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
+	for (; i < len - 3; i++)
+		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
+	insn[len - 3] = BPF_JMP_A(1);
+	/* error label */
+	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
 	insn[len - 1] = BPF_EXIT_INSN();
 	self->prog_len = len;
 }
@@ -171,8 +175,13 @@ loop:
 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
 	struct bpf_insn *insn = self->fill_insns;
-	/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns */
-	unsigned int len = (1 << 15) / 6;
+	/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
+	 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
+	 * to extend the error value of the inlined ld_abs sequence which then
+	 * contains 7 insns. so, set the dividend to 7 so the testcase could
+	 * work on all arches.
+	 */
+	unsigned int len = (1 << 15) / 7;
 	int i = 0;
 
 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
@@ -232,7 +241,7 @@ static void bpf_fill_scale1(struct bpf_test *self)
 	 * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
 	 */
 	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
-		insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42);
+		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
 	insn[i] = BPF_EXIT_INSN();
 	self->prog_len = i + 1;
 	self->retval = 42;
@@ -264,7 +273,7 @@ static void bpf_fill_scale2(struct bpf_test *self)
 	 * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
 	 */
 	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
-		insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42);
+		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
 	insn[i] = BPF_EXIT_INSN();
 	self->prog_len = i + 1;
 	self->retval = 42;