Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-09-23

The following pull-request contains BPF updates for your *net-next* tree.

We've added 95 non-merge commits during the last 22 day(s) which contain
a total of 124 files changed, 4211 insertions(+), 2040 deletions(-).

The main changes are:

1) Full multi function support in libbpf, from Andrii.

2) Refactoring of function argument checks, from Lorenz.

3) Make bpf_tail_call compatible with functions (subprograms), from Maciej.

4) Program metadata support, from YiFei.

5) bpf iterator optimizations, from Yonghong.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
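For a sense of what item 3) enables, here is a minimal sketch of a BPF program whose bpf2bpf subprogram itself performs a tail call — a combination the verifier previously rejected. All names (map, sections) are illustrative, not taken from this merge:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} jmp_table SEC(".maps");

static __noinline int subprog(struct xdp_md *ctx)
{
	/* Tail call issued from a subprogram: it consumes one of the
	 * MAX_TAIL_CALL_CNT (32) slots shared across the whole chain. */
	bpf_tail_call(ctx, &jmp_table, 0);
	return XDP_PASS; /* reached only if slot 0 is empty */
}

SEC("xdp")
int entry(struct xdp_md *ctx)
{
	return subprog(ctx);
}

char _license[] SEC("license") = "GPL";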
commit 6d772f328d

Makefile
@@ -1080,13 +1080,15 @@ ifdef CONFIG_STACK_VALIDATION
 
 endif
 endif
 
+ifdef CONFIG_BPF
 ifdef CONFIG_DEBUG_INFO_BTF
 ifeq ($(has_libelf),1)
 	resolve_btfids_target := tools/bpf/resolve_btfids FORCE
 else
 	ERROR_RESOLVE_BTFIDS := 1
 endif
-endif
+endif # CONFIG_DEBUG_INFO_BTF
+endif # CONFIG_BPF
 
 PHONY += prepare0
@@ -50,7 +50,6 @@ struct bpf_jit {
 	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
 	int tail_call_start;	/* Tail call start offset */
 	int excnt;		/* Number of exception table entries */
-	int labels[1];		/* Labels for local jumps */
 };
 
 #define SEEN_MEM	BIT(0)	/* use mem[] for temporary storage */
@@ -229,18 +228,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 	REG_SET_SEEN(b3);				\
 })
 
-#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
+#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target)	\
 ({							\
-	int rel = (jit->labels[label] - jit->prg) >> 1;	\
+	unsigned int rel = (int)((target) - jit->prg) / 2;	\
 	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),	\
 	       (op2) | (mask) << 12);			\
 	REG_SET_SEEN(b1);				\
 	REG_SET_SEEN(b2);				\
 })
 
-#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
+#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target)	\
 ({							\
-	int rel = (jit->labels[label] - jit->prg) >> 1;	\
+	unsigned int rel = (int)((target) - jit->prg) / 2;	\
 	_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |	\
 	       (rel & 0xffff), (op2) | ((imm) & 0xff) << 8);	\
 	REG_SET_SEEN(b1);				\
@@ -1282,7 +1281,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 		break;
 	}
-	case BPF_JMP | BPF_TAIL_CALL:
+	case BPF_JMP | BPF_TAIL_CALL: {
+		int patch_1_clrj, patch_2_clij, patch_3_brc;
+
 		/*
 		 * Implicit input:
 		 * B1: pointer to ctx
@@ -1300,16 +1301,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
 			      offsetof(struct bpf_array, map.max_entries));
 		/* if ((u32)%b3 >= (u32)%w1) goto out; */
-		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
-			/* clrj %b3,%w1,0xa,label0 */
-			EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
-					  REG_W1, 0, 0xa);
-		} else {
-			/* clr %b3,%w1 */
-			EMIT2(0x1500, BPF_REG_3, REG_W1);
-			/* brcl 0xa,label0 */
-			EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
-		}
+		/* clrj %b3,%w1,0xa,out */
+		patch_1_clrj = jit->prg;
+		EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
+				 jit->prg);
 
 		/*
 		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
@@ -1324,16 +1319,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4_IMM(0xa7080000, REG_W0, 1);
 		/* laal %w1,%w0,off(%r15) */
 		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
-		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
-			/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
-			EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
-					      MAX_TAIL_CALL_CNT, 0, 0x2);
-		} else {
-			/* clfi %w1,MAX_TAIL_CALL_CNT */
-			EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
-			/* brcl 0x2,label0 */
-			EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
-		}
+		/* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */
+		patch_2_clij = jit->prg;
+		EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT,
+				 2, jit->prg);
 
 		/*
 		 * prog = array->ptrs[index];
@@ -1348,13 +1337,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		/* ltg %r1,prog(%b2,%r1) */
 		EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
 			      REG_1, offsetof(struct bpf_array, ptrs));
-		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
-			/* brc 0x8,label0 */
-			EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
-		} else {
-			/* brcl 0x8,label0 */
-			EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
-		}
+		/* brc 0x8,out */
+		patch_3_brc = jit->prg;
+		EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
 
 		/*
 		 * Restore registers before calling function
@@ -1371,8 +1356,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		/* bc 0xf,tail_call_start(%r1) */
 		_EMIT4(0x47f01000 + jit->tail_call_start);
 		/* out: */
-		jit->labels[0] = jit->prg;
+		if (jit->prg_buf) {
+			*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
+				(jit->prg - patch_1_clrj) >> 1;
+			*(u16 *)(jit->prg_buf + patch_2_clij + 2) =
+				(jit->prg - patch_2_clij) >> 1;
+			*(u16 *)(jit->prg_buf + patch_3_brc + 2) =
+				(jit->prg - patch_3_brc) >> 1;
+		}
 		break;
+	}
 	case BPF_JMP | BPF_EXIT: /* return b0 */
 		last = (i == fp->len - 1) ? 1 : 0;
 		if (last)
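The hunks above appear to come from the s390 BPF JIT (the per-file headers were lost in extraction; the EMIT6_PCREL_* macros are the giveaway). They replace the old label array with recorded patch sites that are back-filled once the out: address is known. A standalone sketch of the back-patching arithmetic — names simplified, not the kernel's exact code — where s390 relative branches encode (target - insn) / 2 halfwords in a 16-bit field starting 2 bytes into the instruction:

#include <stdint.h>

/* Write the 16-bit halfword-scaled displacement of a previously
 * emitted branch once the target offset is finally known. */
static void patch_rel16(uint8_t *prg_buf, int insn_off, int target_off)
{
	*(uint16_t *)(prg_buf + insn_off + 2) =
		(uint16_t)((target_off - insn_off) >> 1);
}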
@@ -314,19 +314,19 @@ static inline void mds_idle_clear_cpu_buffers(void)
  *    lfence
  *    jmp spec_trap
  *  do_rop:
- *    mov %rax,(%rsp) for x86_64
+ *    mov %rcx,(%rsp) for x86_64
  *    mov %edx,(%esp) for x86_32
  *    retq
  *
  * Without retpolines configured:
  *
- *    jmp *%rax for x86_64
+ *    jmp *%rcx for x86_64
  *    jmp *%edx for x86_32
  */
 #ifdef CONFIG_RETPOLINE
 # ifdef CONFIG_X86_64
-#  define RETPOLINE_RAX_BPF_JIT_SIZE	17
-#  define RETPOLINE_RAX_BPF_JIT()			\
+#  define RETPOLINE_RCX_BPF_JIT_SIZE	17
+#  define RETPOLINE_RCX_BPF_JIT()			\
 do {							\
 	EMIT1_off32(0xE8, 7);	 /* callq do_rop */	\
 	/* spec_trap: */				\
@@ -334,7 +334,7 @@ do {							\
 	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */		\
 	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */	\
 	/* do_rop: */					\
-	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
+	EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */	\
 	EMIT1(0xC3);             /* retq */		\
 } while (0)
 # else /* !CONFIG_X86_64 */
@@ -352,9 +352,9 @@ do {							\
 # endif
 #else /* !CONFIG_RETPOLINE */
 # ifdef CONFIG_X86_64
-#  define RETPOLINE_RAX_BPF_JIT_SIZE	2
-#  define RETPOLINE_RAX_BPF_JIT()			\
-	EMIT2(0xFF, 0xE0);       /* jmp *%rax */
+#  define RETPOLINE_RCX_BPF_JIT_SIZE	2
+#  define RETPOLINE_RCX_BPF_JIT()			\
+	EMIT2(0xFF, 0xE1);       /* jmp *%rcx */
 # else /* !CONFIG_X86_64 */
 #  define RETPOLINE_EDX_BPF_JIT()			\
 	EMIT2(0xFF, 0xE2) /* jmp *%edx */
@@ -221,14 +221,48 @@ struct jit_context {
 
 /* Number of bytes emit_patch() needs to generate instructions */
 #define X86_PATCH_SIZE		5
+/* Number of bytes that will be skipped on tailcall */
+#define X86_TAIL_CALL_OFFSET	11
 
-#define PROLOGUE_SIZE		25
+static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (callee_regs_used[0])
+		EMIT1(0x53);         /* push rbx */
+	if (callee_regs_used[1])
+		EMIT2(0x41, 0x55);   /* push r13 */
+	if (callee_regs_used[2])
+		EMIT2(0x41, 0x56);   /* push r14 */
+	if (callee_regs_used[3])
+		EMIT2(0x41, 0x57);   /* push r15 */
+	*pprog = prog;
+}
+
+static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (callee_regs_used[3])
+		EMIT2(0x41, 0x5F);   /* pop r15 */
+	if (callee_regs_used[2])
+		EMIT2(0x41, 0x5E);   /* pop r14 */
+	if (callee_regs_used[1])
+		EMIT2(0x41, 0x5D);   /* pop r13 */
+	if (callee_regs_used[0])
+		EMIT1(0x5B);         /* pop rbx */
+	*pprog = prog;
+}
 
 /*
- * Emit x86-64 prologue code for BPF program and check its size.
- * bpf_tail_call helper will skip it while jumping into another program
+ * Emit x86-64 prologue code for BPF program.
+ * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
+ * while jumping to another program
  */
-static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
+static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
+			  bool tail_call_reachable, bool is_subprog)
 {
 	u8 *prog = *pprog;
 	int cnt = X86_PATCH_SIZE;
@@ -238,19 +272,18 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
 	 */
 	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
 	prog += cnt;
+	if (!ebpf_from_cbpf) {
+		if (tail_call_reachable && !is_subprog)
+			EMIT2(0x31, 0xC0); /* xor eax, eax */
+		else
+			EMIT2(0x66, 0x90); /* nop2 */
+	}
 	EMIT1(0x55);             /* push rbp */
 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
 	/* sub rsp, rounded_stack_depth */
 	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
-	EMIT1(0x53);             /* push rbx */
-	EMIT2(0x41, 0x55);       /* push r13 */
-	EMIT2(0x41, 0x56);       /* push r14 */
-	EMIT2(0x41, 0x57);       /* push r15 */
-	if (!ebpf_from_cbpf) {
-		/* zero init tail_call_cnt */
-		EMIT2(0x6a, 0x00);
-		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
-	}
+	if (tail_call_reachable)
+		EMIT1(0x50);         /* push rax */
 	*pprog = prog;
 }
@@ -314,13 +347,14 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 	mutex_lock(&text_mutex);
 	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
 		goto out;
+	ret = 1;
 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
 		if (text_live)
 			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
 		else
 			memcpy(ip, new_insn, X86_PATCH_SIZE);
+		ret = 0;
 	}
-	ret = 0;
 out:
 	mutex_unlock(&text_mutex);
 	return ret;
@@ -337,6 +371,22 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
 }
 
+static int get_pop_bytes(bool *callee_regs_used)
+{
+	int bytes = 0;
+
+	if (callee_regs_used[3])
+		bytes += 2;
+	if (callee_regs_used[2])
+		bytes += 2;
+	if (callee_regs_used[1])
+		bytes += 2;
+	if (callee_regs_used[0])
+		bytes += 1;
+
+	return bytes;
+}
+
 /*
  * Generate the following code:
  *
@@ -351,12 +401,26 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
  *   goto *(prog->bpf_func + prologue_size);
  * out:
  */
-static void emit_bpf_tail_call_indirect(u8 **pprog)
+static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
+					u32 stack_depth)
 {
+	int tcc_off = -4 - round_up(stack_depth, 8);
 	u8 *prog = *pprog;
 	int label1, label2, label3;
+	int pop_bytes = 0;
+	int off1 = 49;
+	int off2 = 38;
+	int off3 = 16;
 	int cnt = 0;
 
+	/* count the additional bytes used for popping callee regs from stack
+	 * that need to be taken into account for each of the offsets that
+	 * are used for bailing out of the tail call
+	 */
+	pop_bytes = get_pop_bytes(callee_regs_used);
+	off1 += pop_bytes;
+	off2 += pop_bytes;
+	off3 += pop_bytes;
+
 	/*
 	 * rdi - pointer to ctx
 	 * rsi - pointer to bpf_array
@@ -370,72 +434,106 @@ static void emit_bpf_tail_call_indirect(u8 **pprog)
 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 	      offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
+#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 	label1 = cnt;
 
 	/*
 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 	 *	goto out;
 	 */
-	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
+	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
+#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 	label2 = cnt;
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
-	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
+	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
 
 	/* prog = array->ptrs[index]; */
-	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
+	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));
 
 	/*
 	 * if (prog == NULL)
 	 *	goto out;
 	 */
-	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
-#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
+	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
+#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
 	EMIT2(X86_JE, OFFSET3);                   /* je out */
 	label3 = cnt;
 
-	/* goto *(prog->bpf_func + prologue_size); */
-	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
+	*pprog = prog;
+	pop_callee_regs(pprog, callee_regs_used);
+	prog = *pprog;
+
+	EMIT1(0x58);                              /* pop rax */
+	EMIT3_off32(0x48, 0x81, 0xC4,             /* add rsp, sd */
+		    round_up(stack_depth, 8));
+
+	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
+	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
 	      offsetof(struct bpf_prog, bpf_func));
-	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */
-
+	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
+	      X86_TAIL_CALL_OFFSET);
 	/*
-	 * Wow we're ready to jump into next BPF program
+	 * Now we're ready to jump into next BPF program
 	 * rdi == ctx (1st arg)
-	 * rax == prog->bpf_func + prologue_size
+	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
 	 */
-	RETPOLINE_RAX_BPF_JIT();
+	RETPOLINE_RCX_BPF_JIT();
 
 	/* out: */
 	BUILD_BUG_ON(cnt - label1 != OFFSET1);
 	BUILD_BUG_ON(cnt - label2 != OFFSET2);
 	BUILD_BUG_ON(cnt - label3 != OFFSET3);
 	*pprog = prog;
 }
 
 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
-				      u8 **pprog, int addr, u8 *image)
+				      u8 **pprog, int addr, u8 *image,
+				      bool *callee_regs_used, u32 stack_depth)
 {
+	int tcc_off = -4 - round_up(stack_depth, 8);
 	u8 *prog = *pprog;
+	int pop_bytes = 0;
+	int off1 = 27;
+	int poke_off;
 	int cnt = 0;
 
+	/* count the additional bytes used for popping callee regs to stack
+	 * that need to be taken into account for jump offset that is used for
+	 * bailing out from of the tail call when limit is reached
+	 */
+	pop_bytes = get_pop_bytes(callee_regs_used);
+	off1 += pop_bytes;
+
+	/*
+	 * total bytes for:
+	 * - nop5/ jmpq $off
+	 * - pop callee regs
+	 * - sub rsp, $val
+	 * - pop rax
+	 */
+	poke_off = X86_PATCH_SIZE + pop_bytes + 7 + 1;
+
 	/*
 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 	 *	goto out;
 	 */
-	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
+	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
-	EMIT2(X86_JA, 14);                            /* ja out */
+	EMIT2(X86_JA, off1);                          /* ja out */
 	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
-	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
+	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */
 
-	poke->ip = image + (addr - X86_PATCH_SIZE);
-	poke->adj_off = PROLOGUE_SIZE;
+	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
+	poke->adj_off = X86_TAIL_CALL_OFFSET;
+	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
+	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
+
+	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
+		  poke->tailcall_bypass);
+
+	*pprog = prog;
+	pop_callee_regs(pprog, callee_regs_used);
+	prog = *pprog;
+	EMIT1(0x58);                                  /* pop rax */
+	EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
 
 	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
 	prog += X86_PATCH_SIZE;
@@ -453,7 +551,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 
 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
 		poke = &prog->aux->poke_tab[i];
-		WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
+		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
 
 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 			continue;
@@ -464,18 +562,25 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 		if (target) {
 			/* Plain memcpy is used when image is not live yet
 			 * and still not locked as read-only. Once poke
-			 * location is active (poke->ip_stable), any parallel
-			 * bpf_arch_text_poke() might occur still on the
-			 * read-write image until we finally locked it as
-			 * read-only. Both modifications on the given image
-			 * are under text_mutex to avoid interference.
+			 * location is active (poke->tailcall_target_stable),
+			 * any parallel bpf_arch_text_poke() might occur
+			 * still on the read-write image until we finally
+			 * locked it as read-only. Both modifications on
+			 * the given image are under text_mutex to avoid
+			 * interference.
			 */
-			ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
+			ret = __bpf_arch_text_poke(poke->tailcall_target,
+						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
 			BUG_ON(ret < 0);
+			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
+						   BPF_MOD_JUMP,
+						   (u8 *)poke->tailcall_target +
+						   X86_PATCH_SIZE, NULL, false);
+			BUG_ON(ret < 0);
 		}
-		WRITE_ONCE(poke->ip_stable, true);
+		WRITE_ONCE(poke->tailcall_target_stable, true);
 		mutex_unlock(&array->aux->poke_mutex);
 	}
 }
@@ -652,19 +757,49 @@ static bool ex_handler_bpf(const struct exception_table_entry *x,
 	return true;
 }
 
+static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
+			     bool *regs_used, bool *tail_call_seen)
+{
+	int i;
+
+	for (i = 1; i <= insn_cnt; i++, insn++) {
+		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
+			*tail_call_seen = true;
+		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
+			regs_used[0] = true;
+		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
+			regs_used[1] = true;
+		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
+			regs_used[2] = true;
+		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
+			regs_used[3] = true;
+	}
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
+	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
 	struct bpf_insn *insn = bpf_prog->insnsi;
+	bool callee_regs_used[4] = {};
 	int insn_cnt = bpf_prog->len;
+	bool tail_call_seen = false;
 	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 	int i, cnt = 0, excnt = 0;
 	int proglen = 0;
 	u8 *prog = temp;
 
+	detect_reg_usage(insn, insn_cnt, callee_regs_used,
+			 &tail_call_seen);
+
+	/* tail call's presence in current prog implies it is reachable */
+	tail_call_reachable |= tail_call_seen;
+
 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
-		      bpf_prog_was_classic(bpf_prog));
+		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
+		      bpf_prog->aux->func_idx != 0);
+	push_callee_regs(&prog, callee_regs_used);
 	addrs[0] = prog - temp;
 
 	for (i = 1; i <= insn_cnt; i++, insn++) {
@@ -1102,16 +1237,27 @@ xadd:			if (is_imm8(insn->off))
 			/* call */
 		case BPF_JMP | BPF_CALL:
 			func = (u8 *) __bpf_call_base + imm32;
-			if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
-				return -EINVAL;
+			if (tail_call_reachable) {
+				EMIT3_off32(0x48, 0x8B, 0x85,
+					    -(bpf_prog->aux->stack_depth + 8));
+				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+					return -EINVAL;
+			} else {
+				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+					return -EINVAL;
+			}
 			break;
 
 		case BPF_JMP | BPF_TAIL_CALL:
 			if (imm32)
 				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
-							  &prog, addrs[i], image);
+							  &prog, addrs[i], image,
+							  callee_regs_used,
+							  bpf_prog->aux->stack_depth);
 			else
-				emit_bpf_tail_call_indirect(&prog);
+				emit_bpf_tail_call_indirect(&prog,
+							    callee_regs_used,
+							    bpf_prog->aux->stack_depth);
 			break;
 
 			/* cond jump */
@@ -1294,12 +1440,9 @@ emit_jmp:
 			seen_exit = true;
 			/* Update cleanup_addr */
 			ctx->cleanup_addr = proglen;
-			if (!bpf_prog_was_classic(bpf_prog))
-				EMIT1(0x5B); /* get rid of tail_call_cnt */
-			EMIT2(0x41, 0x5F);   /* pop r15 */
-			EMIT2(0x41, 0x5E);   /* pop r14 */
-			EMIT2(0x41, 0x5D);   /* pop r13 */
-			EMIT1(0x5B);         /* pop rbx */
+			pop_callee_regs(&prog, callee_regs_used);
+			if (tail_call_reachable)
+				EMIT1(0x59); /* pop rcx, get rid of tail_call_cnt */
 			EMIT1(0xC9);         /* leave */
 			EMIT1(0xC3);         /* ret */
 			break;
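These x86-64 JIT hunks are the heart of the tailcall-with-subprograms rework: the tail call counter now travels in rax (zeroed only in the main program's prologue, pushed onto the stack, reloaded before each bpf2bpf call), which is also why the indirect tail-call jump — and the retpoline helpers renamed in the previous section — moved from rax to rcx. A tiny standalone check of where X86_TAIL_CALL_OFFSET = 11 comes from, per the emitted prologue bytes above:

#include <assert.h>

/* Byte sizes of the prologue instructions a tail call skips. */
#define NOP5         5  /* patchable nop, X86_PATCH_SIZE              */
#define XOR_EAX_EAX  2  /* tail_call_cnt = 0 (nop2 in subprograms)    */
#define PUSH_RBP     1
#define MOV_RBP_RSP  3

int main(void)
{
	/* A tail call lands just past counter init and frame setup,
	 * so the caller's rax (the counter) stays live: */
	assert(NOP5 + XOR_EAX_EAX + PUSH_RBP + MOV_RBP_RSP == 11);
	return 0;
}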
@@ -111,7 +111,9 @@ static int
 nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 		    struct bpf_prog *prog)
 {
-	int i, cnt, err;
+	int i, cnt, err = 0;
+
+	mutex_lock(&prog->aux->used_maps_mutex);
 
 	/* Quickly count the maps we will have to remember */
 	cnt = 0;
@@ -119,13 +121,15 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
 			cnt++;
 	if (!cnt)
-		return 0;
+		goto out;
 
 	nfp_prog->map_records = kmalloc_array(cnt,
 					      sizeof(nfp_prog->map_records[0]),
 					      GFP_KERNEL);
-	if (!nfp_prog->map_records)
-		return -ENOMEM;
+	if (!nfp_prog->map_records) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	for (i = 0; i < prog->aux->used_map_cnt; i++)
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
@@ -133,12 +137,14 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 					     prog->aux->used_maps[i]);
 			if (err) {
 				nfp_map_ptrs_forget(bpf, nfp_prog);
-				return err;
+				goto out;
 			}
 		}
 	WARN_ON(cnt != nfp_prog->map_records_cnt);
 
-	return 0;
+out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return err;
 }
 
 static int
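The hunk above (apparently the Netronome nfp offload driver) converts every early return into a goto out so the newly introduced used_maps_mutex is always released. The shape, as a self-contained userspace sketch with illustrative names (the kernel uses its own mutex API):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t used_maps_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Single-unlock error handling: every exit path funnels through out:. */
static int record_maps(int have_maps, int alloc_ok)
{
	int err = 0;

	pthread_mutex_lock(&used_maps_mutex);
	if (!have_maps)
		goto out;            /* was: return 0; */
	if (!alloc_ok) {
		err = -ENOMEM;       /* was: return -ENOMEM; */
		goto out;
	}
	/* ... record map pointers while holding the lock ... */
out:
	pthread_mutex_unlock(&used_maps_mutex);
	return err;
}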
@@ -292,6 +292,7 @@ enum bpf_arg_type {
 	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
 	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
 	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
+	__BPF_ARG_TYPE_MAX,
 };
 
 /* type of values returned from helper functions */
@@ -326,12 +327,16 @@ struct bpf_func_proto {
 		};
 		enum bpf_arg_type arg_type[5];
 	};
-	int *btf_id; /* BTF ids of arguments */
-	bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is
-						    * valid. Often used if more
-						    * than one btf id is permitted
-						    * for this argument.
-						    */
+	union {
+		struct {
+			u32 *arg1_btf_id;
+			u32 *arg2_btf_id;
+			u32 *arg3_btf_id;
+			u32 *arg4_btf_id;
+			u32 *arg5_btf_id;
+		};
+		u32 *arg_btf_id[5];
+	};
 	int *ret_btf_id; /* return value btf_id */
 	bool (*allowed)(const struct bpf_prog *prog);
 };
@@ -697,16 +702,19 @@ enum bpf_jit_poke_reason {
 
 /* Descriptor of pokes pointing /into/ the JITed image. */
 struct bpf_jit_poke_descriptor {
-	void *ip;
+	void *tailcall_target;
+	void *tailcall_bypass;
+	void *bypass_addr;
 	union {
 		struct {
 			struct bpf_map *map;
 			u32 key;
 		} tail_call;
 	};
-	bool ip_stable;
+	bool tailcall_target_stable;
 	u8 adj_off;
 	u16 reason;
 	u32 insn_idx;
 };
 
 /* reg_type info for ctx arguments */
@@ -737,6 +745,7 @@ struct bpf_prog_aux {
 	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
 	bool func_proto_unreliable;
 	bool sleepable;
+	bool tail_call_reachable;
 	enum bpf_tramp_prog_type trampoline_prog_type;
 	struct bpf_trampoline *trampoline;
 	struct hlist_node tramp_hlist;
@@ -751,6 +760,7 @@ struct bpf_prog_aux {
 	struct bpf_ksym ksym;
 	const struct bpf_prog_ops *ops;
 	struct bpf_map **used_maps;
+	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
 	struct bpf_prog *prog;
 	struct user_struct *user;
 	u64 load_time; /* ns since boottime */
@@ -1380,8 +1390,6 @@ int btf_struct_access(struct bpf_verifier_log *log,
 		      u32 *next_btf_id);
 bool btf_struct_ids_match(struct bpf_verifier_log *log,
 			  int off, u32 id, u32 need_type_id);
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
-			  const struct bpf_func_proto *fn, int);
 
 int btf_distill_func_proto(struct bpf_verifier_log *log,
 			   struct btf *btf,
@@ -1900,6 +1908,6 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 		       void *addr1, void *addr2);
 
 struct btf_id_set;
-bool btf_id_set_contains(struct btf_id_set *set, u32 id);
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
 
 #endif /* _LINUX_BPF_H */
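The new per-argument BTF id union above (from Lorenz's argument-check refactoring) lets helper definitions name a field like .arg2_btf_id while the verifier indexes arg_btf_id[arg] uniformly. A minimal standalone model of that aliasing — the struct here is a stripped-down stand-in, not the kernel's bpf_func_proto:

#include <assert.h>
#include <stdint.h>

struct func_proto_model {
	union {
		struct {		/* anonymous members, C11 */
			uint32_t *arg1_btf_id;
			uint32_t *arg2_btf_id;
			uint32_t *arg3_btf_id;
			uint32_t *arg4_btf_id;
			uint32_t *arg5_btf_id;
		};
		uint32_t *arg_btf_id[5];
	};
};

int main(void)
{
	static uint32_t inode_btf_id = 42;	/* arbitrary value */
	struct func_proto_model p = { .arg2_btf_id = &inode_btf_id };

	assert(p.arg_btf_id[1] == &inode_btf_id); /* arg2 == index 1 */
	return 0;
}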
@@ -358,6 +358,9 @@ struct bpf_subprog_info {
 	u32 start; /* insn idx of function entry point */
 	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
 	u16 stack_depth; /* max. stack depth used by this function */
+	bool has_tail_call;
+	bool tail_call_reachable;
+	bool has_ld_abs;
 };
 
 /* single container for all structs
@@ -76,6 +76,13 @@ extern u32 name[];
 #define BTF_ID_LIST_GLOBAL(name)			\
 __BTF_ID_LIST(name, globl)
 
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename)	\
+	BTF_ID_LIST(name) \
+	BTF_ID(prefix, typename)
+
 /*
  * The BTF_ID_UNUSED macro defines 4 zero bytes.
  * It's used when we want to define 'unused' entry
@@ -140,6 +147,7 @@ extern struct btf_id_set name;
 #define BTF_ID(prefix, name)
 #define BTF_ID_UNUSED
 #define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
 #define BTF_SET_START(name) static struct btf_id_set name = { 0 };
 #define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
 #define BTF_SET_END(name)
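Usage of the new one-entry macro, as the later bpf_inode_storage.c and stackmap hunks in this merge show (kernel-context fragment, not standalone):

/* One line instead of a BTF_ID_LIST plus a BTF_ID entry: */
BTF_ID_LIST_SINGLE(bpf_inode_storage_btf_ids, struct, inode)
/* With BTF enabled this expands to
 *   BTF_ID_LIST(bpf_inode_storage_btf_ids) BTF_ID(struct, inode)
 * and without CONFIG_DEBUG_INFO_BTF to
 *   static u32 bpf_inode_storage_btf_ids[1];
 * so &name[0] is always a valid pointer for .argN_btf_id. */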
@@ -1287,6 +1287,8 @@ int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
 struct bpf_sk_lookup_kern {
 	u16		family;
 	u16		protocol;
+	__be16		sport;
+	u16		dport;
 	struct {
 		__be32 saddr;
 		__be32 daddr;
@@ -1295,8 +1297,6 @@ struct bpf_sk_lookup_kern {
 		const struct in6_addr *saddr;
 		const struct in6_addr *daddr;
 	} v6;
-	__be16		sport;
-	u16		dport;
 	struct sock	*selected_sk;
 	bool		no_reuseport;
 };
@@ -96,7 +96,8 @@ struct inet_connection_sock {
 	void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
 	struct hlist_node	  icsk_listen_portaddr_node;
 	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
-	__u8			  icsk_ca_state:6,
+	__u8			  icsk_ca_state:5,
+				  icsk_ca_initialized:1,
 				  icsk_ca_setsockopt:1,
 				  icsk_ca_dst_locked:1;
 	__u8			  icsk_retransmits;
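This hunk frees a bit for the new icsk_ca_initialized flag by narrowing icsk_ca_state from 6 bits to 5, which still holds the five TCP_CA_* states (0 through 4). A quick standalone check of the packing arithmetic:

#include <assert.h>

int main(void)
{
	assert(5 + 1 + 1 + 1 == 8);	/* ca_state + initialized +
					 * setsockopt + dst_locked
					 * still fill exactly one __u8 */
	assert(4 < (1 << 5));		/* TCP_CA_Loss == 4 fits in 5 bits */
	return 0;
}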
@@ -1104,7 +1104,7 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
 void tcp_get_allowed_congestion_control(char *buf, size_t len);
 int tcp_set_allowed_congestion_control(char *allowed);
 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
-			       bool reinit, bool cap_net_admin);
+			       bool cap_net_admin);
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -124,6 +124,7 @@ enum bpf_cmd {
 	BPF_ENABLE_STATS,
 	BPF_ITER_CREATE,
 	BPF_LINK_DETACH,
+	BPF_PROG_BIND_MAP,
 };
 
 enum bpf_map_type {
@@ -658,6 +659,12 @@ union bpf_attr {
 		__u32		flags;
 	} iter_create;
 
+	struct { /* struct used by BPF_PROG_BIND_MAP command */
+		__u32		prog_fd;
+		__u32		map_fd;
+		__u32		flags;		/* extra flags */
+	} prog_bind_map;
+
 } __attribute__((aligned(8)));
 
 /* The description below is an attempt at providing documentation to eBPF
@@ -1447,8 +1454,8 @@ union bpf_attr {
 *	Return
 *		The return value depends on the result of the test, and can be:
 *
- *		* 0, if the *skb* task belongs to the cgroup2.
- *		* 1, if the *skb* task does not belong to the cgroup2.
+ *		* 0, if current task belongs to the cgroup2.
+ *		* 1, if current task does not belong to the cgroup2.
 *		* A negative error code, if an error occurred.
 *
 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
@@ -3349,38 +3356,38 @@ union bpf_attr {
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
 *	Return
- *		*sk* if casting is valid, or NULL otherwise.
+ *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
 *	Return
- *		*sk* if casting is valid, or NULL otherwise.
+ *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
 *	Return
- *		*sk* if casting is valid, or NULL otherwise.
+ *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
 *	Return
- *		*sk* if casting is valid, or NULL otherwise.
+ *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
 *	Return
- *		*sk* if casting is valid, or NULL otherwise.
+ *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
 *	Description
 *		Return a user or a kernel stack in bpf program provided buffer.
 *		To achieve this, the helper needs *task*, which is a valid
- *		pointer to struct task_struct. To store the stacktrace, the
- *		bpf program provides *buf* with a nonnegative *size*.
+ *		pointer to **struct task_struct**. To store the stacktrace, the
+ *		bpf program provides *buf* with a nonnegative *size*.
 *
 *		The last argument, *flags*, holds the number of stack frames to
 *		skip (from 0 to 255), masked with
@@ -3410,12 +3417,12 @@ union bpf_attr {
 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
 *	Description
 *		Load header option. Support reading a particular TCP header
- *		option for bpf program (BPF_PROG_TYPE_SOCK_OPS).
+ *		option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
 *
 *		If *flags* is 0, it will search the option from the
- *		sock_ops->skb_data. The comment in "struct bpf_sock_ops"
+ *		*skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
 *		has details on what skb_data contains under different
- *		sock_ops->op.
+ *		*skops*\ **->op**.
 *
 *		The first byte of the *searchby_res* specifies the
 *		kind that it wants to search.
@@ -3435,7 +3442,7 @@ union bpf_attr {
 *		[ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
 *
 *		To search for the standard window scale option (3),
- *		the searchby_res should be [ 3, 0, 0, .... 0 ].
+ *		the *searchby_res* should be [ 3, 0, 0, .... 0 ].
 *		Note, kind-length must be 0 for regular option.
 *
 *		Searching for No-Op (0) and End-of-Option-List (1) are
@@ -3445,27 +3452,30 @@ union bpf_attr {
 *		of a header option.
 *
 *		Supported flags:
+ *
 *		* **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
 *		  saved_syn packet or the just-received syn packet.
 *
 *	Return
- *		>0 when found, the header option is copied to *searchby_res*.
- *		The return value is the total length copied.
+ *		> 0 when found, the header option is copied to *searchby_res*.
+ *		The return value is the total length copied. On failure, a
+ *		negative error code is returned:
 *
- *		**-EINVAL** If param is invalid
+ *		**-EINVAL** if a parameter is invalid.
 *
- *		**-ENOMSG** The option is not found
+ *		**-ENOMSG** if the option is not found.
 *
- *		**-ENOENT** No syn packet available when
- *		**BPF_LOAD_HDR_OPT_TCP_SYN** is used
+ *		**-ENOENT** if no syn packet is available when
+ *		**BPF_LOAD_HDR_OPT_TCP_SYN** is used.
 *
- *		**-ENOSPC** Not enough space. Only *len* number of
- *		bytes are copied.
+ *		**-ENOSPC** if there is not enough space. Only *len* number of
+ *		bytes are copied.
 *
- *		**-EFAULT** Cannot parse the header options in the packet
+ *		**-EFAULT** on failure to parse the header options in the
+ *		packet.
 *
- *		**-EPERM** This helper cannot be used under the
- *		current sock_ops->op.
+ *		**-EPERM** if the helper cannot be used under the current
+ *		*skops*\ **->op**.
 *
 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
 *	Description
@@ -3483,44 +3493,44 @@ union bpf_attr {
 *		by searching the same option in the outgoing skb.
 *
 *		This helper can only be called during
- *		BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
 *
 *	Return
 *		0 on success, or negative error in case of failure:
 *
- *		**-EINVAL** If param is invalid
+ *		**-EINVAL** If param is invalid.
 *
- *		**-ENOSPC** Not enough space in the header.
- *		Nothing has been written
+ *		**-ENOSPC** if there is not enough space in the header.
+ *		Nothing has been written
 *
- *		**-EEXIST** The option has already existed
+ *		**-EEXIST** if the option already exists.
 *
- *		**-EFAULT** Cannot parse the existing header options
+ *		**-EFAULT** on failrue to parse the existing header options.
 *
- *		**-EPERM** This helper cannot be used under the
- *		current sock_ops->op.
+ *		**-EPERM** if the helper cannot be used under the current
+ *		*skops*\ **->op**.
 *
 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
 *	Description
 *		Reserve *len* bytes for the bpf header option. The
- *		space will be used by bpf_store_hdr_opt() later in
- *		BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *		space will be used by **bpf_store_hdr_opt**\ () later in
+ *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
 *
- *		If bpf_reserve_hdr_opt() is called multiple times,
+ *		If **bpf_reserve_hdr_opt**\ () is called multiple times,
 *		the total number of bytes will be reserved.
 *
 *		This helper can only be called during
- *		BPF_SOCK_OPS_HDR_OPT_LEN_CB.
+ *		**BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
 *
 *	Return
 *		0 on success, or negative error in case of failure:
 *
- *		**-EINVAL** if param is invalid
+ *		**-EINVAL** if a parameter is invalid.
 *
- *		**-ENOSPC** Not enough space in the header.
+ *		**-ENOSPC** if there is not enough space in the header.
 *
- *		**-EPERM** This helper cannot be used under the
- *		current sock_ops->op.
+ *		**-EPERM** if the helper cannot be used under the current
+ *		*skops*\ **->op**.
 *
 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
 *	Description
@@ -3560,9 +3570,9 @@ union bpf_attr {
 *
 * long bpf_d_path(struct path *path, char *buf, u32 sz)
 *	Description
- *		Return full path for given 'struct path' object, which
- *		needs to be the kernel BTF 'path' object. The path is
- *		returned in the provided buffer 'buf' of size 'sz' and
+ *		Return full path for given **struct path** object, which
+ *		needs to be the kernel BTF *path* object. The path is
+ *		returned in the provided buffer *buf* of size *sz* and
 *		is zero terminated.
 *
 *	Return
@@ -3573,7 +3583,7 @@ union bpf_attr {
 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
 *	Description
 *		Read *size* bytes from user space address *user_ptr* and store
- *		the data in *dst*. This is a wrapper of copy_from_user().
+ *		the data in *dst*. This is a wrapper of **copy_from_user**\ ().
 *	Return
 *		0 on success, or a negative error in case of failure.
 */
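The new BPF_PROG_BIND_MAP command (from YiFei's metadata series) ties a map's lifetime to a program even when no instruction references it. A hedged sketch of driving it straight through the bpf(2) syscall, with the prog/map fds assumed to come from elsewhere:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_bind_map(int prog_fd, int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd  = map_fd;
	attr.prog_bind_map.flags   = 0;	/* kernel rejects nonzero flags */

	/* e.g. keep a metadata map alive as long as the program is */
	return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}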
@@ -898,6 +898,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 				    struct bpf_prog *old,
 				    struct bpf_prog *new)
 {
+	u8 *old_addr, *new_addr, *old_bypass_addr;
 	struct prog_poke_elem *elem;
 	struct bpf_array_aux *aux;
 
@@ -918,12 +919,13 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 			 *    there could be danger of use after free otherwise.
 			 * 2) Initially when we start tracking aux, the program
 			 *    is not JITed yet and also does not have a kallsyms
-			 *    entry. We skip these as poke->ip_stable is not
-			 *    active yet. The JIT will do the final fixup before
-			 *    setting it stable. The various poke->ip_stable are
-			 *    successively activated, so tail call updates can
-			 *    arrive from here while JIT is still finishing its
-			 *    final fixup for non-activated poke entries.
+			 *    entry. We skip these as poke->tailcall_target_stable
+			 *    is not active yet. The JIT will do the final fixup
+			 *    before setting it stable. The various
+			 *    poke->tailcall_target_stable are successively
+			 *    activated, so tail call updates can arrive from here
+			 *    while JIT is still finishing its final fixup for
+			 *    non-activated poke entries.
 			 * 3) On program teardown, the program's kallsym entry gets
 			 *    removed out of RCU callback, but we can only untrack
 			 *    from sleepable context, therefore bpf_arch_text_poke()
@@ -940,7 +942,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 			 * 5) Any other error happening below from bpf_arch_text_poke()
 			 *    is a unexpected bug.
 			 */
-			if (!READ_ONCE(poke->ip_stable))
+			if (!READ_ONCE(poke->tailcall_target_stable))
 				continue;
 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 				continue;
@@ -948,12 +950,39 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
 			    poke->tail_call.key != key)
 				continue;
 
-			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
-						 old ? (u8 *)old->bpf_func +
-						 poke->adj_off : NULL,
-						 new ? (u8 *)new->bpf_func +
-						 poke->adj_off : NULL);
-			BUG_ON(ret < 0 && ret != -EINVAL);
+			old_bypass_addr = old ? NULL : poke->bypass_addr;
+			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+
+			if (new) {
+				ret = bpf_arch_text_poke(poke->tailcall_target,
+							 BPF_MOD_JUMP,
+							 old_addr, new_addr);
+				BUG_ON(ret < 0 && ret != -EINVAL);
+				if (!old) {
+					ret = bpf_arch_text_poke(poke->tailcall_bypass,
+								 BPF_MOD_JUMP,
+								 poke->bypass_addr,
+								 NULL);
+					BUG_ON(ret < 0 && ret != -EINVAL);
+				}
+			} else {
+				ret = bpf_arch_text_poke(poke->tailcall_bypass,
+							 BPF_MOD_JUMP,
+							 old_bypass_addr,
+							 poke->bypass_addr);
+				BUG_ON(ret < 0 && ret != -EINVAL);
+				/* let other CPUs finish the execution of program
+				 * so that it will not possible to expose them
+				 * to invalid nop, stack unwind, nop state
+				 */
+				if (!ret)
+					synchronize_rcu();
+				ret = bpf_arch_text_poke(poke->tailcall_target,
+							 BPF_MOD_JUMP,
+							 old_addr, NULL);
+				BUG_ON(ret < 0 && ret != -EINVAL);
+			}
 		}
 	}
 }
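The ordering in this hunk is the subtle part: each direct tail call site now has two patchable jumps, the tailcall_target (into the callee) and the tailcall_bypass (over it, taken when the slot is empty). Installing into an empty slot patches the target first and only then disables the bypass; clearing a slot enables the bypass first, waits an RCU grace period so no CPU can still be executing between the two sites, and only then clears the target. That way a CPU never observes a nop where it expected a live jump, which is what the "invalid nop, stack unwind, nop state" comment is guarding against.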
@@ -249,9 +249,7 @@ const struct bpf_map_ops inode_storage_map_ops = {
 	.map_owner_storage_ptr = inode_storage_ptr,
 };
 
-BTF_ID_LIST(bpf_inode_storage_btf_ids)
-BTF_ID_UNUSED
-BTF_ID(struct, inode)
+BTF_ID_LIST_SINGLE(bpf_inode_storage_btf_ids, struct, inode)
 
 const struct bpf_func_proto bpf_inode_storage_get_proto = {
 	.func		= bpf_inode_storage_get,
@@ -259,9 +257,9 @@ const struct bpf_func_proto bpf_inode_storage_get_proto = {
 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id	= &bpf_inode_storage_btf_ids[0],
 	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
 	.arg4_type	= ARG_ANYTHING,
-	.btf_id		= bpf_inode_storage_btf_ids,
 };
 
 const struct bpf_func_proto bpf_inode_storage_delete_proto = {
@@ -270,5 +268,5 @@ const struct bpf_func_proto bpf_inode_storage_delete_proto = {
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_BTF_ID,
-	.btf_id		= bpf_inode_storage_btf_ids,
+	.arg2_btf_id	= &bpf_inode_storage_btf_ids[0],
 };
@@ -159,7 +159,7 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
 				   struct bpf_local_storage_elem *selem)
 {
 	RCU_INIT_POINTER(selem->local_storage, local_storage);
-	hlist_add_head(&selem->snode, &local_storage->list);
+	hlist_add_head_rcu(&selem->snode, &local_storage->list);
 }
 
 void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
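This one-liner matters because lockless readers traverse the local-storage list under RCU; the _rcu insertion variant adds the memory barrier that orders the element's initialization before its publication on the list. The matching reader pattern, as a kernel-context fragment (not standalone):

rcu_read_lock();
hlist_for_each_entry_rcu(selem, &local_storage->list, snode) {
	/* fields written before hlist_add_head_rcu() are
	 * guaranteed visible here */
}
rcu_read_unlock();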
@@ -4193,19 +4193,6 @@ again:
 	return true;
 }
 
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
-			  const struct bpf_func_proto *fn, int arg)
-{
-	int id;
-
-	if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID || !btf_vmlinux)
-		return -EINVAL;
-	id = fn->btf_id[arg];
-	if (!id || id > btf_vmlinux->nr_types)
-		return -EINVAL;
-	return id;
-}
-
 static int __get_type_size(struct btf *btf, u32 btf_id,
 			   const struct btf_type **bad_type)
 {
@@ -4772,7 +4759,7 @@ static int btf_id_cmp_func(const void *a, const void *b)
 	return *pa - *pb;
 }
 
-bool btf_id_set_contains(struct btf_id_set *set, u32 id)
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
 {
 	return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
 }
@@ -98,6 +98,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
 	fp->jit_requested = ebpf_jit_enabled();
 
 	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
+	mutex_init(&fp->aux->used_maps_mutex);
 
 	return fp;
 }
@@ -253,6 +254,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 void __bpf_prog_free(struct bpf_prog *fp)
 {
 	if (fp->aux) {
+		mutex_destroy(&fp->aux->used_maps_mutex);
 		free_percpu(fp->aux->stats);
 		kfree(fp->aux->poke_tab);
 		kfree(fp->aux);
@@ -773,7 +775,8 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
 
 	if (size > poke_tab_max)
 		return -ENOSPC;
-	if (poke->ip || poke->ip_stable || poke->adj_off)
+	if (poke->tailcall_target || poke->tailcall_target_stable ||
+	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
 		return -EINVAL;
 
 	switch (poke->reason) {
@@ -1747,8 +1750,9 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
 static int bpf_check_tail_call(const struct bpf_prog *fp)
 {
 	struct bpf_prog_aux *aux = fp->aux;
-	int i;
+	int i, ret = 0;
 
+	mutex_lock(&aux->used_maps_mutex);
 	for (i = 0; i < aux->used_map_cnt; i++) {
 		struct bpf_map *map = aux->used_maps[i];
 		struct bpf_array *array;
@@ -1757,11 +1761,15 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 			continue;
 
 		array = container_of(map, struct bpf_array, map);
-		if (!bpf_prog_array_compatible(array, fp))
-			return -EINVAL;
+		if (!bpf_prog_array_compatible(array, fp)) {
+			ret = -EINVAL;
+			goto out;
+		}
 	}
 
-	return 0;
+out:
+	mutex_unlock(&aux->used_maps_mutex);
+	return ret;
 }
 
 static void bpf_prog_select_func(struct bpf_prog *fp)
@@ -665,18 +665,17 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
 	return __bpf_get_stack(regs, task, NULL, buf, size, flags);
 }
 
-BTF_ID_LIST(bpf_get_task_stack_btf_ids)
-BTF_ID(struct, task_struct)
+BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
 
 const struct bpf_func_proto bpf_get_task_stack_proto = {
 	.func		= bpf_get_task_stack,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_BTF_ID,
+	.arg1_btf_id	= &bpf_get_task_stack_btf_ids[0],
 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
 	.arg4_type	= ARG_ANYTHING,
-	.btf_id		= bpf_get_task_stack_btf_ids,
 };
 
 BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
@@ -2345,12 +2345,8 @@ void bpf_link_put(struct bpf_link *link)
 	if (!atomic64_dec_and_test(&link->refcnt))
 		return;
 
-	if (in_atomic()) {
-		INIT_WORK(&link->work, bpf_link_put_deferred);
-		schedule_work(&link->work);
-	} else {
-		bpf_link_free(link);
-	}
+	INIT_WORK(&link->work, bpf_link_put_deferred);
+	schedule_work(&link->work);
 }
 
 static int bpf_link_release(struct inode *inode, struct file *filp)
@@ -3162,21 +3158,25 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
 	const struct bpf_map *map;
 	int i;
 
+	mutex_lock(&prog->aux->used_maps_mutex);
 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
 		map = prog->aux->used_maps[i];
 		if (map == (void *)addr) {
 			*type = BPF_PSEUDO_MAP_FD;
-			return map;
+			goto out;
 		}
 		if (!map->ops->map_direct_value_meta)
 			continue;
 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
 			*type = BPF_PSEUDO_MAP_VALUE;
-			return map;
+			goto out;
 		}
 	}
+	map = NULL;
 
-	return NULL;
+out:
+	mutex_unlock(&prog->aux->used_maps_mutex);
+	return map;
 }
 
 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
@@ -3294,6 +3294,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
 
+	mutex_lock(&prog->aux->used_maps_mutex);
 	ulen = info.nr_map_ids;
 	info.nr_map_ids = prog->aux->used_map_cnt;
 	ulen = min_t(u32, info.nr_map_ids, ulen);
@@ -3303,9 +3304,12 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 
 		for (i = 0; i < ulen; i++)
 			if (put_user(prog->aux->used_maps[i]->id,
-				     &user_map_ids[i]))
+				     &user_map_ids[i])) {
+				mutex_unlock(&prog->aux->used_maps_mutex);
 				return -EFAULT;
+			}
 	}
+	mutex_unlock(&prog->aux->used_maps_mutex);
 
 	err = set_info_rec_size(&info);
 	if (err)
@@ -4153,6 +4157,66 @@ static int bpf_iter_create(union bpf_attr *attr)
 	return err;
 }
 
+#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
+
+static int bpf_prog_bind_map(union bpf_attr *attr)
+{
+	struct bpf_prog *prog;
+	struct bpf_map *map;
+	struct bpf_map **used_maps_old, **used_maps_new;
+	int i, ret = 0;
+
+	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
+		return -EINVAL;
+
+	if (attr->prog_bind_map.flags)
+		return -EINVAL;
+
+	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	map = bpf_map_get(attr->prog_bind_map.map_fd);
+	if (IS_ERR(map)) {
+		ret = PTR_ERR(map);
+		goto out_prog_put;
+	}
+
+	mutex_lock(&prog->aux->used_maps_mutex);
+
+	used_maps_old = prog->aux->used_maps;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++)
+		if (used_maps_old[i] == map)
+			goto out_unlock;
+
+	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
+				      sizeof(used_maps_new[0]),
+				      GFP_KERNEL);
+	if (!used_maps_new) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	memcpy(used_maps_new, used_maps_old,
+	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
+	used_maps_new[prog->aux->used_map_cnt] = map;
+
+	prog->aux->used_map_cnt++;
+	prog->aux->used_maps = used_maps_new;
+
+	kfree(used_maps_old);
+
+out_unlock:
+	mutex_unlock(&prog->aux->used_maps_mutex);
+
+	if (ret)
+		bpf_map_put(map);
+out_prog_put:
+	bpf_prog_put(prog);
+	return ret;
+}
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
 	union bpf_attr attr;
@@ -4286,6 +4350,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 	case BPF_LINK_DETACH:
 		err = link_detach(&attr);
 		break;
+	case BPF_PROG_BIND_MAP:
+		err = bpf_prog_bind_map(&attr);
+		break;
 	default:
 		err = -EINVAL;
 		break;
@@ -22,7 +22,8 @@ struct bpf_iter_seq_task_info {
 };
 
 static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
-					     u32 *tid)
+					     u32 *tid,
+					     bool skip_if_dup_files)
 {
 	struct task_struct *task = NULL;
 	struct pid *pid;
@@ -36,6 +37,12 @@ retry:
 		if (!task) {
 			++*tid;
 			goto retry;
+		} else if (skip_if_dup_files && task->tgid != task->pid &&
+			   task->files == task->group_leader->files) {
+			put_task_struct(task);
+			task = NULL;
+			++*tid;
+			goto retry;
 		}
 	}
 	rcu_read_unlock();
@@ -48,7 +55,7 @@ static void *task_seq_start(struct seq_file *seq, loff_t *pos)
 	struct bpf_iter_seq_task_info *info = seq->private;
 	struct task_struct *task;
 
-	task = task_seq_get_next(info->common.ns, &info->tid);
+	task = task_seq_get_next(info->common.ns, &info->tid, false);
 	if (!task)
 		return NULL;
 
@@ -65,7 +72,7 @@ static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	++*pos;
 	++info->tid;
 	put_task_struct((struct task_struct *)v);
-	task = task_seq_get_next(info->common.ns, &info->tid);
+	task = task_seq_get_next(info->common.ns, &info->tid, false);
 	if (!task)
 		return NULL;
 
@@ -148,7 +155,7 @@ again:
 		curr_files = *fstruct;
 		curr_fd = info->fd;
 	} else {
-		curr_task = task_seq_get_next(ns, &curr_tid);
+		curr_task = task_seq_get_next(ns, &curr_tid, true);
 		if (!curr_task)
 			return NULL;
@@ -238,7 +238,6 @@ struct bpf_call_arg_meta {
u64 msize_max_value;
int ref_obj_id;
int func_id;
u32 btf_id;
};

struct btf *btf_vmlinux;

@@ -436,6 +435,15 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
return type == ARG_PTR_TO_SOCK_COMMON;
}

static bool arg_type_may_be_null(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
type == ARG_PTR_TO_MEM_OR_NULL ||
type == ARG_PTR_TO_CTX_OR_NULL ||
type == ARG_PTR_TO_SOCKET_OR_NULL ||
type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().

@@ -1490,6 +1498,13 @@ static int check_subprogs(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;

if (code == (BPF_JMP | BPF_CALL) &&
insn[i].imm == BPF_FUNC_tail_call &&
insn[i].src_reg != BPF_PSEUDO_CALL)
subprog[cur_subprog].has_tail_call = true;
if (BPF_CLASS(code) == BPF_LD &&
(BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
subprog[cur_subprog].has_ld_abs = true;
if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)

@@ -2979,10 +2994,37 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
bool tail_call_reachable = false;
int ret_insn[MAX_CALL_FRAMES];
int ret_prog[MAX_CALL_FRAMES];
int j;

process_func:
/* protect against potential stack overflow that might happen when
 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
 * depth for such case down to 256 so that the worst case scenario
 * would result in 8k stack size (32 which is tailcall limit * 256 =
 * 8k).
 *
 * To get the idea what might happen, see an example:
 * func1 -> sub rsp, 128
 *  subfunc1 -> sub rsp, 256
 *  tailcall1 -> add rsp, 256
 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
 *   subfunc2 -> sub rsp, 64
 *   subfunc22 -> sub rsp, 128
 *   tailcall2 -> add rsp, 128
 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
 *
 * tailcall will unwind the current stack frame but it will not get rid
 * of caller's stack as shown on the example above.
 */
if (idx && subprog[idx].has_tail_call && depth >= 256) {
verbose(env,
"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
depth);
return -EACCES;
}
/* round up to 32-bytes, since this is granularity
 * of interpreter stack size
 */

@@ -3011,6 +3053,10 @@ continue_func:
i);
return -EFAULT;
}

if (subprog[idx].has_tail_call)
tail_call_reachable = true;

frame++;
if (frame >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep !\n",

@@ -3019,6 +3065,15 @@ continue_func:
}
goto process_func;
}
/* if tail call got detected across bpf2bpf calls then mark each of the
 * currently present subprog frames as tail call reachable subprogs;
 * this info will be utilized by JIT so that we will be preserving the
 * tail call counter throughout bpf2bpf calls combined with tailcalls
 */
if (tail_call_reachable)
for (j = 0; j < frame; j++)
subprog[ret_prog[j]].tail_call_reachable = true;

/* end of for() loop means the last insn of the 'subprog'
 * was reached. Doesn't matter whether it was JA or EXIT
 */

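To see why 256 is the cap: a program array can be entered at most 32 times (MAX_TAIL_CALL_CNT), and each entry may sit on top of up to 256 bytes of not-yet-unwound bpf2bpf caller stack, bounding the worst case at 32 * 256 = 8192 bytes. A standalone sketch of that arithmetic (the constants mirror the kernel's, the program itself is purely illustrative):

#include <assert.h>

#define MAX_TAIL_CALL_CNT 32   /* kernel tail call limit */
#define CALLER_STACK_CAP  256  /* cap enforced in the hunk above */

int main(void)
{
    int total = 0, i;

    /* each tail call may leave up to CALLER_STACK_CAP bytes of
     * bpf2bpf caller stack behind before jumping to the next prog */
    for (i = 0; i < MAX_TAIL_CALL_CNT; i++)
        total += CALLER_STACK_CAP;

    assert(total == 8192); /* the "8k" worst case from the comment */
    return 0;
}
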
@@ -3594,18 +3649,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
struct bpf_func_state *state = func(env, reg);
int err, min_off, max_off, i, j, slot, spi;

if (reg->type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(reg))
return 0;

verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}

if (tnum_is_const(reg->var_off)) {
min_off = max_off = reg->var_off.value + reg->off;
err = __check_stack_boundary(env, regno, min_off, access_size,

@@ -3750,9 +3793,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
access_size, zero_size_allowed,
"rdwr",
&env->prog->aux->max_rdwr_access);
default: /* scalar_value|ptr_to_stack or invalid ptr */
case PTR_TO_STACK:
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
default: /* scalar_value or invalid ptr */
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(reg))
return 0;

verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
}

@@ -3784,10 +3837,6 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
struct bpf_map *map = reg->map_ptr;
u64 val = reg->var_off.value;

if (reg->type != PTR_TO_MAP_VALUE) {
verbose(env, "R%d is not a pointer to map_value\n", regno);
return -EINVAL;
}
if (!is_const) {
verbose(env,
"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",

@@ -3854,12 +3903,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
type == ARG_CONST_SIZE_OR_ZERO;
}

static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_ALLOC_MEM ||
type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
}

static bool arg_type_is_alloc_size(enum bpf_arg_type type)
{
return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;

@@ -3908,14 +3951,114 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env,
return 0;
}

struct bpf_reg_types {
const enum bpf_reg_type types[10];
};

static const struct bpf_reg_types map_key_value_types = {
.types = {
PTR_TO_STACK,
PTR_TO_PACKET,
PTR_TO_PACKET_META,
PTR_TO_MAP_VALUE,
},
};

static const struct bpf_reg_types sock_types = {
.types = {
PTR_TO_SOCK_COMMON,
PTR_TO_SOCKET,
PTR_TO_TCP_SOCK,
PTR_TO_XDP_SOCK,
},
};

static const struct bpf_reg_types mem_types = {
.types = {
PTR_TO_STACK,
PTR_TO_PACKET,
PTR_TO_PACKET_META,
PTR_TO_MAP_VALUE,
PTR_TO_MEM,
PTR_TO_RDONLY_BUF,
PTR_TO_RDWR_BUF,
},
};

static const struct bpf_reg_types int_ptr_types = {
.types = {
PTR_TO_STACK,
PTR_TO_PACKET,
PTR_TO_PACKET_META,
PTR_TO_MAP_VALUE,
},
};

static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };

static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
[ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
[ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
[ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
[ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
[ARG_CONST_SIZE] = &scalar_types,
[ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
[ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
[ARG_CONST_MAP_PTR] = &const_map_ptr_types,
[ARG_PTR_TO_CTX] = &context_types,
[ARG_PTR_TO_CTX_OR_NULL] = &context_types,
[ARG_PTR_TO_SOCK_COMMON] = &sock_types,
[ARG_PTR_TO_SOCKET] = &fullsock_types,
[ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
[ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
[ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
[ARG_PTR_TO_MEM] = &mem_types,
[ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
[ARG_PTR_TO_UNINIT_MEM] = &mem_types,
[ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
[ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
[ARG_PTR_TO_INT] = &int_ptr_types,
[ARG_PTR_TO_LONG] = &int_ptr_types,
};

static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
const struct bpf_reg_types *compatible)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected, type = reg->type;
int i, j;

for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
expected = compatible->types[i];
if (expected == NOT_INIT)
break;

if (type == expected)
return 0;
}

verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
for (j = 0; j + 1 < i; j++)
verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
return -EACCES;
}

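check_reg_type() makes the argument checks table-driven: every ARG_* points at a NOT_INIT-terminated list of acceptable register types (NOT_INIT is 0, so the arrays are implicitly zero-terminated) and the checker just scans it. A self-contained sketch of the same pattern; the enum values and table contents here are illustrative, not the kernel's:

#include <stdio.h>

enum reg_type { NOT_INIT = 0, SCALAR, PTR_TO_STACK, PTR_TO_MAP_VALUE, PTR_TO_CTX };

struct reg_types { enum reg_type types[10]; }; /* NOT_INIT (0) terminated */

static const struct reg_types map_key_value = {
    .types = { PTR_TO_STACK, PTR_TO_MAP_VALUE },
};

static int check_type(enum reg_type got, const struct reg_types *ok)
{
    int i;

    for (i = 0; i < 10 && ok->types[i] != NOT_INIT; i++)
        if (got == ok->types[i])
            return 0;
    return -1; /* the kernel returns -EACCES and prints the list */
}

int main(void)
{
    printf("%d\n", check_type(PTR_TO_STACK, &map_key_value)); /* 0  */
    printf("%d\n", check_type(PTR_TO_CTX, &map_key_value));   /* -1 */
    return 0;
}
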
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn)
{
u32 regno = BPF_REG_1 + arg;
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
enum bpf_arg_type arg_type = fn->arg_type[arg];
const struct bpf_reg_types *compatible;
enum bpf_reg_type type = reg->type;
int err = 0;

if (arg_type == ARG_DONTCARE)

@@ -3948,125 +4091,48 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return err;
}

if (arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE ||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
expected_type = PTR_TO_STACK;
if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO ||
arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) {
expected_type = SCALAR_VALUE;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_CTX ||
arg_type == ARG_PTR_TO_CTX_OR_NULL) {
expected_type = PTR_TO_CTX;
if (!(register_is_null(reg) &&
arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
if (type != expected_type)
goto err_type;
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
}
} else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
expected_type = PTR_TO_SOCK_COMMON;
/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
if (!type_is_sk_pointer(type))
goto err_type;
if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
meta->ref_obj_id);
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
}
} else if (arg_type == ARG_PTR_TO_SOCKET ||
arg_type == ARG_PTR_TO_SOCKET_OR_NULL) {
expected_type = PTR_TO_SOCKET;
if (!(register_is_null(reg) &&
arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) {
if (type != expected_type)
goto err_type;
}
} else if (arg_type == ARG_PTR_TO_BTF_ID) {
bool ids_match = false;
if (register_is_null(reg) && arg_type_may_be_null(arg_type))
/* A NULL register has a SCALAR_VALUE type, so skip
 * type checking.
 */
goto skip_type_check;

expected_type = PTR_TO_BTF_ID;
if (type != expected_type)
goto err_type;
if (!fn->check_btf_id) {
if (reg->btf_id != meta->btf_id) {
ids_match = btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
meta->btf_id);
if (!ids_match) {
verbose(env, "Helper has type %s got %s in R%d\n",
kernel_type_name(meta->btf_id),
kernel_type_name(reg->btf_id), regno);
return -EACCES;
}
}
} else if (!fn->check_btf_id(reg->btf_id, arg)) {
verbose(env, "Helper does not support %s in R%d\n",
kernel_type_name(reg->btf_id), regno);
compatible = compatible_reg_types[arg_type];
if (!compatible) {
verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
return -EFAULT;
}

err = check_reg_type(env, regno, compatible);
if (err)
return err;

if (type == PTR_TO_BTF_ID) {
const u32 *btf_id = fn->arg_btf_id[arg];

if (!btf_id) {
verbose(env, "verifier internal error: missing BTF ID\n");
return -EFAULT;
}

if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id, *btf_id)) {
verbose(env, "R%d is of type %s but %s is expected\n",
regno, kernel_type_name(reg->btf_id), kernel_type_name(*btf_id));
return -EACCES;
}
if ((reg->off && !ids_match) || !tnum_is_const(reg->var_off) || reg->var_off.value) {
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
regno);
return -EACCES;
}
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
if (process_spin_lock(env, regno, true))
return -EACCES;
} else if (meta->func_id == BPF_FUNC_spin_unlock) {
if (process_spin_lock(env, regno, false))
return -EACCES;
} else {
verbose(env, "verifier internal error\n");
return -EFAULT;
}
} else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
 * passed in as argument, it's a SCALAR_VALUE type. Final test
 * happens during stack boundary checking.
 */
if (register_is_null(reg) &&
(arg_type == ARG_PTR_TO_MEM_OR_NULL ||
arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL))
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != PTR_TO_MEM &&
type != PTR_TO_RDONLY_BUF &&
type != PTR_TO_RDWR_BUF &&
type != expected_type)
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
} else if (arg_type_is_alloc_mem_ptr(arg_type)) {
expected_type = PTR_TO_MEM;
if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (type != expected_type)
goto err_type;
} else if (type == PTR_TO_CTX) {
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
}

skip_type_check:
if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,

@@ -4074,15 +4140,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
} else if (arg_type_is_int_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
} else {
verbose(env, "unsupported arg_type %d\n", arg_type);
return -EFAULT;
}

if (arg_type == ARG_CONST_MAP_PTR) {

@@ -4121,6 +4178,22 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
meta);
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
if (process_spin_lock(env, regno, true))
return -EACCES;
} else if (meta->func_id == BPF_FUNC_spin_unlock) {
if (process_spin_lock(env, regno, false))
return -EACCES;
} else {
verbose(env, "verifier internal error\n");
return -EFAULT;
}
} else if (arg_type_is_mem_ptr(arg_type)) {
/* The access to this pointer is only checked when we hit the
 * next is_mem_size argument below.
 */
meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);

@@ -4186,10 +4259,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
}

return err;
err_type:
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[type], reg_type_str[expected_type]);
return -EACCES;
}

static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)

@@ -4224,6 +4293,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
return false;
}

static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
{
return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
}

static int check_map_func_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map, int func_id)
{

@@ -4339,8 +4413,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
if (env->subprog_cnt > 1) {
verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
return -EINVAL;
}
break;

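With the check now gated on allow_tail_call_in_subprogs(), a JIT-compiled x86-64 program may mix bpf2bpf calls with tail calls. A hedged BPF C sketch of the newly accepted shape (map, section and function names are illustrative; assumes clang -target bpf and libbpf headers):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
    __uint(max_entries, 1);
    __uint(key_size, sizeof(__u32));
    __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

/* A genuine subprogram (not inlined) doing a tail call: previously
 * rejected with "tail_calls are not allowed in programs with
 * bpf-to-bpf calls". It must return a scalar int (see the BTF
 * return-type checks added later in this series). */
static __attribute__((noinline)) int do_tail_call(struct __sk_buff *skb)
{
    bpf_tail_call(skb, &jmp_table, 0);
    return 0;
}

SEC("classifier")
int entry(struct __sk_buff *skb)
{
    return do_tail_call(skb);
}

char _license[] SEC("license") = "GPL";
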
@@ -4495,10 +4569,22 @@ static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
return count <= 1;
}

static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{
int i;

for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++)
if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
return false;

return true;
}

static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
check_btf_id_ok(fn) &&
check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
}

@@ -4894,11 +4980,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
meta.func_id = func_id;
/* check args */
for (i = 0; i < 5; i++) {
if (!fn->check_btf_id) {
err = btf_resolve_helper_id(&env->log, fn, i);
if (err > 0)
meta.btf_id = err;
}
err = check_func_arg(env, i, &meta, fn);
if (err)
return err;

@@ -5317,6 +5398,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case CONST_PTR_TO_MAP:
/* smin_val represents the known value */
if (known && smin_val == 0 && opcode == BPF_ADD)
break;
/* fall-through */
case PTR_TO_PACKET_END:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:

@@ -7461,18 +7546,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}

if (env->subprog_cnt > 1) {
/* when program has LD_ABS insn JITs and interpreter assume
 * that r1 == ctx == skb which is not the case for callees
 * that can have arbitrary arguments. It's problematic
 * for main prog as well since JITs would need to analyze
 * all functions in order to make proper register save/restore
 * decisions in the main prog. Hence disallow LD_ABS with calls
 */
verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
return -EINVAL;
}

if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {

@@ -7883,6 +7956,23 @@ err_free:
return ret;
}

static int check_abnormal_return(struct bpf_verifier_env *env)
{
int i;

for (i = 1; i < env->subprog_cnt; i++) {
if (env->subprog_info[i].has_ld_abs) {
verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
return -EINVAL;
}
if (env->subprog_info[i].has_tail_call) {
verbose(env, "tail_call is not allowed in subprogs without BTF\n");
return -EINVAL;
}
}
return 0;
}

/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE 8
#define MAX_FUNCINFO_REC_SIZE 252

@@ -7891,20 +7981,24 @@ static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
const struct btf_type *type, *func_proto, *ret_type;
u32 i, nfuncs, urec_size, min_size;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
struct bpf_func_info_aux *info_aux = NULL;
const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
u32 prev_offset = 0;
bool scalar_return;
int ret = -ENOMEM;

nfuncs = attr->func_info_cnt;
if (!nfuncs)
if (!nfuncs) {
if (check_abnormal_return(env))
return -EINVAL;
return 0;
}

if (nfuncs != env->subprog_cnt) {
verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");

@@ -7952,25 +8046,23 @@ static int check_btf_func(struct bpf_verifier_env *env,
}

/* check insn_off */
ret = -EINVAL;
if (i == 0) {
if (krecord[i].insn_off) {
verbose(env,
"nonzero insn_off %u for the first func info record",
krecord[i].insn_off);
ret = -EINVAL;
goto err_free;
}
} else if (krecord[i].insn_off <= prev_offset) {
verbose(env,
"same or smaller insn offset (%u) than previous func info record (%u)",
krecord[i].insn_off, prev_offset);
ret = -EINVAL;
goto err_free;
}

if (env->subprog_info[i].start != krecord[i].insn_off) {
verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
ret = -EINVAL;
goto err_free;
}

@@ -7979,10 +8071,26 @@ static int check_btf_func(struct bpf_verifier_env *env,
if (!type || !btf_type_is_func(type)) {
verbose(env, "invalid type id %d in func info",
krecord[i].type_id);
ret = -EINVAL;
goto err_free;
}
info_aux[i].linkage = BTF_INFO_VLEN(type->info);

func_proto = btf_type_by_id(btf, type->type);
if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
/* btf_func_check() already verified it during BTF load */
goto err_free;
ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
scalar_return =
btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
goto err_free;
}
if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
goto err_free;
}

prev_offset = krecord[i].insn_off;
urecord += urec_size;
}

@@ -8143,8 +8251,11 @@ static int check_btf_info(struct bpf_verifier_env *env,
struct btf *btf;
int err;

if (!attr->func_info_cnt && !attr->line_info_cnt)
if (!attr->func_info_cnt && !attr->line_info_cnt) {
if (check_abnormal_return(env))
return -EINVAL;
return 0;
}

btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf))

@@ -9619,6 +9730,18 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
}
}

static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
{
struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
int i, sz = prog->aux->size_poke_tab;
struct bpf_jit_poke_descriptor *desc;

for (i = 0; i < sz; i++) {
desc = &tab[i];
desc->insn_idx += len - 1;
}
}

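adjust_poke_descs() keeps the tail-call poke table consistent with instruction patching: when one instruction is rewritten into a patch of len instructions, the recorded insn_idx values shift by len - 1. A toy standalone illustration with a simplified descriptor (not the kernel struct):

#include <assert.h>

struct poke { int insn_idx; };

static void adjust(struct poke *tab, int n, int patch_len)
{
    int i;

    /* one insn became patch_len insns, so recorded indices move down */
    for (i = 0; i < n; i++)
        tab[i].insn_idx += patch_len - 1;
}

int main(void)
{
    struct poke tab[] = { { 10 }, { 25 } };

    adjust(tab, 2, 3); /* a 3-insn patch replaced a single insn */
    assert(tab[0].insn_idx == 12 && tab[1].insn_idx == 27);
    return 0;
}
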
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{

@@ -9635,6 +9758,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
if (adjust_insn_aux_data(env, new_prog, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
adjust_poke_descs(new_prog, len);
return new_prog;
}

@@ -10165,6 +10289,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog, **func, *tmp;
int i, j, subprog_start, subprog_end = 0, len, subprog;
struct bpf_map *map_ptr;
struct bpf_insn *insn;
void *old_bpf_func;
int err, num_exentries;

@@ -10232,6 +10357,31 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->func_info = prog->aux->func_info;

for (j = 0; j < prog->aux->size_poke_tab; j++) {
u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
int ret;

if (!(insn_idx >= subprog_start &&
insn_idx <= subprog_end))
continue;

ret = bpf_jit_add_poke_descriptor(func[i],
&prog->aux->poke_tab[j]);
if (ret < 0) {
verbose(env, "adding tail call poke descriptor failed\n");
goto out_free;
}

func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;

map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
if (ret < 0) {
verbose(env, "tracking tail call prog failed\n");
goto out_free;
}
}

/* Use bpf_prog_F_tag to indicate functions in stack traces.
 * Long term would need debug info to populate names
 */

@@ -10250,6 +10400,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
num_exentries++;
}
func[i]->aux->num_exentries = num_exentries;
func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;

@@ -10257,6 +10408,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
}
cond_resched();
}

/* Untrack main program's aux structs so that during map_poke_run()
 * we will not stumble upon the unfilled poke descriptors; each
 * of the main program's poke descs got distributed across subprogs
 * and got tracked onto map, so we are sure that none of them will
 * be missed after the operation below
 */
for (i = 0; i < prog->aux->size_poke_tab; i++) {
map_ptr = prog->aux->poke_tab[i].tail_call.map;

map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
}

/* at this point all bpf functions were successfully JITed
 * now populate all bpf_calls with correct addresses and
 * run last pass of JIT

@@ -10325,9 +10489,16 @@ static int jit_subprogs(struct bpf_verifier_env *env)
bpf_prog_free_unused_jited_linfo(prog);
return 0;
out_free:
for (i = 0; i < env->subprog_cnt; i++)
if (func[i])
bpf_jit_free(func[i]);
for (i = 0; i < env->subprog_cnt; i++) {
if (!func[i])
continue;

for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
}
bpf_jit_free(func[i]);
}
kfree(func);
out_undo_insn:
/* cleanup main prog to be interpreted */

@@ -10361,6 +10532,13 @@ static int fixup_call_args(struct bpf_verifier_env *env)
return err;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
/* When JIT fails the progs with bpf2bpf calls and tail_calls
 * have to be rejected, since interpreter doesn't support them yet.
 */
verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
return -EINVAL;
}
for (i = 0; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)

@@ -10524,8 +10702,9 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 * the program array.
 */
prog->cb_access = 1;
env->prog->aux->stack_depth = MAX_BPF_STACK;
env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
if (!allow_tail_call_in_subprogs(env))
prog->aux->stack_depth = MAX_BPF_STACK;
prog->aux->max_pkt_offset = MAX_PACKET_OFF;

/* mark bpf_tail_call as different opcode to avoid
 * conditional branch in the interpreter for every normal

@@ -10545,6 +10724,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
.reason = BPF_POKE_REASON_TAIL_CALL,
.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
.tail_call.key = bpf_map_key_immediate(aux),
.insn_idx = i + delta,
};

ret = bpf_jit_add_poke_descriptor(prog, &desc);

@@ -743,19 +743,18 @@ out:
return err;
}

BTF_ID_LIST(bpf_seq_printf_btf_ids)
BTF_ID(struct, seq_file)
BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
.func = bpf_seq_printf,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_seq_printf_btf_ids,
};

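BTF_ID_LIST_SINGLE collapses the common one-element case: rather than a BTF_ID_LIST plus a separate BTF_ID entry, one macro emits both. Its definition in include/linux/btf_ids.h is roughly the following (shown for context; treat the exact expansion as indicative):

#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
    BTF_ID_LIST(name) \
    BTF_ID(prefix, typename)

/* so the replaced two-line form is equivalent to: */
BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
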
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)

@@ -763,17 +762,14 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

BTF_ID_LIST(bpf_seq_write_btf_ids)
BTF_ID(struct, seq_file)

static const struct bpf_func_proto bpf_seq_write_proto = {
.func = bpf_seq_write,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_seq_write_btf_ids,
};

static __always_inline int

@@ -1118,6 +1114,14 @@ BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)

@@ -1130,17 +1134,16 @@ static bool bpf_d_path_allowed(const struct bpf_prog *prog)
return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
}

BTF_ID_LIST(bpf_d_path_btf_ids)
BTF_ID(struct, path)
BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
.func = bpf_d_path,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_d_path_btf_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_d_path_btf_ids,
.allowed = bpf_d_path_allowed,
};

@@ -12,7 +12,6 @@
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

@@ -379,19 +378,15 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg2_type = ARG_PTR_TO_SOCKET,
};

BTF_ID_LIST(sk_storage_btf_ids)
BTF_ID_UNUSED
BTF_ID(struct, sock)

const struct bpf_func_proto sk_storage_get_btf_proto = {
.func = bpf_sk_storage_get,
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
.btf_id = sk_storage_btf_ids,
};

const struct bpf_func_proto sk_storage_delete_btf_proto = {

@@ -400,7 +395,7 @@ const struct bpf_func_proto sk_storage_delete_btf_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.btf_id = sk_storage_btf_ids,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
};

struct bpf_sk_storage_diag {

@@ -679,6 +674,7 @@ struct bpf_iter_seq_sk_storage_map_info {
static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
struct bpf_local_storage_elem *prev_selem)
__acquires(RCU) __releases(RCU)
{
struct bpf_local_storage *sk_storage;
struct bpf_local_storage_elem *selem;

@@ -697,16 +693,16 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
selem = prev_selem;
count = 0;
while (selem) {
selem = hlist_entry_safe(selem->map_node.next,
selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
struct bpf_local_storage_elem, map_node);
if (!selem) {
/* not found, unlock and go to the next bucket */
b = &smap->buckets[bucket_id++];
raw_spin_unlock_bh(&b->lock);
rcu_read_unlock();
skip_elems = 0;
break;
}
sk_storage = rcu_dereference_raw(selem->local_storage);
sk_storage = rcu_dereference(selem->local_storage);
if (sk_storage) {
info->skip_elems = skip_elems + count;
return selem;

@@ -716,10 +712,10 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,

for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
b = &smap->buckets[i];
raw_spin_lock_bh(&b->lock);
rcu_read_lock();
count = 0;
hlist_for_each_entry(selem, &b->list, map_node) {
sk_storage = rcu_dereference_raw(selem->local_storage);
hlist_for_each_entry_rcu(selem, &b->list, map_node) {
sk_storage = rcu_dereference(selem->local_storage);
if (sk_storage && count >= skip_elems) {
info->bucket_id = i;
info->skip_elems = count;

@@ -727,7 +723,7 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
}
count++;
}
raw_spin_unlock_bh(&b->lock);
rcu_read_unlock();
skip_elems = 0;
}

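The hunks above drop the per-bucket spinlock in favor of plain RCU protection while walking the bucket lists, which is why rcu_dereference_raw() plus open-coded next-pointer chasing becomes rcu_dereference() and hlist_for_each_entry_rcu(). The reader-side idiom being adopted looks like this (a kernel-style fragment, not a standalone program; elem, head and use() are placeholders):

rcu_read_lock();
hlist_for_each_entry_rcu(elem, head, node) {
    /* elem may be freed after a grace period once the read-side
     * critical section ends, so don't hold it past the unlock */
    use(elem);
}
rcu_read_unlock();
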
@@ -786,7 +782,7 @@ static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
ctx.meta = &meta;
ctx.map = info->map;
if (selem) {
sk_storage = rcu_dereference_raw(selem->local_storage);
sk_storage = rcu_dereference(selem->local_storage);
ctx.sk = sk_storage->owner;
ctx.value = SDATA(selem)->data;
}

@@ -802,18 +798,12 @@ static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
struct bpf_local_storage_map *smap;
struct bpf_local_storage_map_bucket *b;

if (!v) {
if (!v)
(void)__bpf_sk_storage_map_seq_show(seq, v);
} else {
smap = (struct bpf_local_storage_map *)info->map;
b = &smap->buckets[info->bucket_id];
raw_spin_unlock_bh(&b->lock);
}
else
rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,

@@ -5442,15 +5442,20 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
if (new) {
u32 i;

mutex_lock(&new->aux->used_maps_mutex);

/* generic XDP does not work with DEVMAPs that can
 * have a bpf_prog installed on an entry
 */
for (i = 0; i < new->aux->used_map_cnt; i++) {
if (dev_map_can_have_prog(new->aux->used_maps[i]))
return -EINVAL;
if (cpu_map_prog_allowed(new->aux->used_maps[i]))
if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
cpu_map_prog_allowed(new->aux->used_maps[i])) {
mutex_unlock(&new->aux->used_maps_mutex);
return -EINVAL;
}
}

mutex_unlock(&new->aux->used_maps_mutex);
}

switch (xdp->command) {

@@ -3803,19 +3803,18 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BTF_ID_LIST(bpf_skb_output_btf_ids)
BTF_ID(struct, sk_buff)
BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)

const struct bpf_func_proto bpf_skb_output_proto = {
.func = bpf_skb_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_skb_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_skb_output_btf_ids,
};

static unsigned short bpf_tunnel_key_af(u64 flags)

@@ -4199,19 +4198,18 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BTF_ID_LIST(bpf_xdp_output_btf_ids)
BTF_ID(struct, xdp_buff)
BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)

const struct bpf_func_proto bpf_xdp_output_proto = {
.func = bpf_xdp_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_xdp_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_xdp_output_btf_ids,
};

BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)

@@ -4313,10 +4311,8 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};

#define SOCKOPT_CC_REINIT (1 << 0)

static int _bpf_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen, u32 flags)
char *optval, int optlen)
{
char devname[IFNAMSIZ];
int val, valbool;

@@ -4449,13 +4445,11 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
sk->sk_prot->setsockopt == tcp_setsockopt) {
if (optname == TCP_CONGESTION) {
char name[TCP_CA_NAME_MAX];
bool reinit = flags & SOCKOPT_CC_REINIT;

strncpy(name, optval, min_t(long, optlen,
TCP_CA_NAME_MAX-1));
name[TCP_CA_NAME_MAX-1] = 0;
ret = tcp_set_congestion_control(sk, name, false,
reinit, true);
ret = tcp_set_congestion_control(sk, name, false, true);
} else {
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);

@@ -4615,9 +4609,7 @@ err_clear:
BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{
u32 flags = 0;
return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen,
flags);
return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
}

static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {

@@ -4651,11 +4643,7 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
u32 flags = 0;
if (bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
flags |= SOCKOPT_CC_REINIT;
return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen,
flags);
return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
}

static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {

@@ -9908,17 +9896,6 @@ BTF_SOCK_TYPE_xxx
u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
#endif

static bool check_arg_btf_id(u32 btf_id, u32 arg)
{
int i;

/* only one argument, no need to check arg */
for (i = 0; i < MAX_BTF_SOCK_TYPE; i++)
if (btf_sock_ids[i] == btf_id)
return true;
return false;
}

BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{
/* tcp6_sock type is not generated in dwarf and hence btf,

@@ -9937,7 +9914,7 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
};

@@ -9954,7 +9931,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
};

@@ -9978,7 +9955,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
};

@@ -10002,7 +9979,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
};

@@ -10024,6 +10001,6 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};

@@ -2,6 +2,7 @@
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>

@@ -382,7 +383,7 @@ static void *sock_map_lookup(struct bpf_map *map, void *key)
struct sock *sk;

sk = __sock_map_lookup_elem(map, *(u32 *)key);
if (!sk || !sk_fullsock(sk))
if (!sk)
return NULL;
if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;

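Dropping the sk_fullsock() test lets lookups return any socket kind; safety instead rests on the inc-not-zero idiom: under RCU, take a reference only if the refcount is still nonzero, otherwise report a miss. The shape of the pattern, annotated (kernel-style fragment, mirroring the hunk above):

sk = __sock_map_lookup_elem(map, *(u32 *)key); /* RCU-protected lookup */
if (!sk)
    return NULL;
if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
    return NULL; /* raced with the final put; entry is being freed */
return sk;
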
@@ -703,6 +704,109 @@ const struct bpf_func_proto bpf_msg_redirect_map_proto = {
.arg4_type = ARG_ANYTHING,
};

struct sock_map_seq_info {
struct bpf_map *map;
struct sock *sk;
u32 index;
};

struct bpf_iter__sockmap {
__bpf_md_ptr(struct bpf_iter_meta *, meta);
__bpf_md_ptr(struct bpf_map *, map);
__bpf_md_ptr(void *, key);
__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
struct bpf_map *map, void *key,
struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
if (unlikely(info->index >= info->map->max_entries))
return NULL;

info->sk = __sock_map_lookup_elem(info->map, info->index);

/* can't return sk directly, since that might be NULL */
return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
{
struct sock_map_seq_info *info = seq->private;

if (*pos == 0)
++*pos;

/* pairs with sock_map_seq_stop */
rcu_read_lock();
return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock_map_seq_info *info = seq->private;

++*pos;
++info->index;

return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
{
struct sock_map_seq_info *info = seq->private;
struct bpf_iter__sockmap ctx = {};
struct bpf_iter_meta meta;
struct bpf_prog *prog;

meta.seq = seq;
prog = bpf_iter_get_info(&meta, !v);
if (!prog)
return 0;

ctx.meta = &meta;
ctx.map = info->map;
if (v) {
ctx.key = &info->index;
ctx.sk = info->sk;
}

return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
{
if (!v)
(void)sock_map_seq_show(seq, NULL);

/* pairs with sock_map_seq_start */
rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
.start = sock_map_seq_start,
.next = sock_map_seq_next,
.stop = sock_map_seq_stop,
.show = sock_map_seq_show,
};

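The four callbacks follow the usual seq_file contract: the core calls start() once (which takes the RCU read lock here), then alternates show()/next() until next() returns NULL, and finally stop() (which drops it). Roughly, the driving loop is (a sketch, not the actual fs/seq_file.c code):

p = ops->start(seq, &pos);
while (p) {
    ops->show(seq, p);
    p = ops->next(seq, p, &pos);
}
ops->stop(seq, p); /* reached with p == NULL too, hence the
                    * "if (!v) show(seq, NULL)" in stop() above */
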
static int sock_map_init_seq_private(void *priv_data,
struct bpf_iter_aux_info *aux)
{
struct sock_map_seq_info *info = priv_data;

info->map = aux->map;
return 0;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
.seq_ops = &sock_map_seq_ops,
.init_seq_private = sock_map_init_seq_private,
.seq_priv_size = sizeof(struct sock_map_seq_info),
};

static int sock_map_btf_id;
const struct bpf_map_ops sock_map_ops = {
.map_meta_equal = bpf_map_meta_equal,

@@ -717,6 +821,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_stab",
.map_btf_id = &sock_map_btf_id,
.iter_seq_info = &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {

@@ -953,7 +1058,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key,
if (!elem)
goto find_first_elem;

elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
struct bpf_shtab_elem, node);
if (elem_next) {
memcpy(key_next, elem_next->key, key_size);

@@ -965,7 +1070,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key,
find_first_elem:
for (; i < htab->buckets_num; i++) {
head = &sock_hash_select_bucket(htab, i)->head;
elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
struct bpf_shtab_elem, node);
if (elem_next) {
memcpy(key_next, elem_next->key, key_size);

@@ -1110,7 +1215,7 @@ static void *sock_hash_lookup(struct bpf_map *map, void *key)
struct sock *sk;

sk = __sock_hash_lookup_elem(map, key);
if (!sk || !sk_fullsock(sk))
if (!sk)
return NULL;
if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;

@@ -1199,6 +1304,117 @@ const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
.arg4_type = ARG_ANYTHING,
};

struct sock_hash_seq_info {
struct bpf_map *map;
struct bpf_shtab *htab;
u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
struct bpf_shtab_elem *prev_elem)
{
const struct bpf_shtab *htab = info->htab;
struct bpf_shtab_bucket *bucket;
struct bpf_shtab_elem *elem;
struct hlist_node *node;

/* try to find next elem in the same bucket */
if (prev_elem) {
node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
if (elem)
return elem;

/* no more elements, continue in the next bucket */
info->bucket_id++;
}

for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
bucket = &htab->buckets[info->bucket_id];
node = rcu_dereference(hlist_first_rcu(&bucket->head));
elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
if (elem)
return elem;
}

return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
{
struct sock_hash_seq_info *info = seq->private;

if (*pos == 0)
++*pos;

/* pairs with sock_hash_seq_stop */
rcu_read_lock();
return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock_hash_seq_info *info = seq->private;

++*pos;
return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
{
struct sock_hash_seq_info *info = seq->private;
struct bpf_iter__sockmap ctx = {};
struct bpf_shtab_elem *elem = v;
struct bpf_iter_meta meta;
struct bpf_prog *prog;

meta.seq = seq;
prog = bpf_iter_get_info(&meta, !elem);
if (!prog)
return 0;

ctx.meta = &meta;
ctx.map = info->map;
if (elem) {
ctx.key = elem->key;
ctx.sk = elem->sk;
}

return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
{
if (!v)
(void)sock_hash_seq_show(seq, NULL);

/* pairs with sock_hash_seq_start */
rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
.start = sock_hash_seq_start,
.next = sock_hash_seq_next,
.stop = sock_hash_seq_stop,
.show = sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
struct bpf_iter_aux_info *aux)
{
struct sock_hash_seq_info *info = priv_data;

info->map = aux->map;
info->htab = container_of(aux->map, struct bpf_shtab, map);
return 0;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
.seq_ops = &sock_hash_seq_ops,
.init_seq_private = sock_hash_init_seq_private,
.seq_priv_size = sizeof(struct sock_hash_seq_info),
};

static int sock_hash_map_btf_id;
const struct bpf_map_ops sock_hash_ops = {
.map_meta_equal = bpf_map_meta_equal,

@@ -1213,6 +1429,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_shtab",
.map_btf_id = &sock_hash_map_btf_id,
.iter_seq_info = &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)

@@ -1323,3 +1540,62 @@ void sock_map_close(struct sock *sk, long timeout)
release_sock(sk);
saved_close(sk, timeout);
}

static int sock_map_iter_attach_target(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux)
{
struct bpf_map *map;
int err = -EINVAL;

if (!linfo->map.map_fd)
return -EBADF;

map = bpf_map_get_with_uref(linfo->map.map_fd);
if (IS_ERR(map))
return PTR_ERR(map);

if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto put_map;

if (prog->aux->max_rdonly_access > map->key_size) {
err = -EACCES;
goto put_map;
}

aux->map = map;
return 0;

put_map:
bpf_map_put_with_uref(map);
return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
.target = "sockmap",
.attach_target = sock_map_iter_attach_target,
.detach_target = sock_map_iter_detach_target,
.show_fdinfo = bpf_iter_map_show_fdinfo,
.fill_link_info = bpf_iter_map_fill_link_info,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__sockmap, key),
PTR_TO_RDONLY_BUF_OR_NULL },
{ offsetof(struct bpf_iter__sockmap, sk),
PTR_TO_BTF_ID_OR_NULL },
},
};

static int __init bpf_sockmap_iter_init(void)
{
sock_map_iter_reg.ctx_arg_info[1].btf_id =
btf_sock_ids[BTF_SOCK_TYPE_SOCK];
return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);

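From user space the new iterator is parameterized with a map fd at link creation. A hedged libbpf sketch (assumes a loaded object exposing a suitable iter/sockmap program and the target map; names and error handling are illustrative):

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static void dump_sockmap(struct bpf_program *prog, struct bpf_map *map)
{
    DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
    union bpf_iter_link_info linfo = {};
    struct bpf_link *link;
    char buf[4096];
    int iter_fd, n;

    linfo.map.map_fd = bpf_map__fd(map);
    opts.link_info = &linfo;
    opts.link_info_len = sizeof(linfo);

    link = bpf_program__attach_iter(prog, &opts);  /* sockmap iter link */
    iter_fd = bpf_iter_create(bpf_link__fd(link)); /* one traversal */
    while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
        fwrite(buf, 1, n, stdout);

    close(iter_fd);
    bpf_link__destroy(link);
}
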
@@ -28,23 +28,18 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

static int btf_sk_storage_get_ids[5];
static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;

static int btf_sk_storage_delete_ids[5];
static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;

static void convert_sk_func_proto(struct bpf_func_proto *to, int *to_btf_ids,
const struct bpf_func_proto *from)
static void convert_sk_func_proto(struct bpf_func_proto *to, const struct bpf_func_proto *from)
{
int i;

*to = *from;
to->btf_id = to_btf_ids;
for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
if (to->arg_type[i] == ARG_PTR_TO_SOCKET) {
to->arg_type[i] = ARG_PTR_TO_BTF_ID;
to->btf_id[i] = tcp_sock_id;
to->arg_btf_id[i] = &tcp_sock_id;
}
}
}

@@ -64,12 +59,8 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

convert_sk_func_proto(&btf_sk_storage_get_proto,
btf_sk_storage_get_ids,
&bpf_sk_storage_get_proto);
convert_sk_func_proto(&btf_sk_storage_delete_proto,
btf_sk_storage_delete_ids,
&bpf_sk_storage_delete_proto);
convert_sk_func_proto(&btf_sk_storage_get_proto, &bpf_sk_storage_get_proto);
convert_sk_func_proto(&btf_sk_storage_delete_proto, &bpf_sk_storage_delete_proto);

return 0;
}

@@ -185,8 +176,8 @@ static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
/* In case we want to report error later */
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &tcp_sock_id,
.arg2_type = ARG_ANYTHING,
.btf_id = &tcp_sock_id,
};

static const struct bpf_func_proto *

@@ -2696,6 +2696,7 @@ int tcp_disconnect(struct sock *sk, int flags)
    if (icsk->icsk_ca_ops->release)
        icsk->icsk_ca_ops->release(sk);
    memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
    icsk->icsk_ca_initialized = 0;
    tcp_set_ca_state(sk, TCP_CA_Open);
    tp->is_sack_reneg = 0;
    tcp_clear_retrans(tp);

@@ -3047,7 +3048,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
        name[val] = 0;

        lock_sock(sk);
        err = tcp_set_congestion_control(sk, name, true, true,
        err = tcp_set_congestion_control(sk, name, true,
                                         ns_capable(sock_net(sk)->user_ns,
                                                    CAP_NET_ADMIN));
        release_sock(sk);
@@ -176,7 +176,7 @@ void tcp_assign_congestion_control(struct sock *sk)

void tcp_init_congestion_control(struct sock *sk)
{
    const struct inet_connection_sock *icsk = inet_csk(sk);
    struct inet_connection_sock *icsk = inet_csk(sk);

    tcp_sk(sk)->prior_ssthresh = 0;
    if (icsk->icsk_ca_ops->init)

@@ -185,6 +185,7 @@ void tcp_init_congestion_control(struct sock *sk)
        INET_ECN_xmit(sk);
    else
        INET_ECN_dontxmit(sk);
    icsk->icsk_ca_initialized = 1;
}

static void tcp_reinit_congestion_control(struct sock *sk,

@@ -340,7 +341,7 @@ out:
 * already initialized.
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
                               bool reinit, bool cap_net_admin)
                               bool cap_net_admin)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    const struct tcp_congestion_ops *ca;

@@ -361,28 +362,14 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
        goto out;
    }

    if (!ca) {
    if (!ca)
        err = -ENOENT;
    } else if (!load) {
        const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;

        if (bpf_try_module_get(ca, ca->owner)) {
            if (reinit) {
                tcp_reinit_congestion_control(sk, ca);
            } else {
                icsk->icsk_ca_ops = ca;
                bpf_module_put(old_ca, old_ca->owner);
            }
        } else {
            err = -EBUSY;
        }
    } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
    else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
        err = -EPERM;
    } else if (!bpf_try_module_get(ca, ca->owner)) {
    else if (!bpf_try_module_get(ca, ca->owner))
        err = -EBUSY;
    } else {
    else
        tcp_reinit_congestion_control(sk, ca);
    }
out:
    rcu_read_unlock();
    return err;
@@ -5885,8 +5885,10 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
        tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
    tp->snd_cwnd_stamp = tcp_jiffies32;

    icsk->icsk_ca_initialized = 0;
    bpf_skops_established(sk, bpf_op, skb);
    tcp_init_congestion_control(sk);
    if (!icsk->icsk_ca_initialized)
        tcp_init_congestion_control(sk);
    tcp_init_buffer_space(sk);
}
@@ -33,12 +33,6 @@

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
    return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
        (xs->pool->fq || READ_ONCE(xs->fq_tmp));
}

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
    if (pool->cached_need_wakeup & XDP_WAKEUP_RX)

@@ -717,6 +711,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                                                   dev, qid);
        if (err) {
            xp_destroy(xs->pool);
            xs->pool = NULL;
            sockfd_put(sock);
            goto out_unlock;
        }

@@ -39,7 +39,6 @@ static inline struct xdp_sock *xdp_sk(struct sock *sk)
    return (struct xdp_sock *)sk;
}

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
                             struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
@@ -287,7 +287,7 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
        return NULL;

    dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
    if (!dma_map) {
    if (!dma_map->dma_pages) {
        kfree(dma_map);
        return NULL;
    }

@@ -296,7 +296,7 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
    dma_map->dev = dev;
    dma_map->dma_need_sync = false;
    dma_map->dma_pages_cnt = nr_pages;
    refcount_set(&dma_map->users, 0);
    refcount_set(&dma_map->users, 1);
    list_add(&dma_map->list, &umem->xsk_dma_list);
    return dma_map;
}

@@ -369,7 +369,6 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
    pool->dev = dma_map->dev;
    pool->dma_pages_cnt = dma_map->dma_pages_cnt;
    pool->dma_need_sync = dma_map->dma_need_sync;
    refcount_inc(&dma_map->users);
    memcpy(pool->dma_pages, dma_map->dma_pages,
           pool->dma_pages_cnt * sizeof(*pool->dma_pages));

@@ -390,6 +389,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
    if (err)
        return err;

    refcount_inc(&dma_map->users);
    return 0;
}
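The two refcount hunks above switch to the usual "creator holds the first reference" model: incrementing a refcount_t from zero triggers a kernel warning because it looks like a use-after-free, so the dma_map now starts at one and each later attacher takes an extra reference in xp_dma_map(). A simplified, self-contained model of the fixed lifecycle (not the tree's code):

    #include <assert.h>

    /* Toy model: object starts with one reference (its creator's);
     * each attach takes one more; the last put frees it. */
    struct dma_map { int users; };

    static void create(struct dma_map *m)  { m->users = 1; }  /* was 0 before the fix */
    static void attach(struct dma_map *m)  { m->users++; }    /* xp_dma_map() */
    static int  release(struct dma_map *m) { return --m->users == 0; }

    int main(void)
    {
        struct dma_map m;

        create(&m);            /* creator's reference */
        attach(&m);            /* a second user attaches */
        assert(!release(&m));  /* first put: object must survive */
        assert(release(&m));   /* last put frees */
        return 0;
    }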
@@ -59,22 +59,20 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
    du.num_pages = umem->npgs;
    du.chunk_size = umem->chunk_size;
    du.headroom = umem->headroom;
    du.ifindex = pool->netdev ? pool->netdev->ifindex : 0;
    du.queue_id = pool->queue_id;
    du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
    du.queue_id = pool ? pool->queue_id : 0;
    du.flags = 0;
    if (umem->zc)
        du.flags |= XDP_DU_F_ZEROCOPY;
    du.refs = refcount_read(&umem->users);

    err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);

    if (!err && pool->fq)
    if (!err && pool && pool->fq)
        err = xsk_diag_put_ring(pool->fq,
                                XDP_DIAG_UMEM_FILL_RING, nlskb);
    if (!err && pool->cq) {
        err = xsk_diag_put_ring(pool->cq, XDP_DIAG_UMEM_COMPLETION_RING,
                                nlskb);
    }
    if (!err && pool && pool->cq)
        err = xsk_diag_put_ring(pool->cq,
                                XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
    return err;
}
@@ -185,11 +185,6 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,

    xs = (struct xdp_sock *)sock->sk;

    if (!xsk_is_setup_for_bpf_map(xs)) {
        sockfd_put(sock);
        return -EOPNOTSUPP;
    }

    map_entry = &m->xsk_map[i];
    node = xsk_map_node_alloc(m, map_entry);
    if (IS_ERR(node)) {
@@ -50,4 +50,5 @@ xdp_rxq_info
xdp_sample_pkts
xdp_tx_iptunnel
xdpsock
xsk_fwd
testfile.img
@@ -29,8 +29,8 @@ int main(int argc, char **argv)
    struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
    struct bpf_program *prog;
    struct bpf_object *obj;
    const char *section;
    char filename[256];
    const char *title;
    FILE *f;

    snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);

@@ -58,8 +58,8 @@ int main(int argc, char **argv)
    bpf_object__for_each_program(prog, obj) {
        fd = bpf_program__fd(prog);

        title = bpf_program__title(prog, false);
        if (sscanf(title, "socket/%d", &key) != 1) {
        section = bpf_program__section_name(prog);
        if (sscanf(section, "socket/%d", &key) != 1) {
            fprintf(stderr, "ERROR: finding prog failed\n");
            goto cleanup;
        }
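This and the following sample updates track a libbpf rename: bpf_program__title() became bpf_program__section_name() and lost its needs_copy argument. A self-contained sketch of the new accessor (object path and function name are illustrative):

    /* Print the ELF section name of every program in an object. */
    #include <bpf/libbpf.h>
    #include <stdio.h>

    int dump_section_names(const char *path)
    {
        struct bpf_object *obj = bpf_object__open(path);
        struct bpf_program *prog;

        if (libbpf_get_error(obj))
            return -1;
        bpf_object__for_each_program(prog, obj)
            printf("%s\n", bpf_program__section_name(prog));
        bpf_object__close(obj);
        return 0;
    }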
@@ -17,7 +17,7 @@ int main(int ac, char **argv)
    long key, next_key, value;
    struct bpf_program *prog;
    int map_fd, i, j = 0;
    const char *title;
    const char *section;
    struct ksym *sym;

    if (setrlimit(RLIMIT_MEMLOCK, &r)) {

@@ -51,8 +51,8 @@ int main(int ac, char **argv)
    }

    bpf_object__for_each_program(prog, obj) {
        title = bpf_program__title(prog, false);
        if (sscanf(title, "kprobe/%s", symbol) != 1)
        section = bpf_program__section_name(prog);
        if (sscanf(section, "kprobe/%s", symbol) != 1)
            continue;

        /* Attach prog only when symbol exists */
@@ -103,10 +103,9 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
    return result ? *result : -ENOENT;
}

SEC("kprobe/" SYSCALL(sys_connect))
SEC("kprobe/__sys_connect")
int trace_sys_connect(struct pt_regs *ctx)
{
    struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
    struct sockaddr_in6 *in6;
    u16 test_case, port, dst6[8];
    int addrlen, ret, inline_ret, ret_key = 0;

@@ -114,8 +113,8 @@ int trace_sys_connect(struct pt_regs *ctx)
    void *outer_map, *inner_map;
    bool inline_hash = false;

    in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
    addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
    in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(ctx);
    addrlen = (int)PT_REGS_PARM3_CORE(ctx);

    if (addrlen != sizeof(*in6))
        return 0;
@@ -39,8 +39,8 @@ int main(int ac, char **argv)
    struct bpf_program *prog;
    struct bpf_object *obj;
    int key, fd, progs_fd;
    const char *section;
    char filename[256];
    const char *title;
    FILE *f;

    setrlimit(RLIMIT_MEMLOCK, &r);

@@ -78,9 +78,9 @@ int main(int ac, char **argv)
    }

    bpf_object__for_each_program(prog, obj) {
        title = bpf_program__title(prog, false);
        section = bpf_program__section_name(prog);
        /* register only syscalls to PROG_ARRAY */
        if (sscanf(title, "kprobe/%d", &key) != 1)
        if (sscanf(section, "kprobe/%d", &key) != 1)
            continue;

        fd = bpf_program__fd(prog);

@@ -111,7 +111,7 @@ static void print_avail_progs(struct bpf_object *obj)

    bpf_object__for_each_program(pos, obj) {
        if (bpf_program__is_xdp(pos))
            printf(" %s\n", bpf_program__title(pos, false));
            printf(" %s\n", bpf_program__section_name(pos));
    }
}
@@ -78,6 +78,7 @@ static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_extra_stats;
static bool opt_quiet;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;

@@ -718,6 +719,7 @@ static struct option long_options[] = {
    {"tx-pkt-size", required_argument, 0, 's'},
    {"tx-pkt-pattern", required_argument, 0, 'P'},
    {"extra-stats", no_argument, 0, 'x'},
    {"quiet", no_argument, 0, 'Q'},
    {0, 0, 0, 0}
};

@@ -753,6 +755,7 @@ static void usage(const char *prog)
    "                        Min size: %d, Max size %d.\n"
    "  -P, --tx-pkt-pattern=n        Packet fill pattern. Default: 0x%x\n"
    "  -x, --extra-stats     Display extra statistics.\n"
    "  -Q, --quiet           Do not display any stats.\n"
    "\n";
    fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
            opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,

@@ -768,7 +771,7 @@ static void parse_command_line(int argc, char **argv)
    opterr = 0;

    for (;;) {
        c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x",
        c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:xQ",
                        long_options, &option_index);
        if (c == -1)
            break;

@@ -852,6 +855,9 @@ static void parse_command_line(int argc, char **argv)
        case 'x':
            opt_extra_stats = 1;
            break;
        case 'Q':
            opt_quiet = 1;
            break;
        default:
            usage(basename(argv[0]));
        }

@@ -897,6 +903,14 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
    if (!xsk->outstanding_tx)
        return;

    /* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
     * really send the packets. In zero-copy mode we do not have to do this, since Tx
     * is driven by the NAPI loop. So as an optimization, we do not have to call
     * sendto() all the time in zero-copy mode for l2fwd.
     */
    if (opt_xdp_bind_flags & XDP_COPY)
        kick_tx(xsk);

    ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
        xsk->outstanding_tx;

@@ -1117,6 +1131,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
    while (ret != rcvd) {
        if (ret < 0)
            exit_with_error(-ret);
        complete_tx_l2fwd(xsk, fds);
        if (xsk_ring_prod__needs_wakeup(&xsk->tx))
            kick_tx(xsk);
        ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);

@@ -1277,9 +1292,11 @@ int main(int argc, char **argv)

    setlocale(LC_ALL, "");

    ret = pthread_create(&pt, NULL, poller, NULL);
    if (ret)
        exit_with_error(ret);
    if (!opt_quiet) {
        ret = pthread_create(&pt, NULL, poller, NULL);
        if (ret)
            exit_with_error(ret);
    }

    prev_time = get_nsecs();
    start_time = prev_time;

@@ -1293,7 +1310,8 @@ int main(int argc, char **argv)

    benchmark_done = true;

    pthread_join(pt, NULL);
    if (!opt_quiet)
        pthread_join(pt, NULL);

    xdpsock_cleanup();
@@ -341,9 +341,9 @@ fi
vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o}

# fill in BTF IDs
if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
info BTFIDS vmlinux
${RESOLVE_BTFIDS} vmlinux
if [ -n "${CONFIG_DEBUG_INFO_BTF}" -a -n "${CONFIG_BPF}" ]; then
	info BTFIDS vmlinux
	${RESOLVE_BTFIDS} vmlinux
fi

if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then
@@ -19,7 +19,7 @@ man8dir = $(mandir)/man8
# Load targets for building eBPF helpers man page.
include ../../Makefile.helpers

MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst))
MAN8_RST = $(wildcard bpftool*.rst)

_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))

@@ -28,12 +28,23 @@ man: man8 helpers
man8: $(DOC_MAN8)

RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
RST2MAN_OPTS += --verbose

list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST))))
see_also = $(subst " ",, \
	"\n" \
	"SEE ALSO\n" \
	"========\n" \
	"\t**bpf**\\ (2),\n" \
	"\t**bpf-helpers**\\ (7)" \
	$(foreach page,$(call list_pages,$(1)),",\n\t**$(page)**\\ (8)") \
	"\n")

$(OUTPUT)%.8: %.rst
ifndef RST2MAN_DEP
	$(error "rst2man not found, but required to generate man pages")
endif
	$(QUIET_GEN)rst2man $< > $@
	$(QUIET_GEN)( cat $< ; printf "%b" $(call see_also,$<) ) | rst2man $(RST2MAN_OPTS) > $@

clean: helpers-clean
	$(call QUIET_CLEAN, Documentation)
@@ -71,26 +71,12 @@ DESCRIPTION

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-j, --json
		  Generate JSON output. For commands that cannot produce JSON, this
		  option has no effect.

	-p, --pretty
		  Generate human-readable JSON output. Implies **-j**.

	-d, --debug
		  Print all logs available from libbpf, including debug-level
		  information.
	.. include:: common_options.rst

EXAMPLES
========
**# bpftool btf dump id 1226**

::

    [1] PTR '(anon)' type_id=2

@@ -104,6 +90,7 @@ EXAMPLES
This gives an example of default output for all supported BTF kinds.

**$ cat prog.c**

::

    struct fwd_struct;

@@ -144,6 +131,7 @@ This gives an example of default output for all supported BTF kinds.
    }

**$ bpftool btf dump file prog.o**

::

    [1] PTR '(anon)' type_id=2

@@ -229,20 +217,3 @@ All the standard ways to specify map or program are supported:
**# bpftool btf dump prog tag b88e0a09b1d9759d**

**# bpftool btf dump prog pinned /sys/fs/bpf/prog_name**

SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-cgroup**\ (8),
	**bpftool-feature**\ (8),
	**bpftool-gen**\ (8),
	**bpftool-iter**\ (8),
	**bpftool-link**\ (8),
	**bpftool-map**\ (8),
	**bpftool-net**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
@@ -116,26 +116,11 @@ DESCRIPTION

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-j, --json
		  Generate JSON output. For commands that cannot produce JSON, this
		  option has no effect.

	-p, --pretty
		  Generate human-readable JSON output. Implies **-j**.
	.. include:: common_options.rst

	-f, --bpffs
		  Show file names of pinned programs.

	-d, --debug
		  Print all logs available from libbpf, including debug-level
		  information.

EXAMPLES
========

@@ -158,19 +143,3 @@ EXAMPLES
::

    ID       AttachType      AttachFlags     Name

SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-feature**\ (8),
	**bpftool-gen**\ (8),
	**bpftool-iter**\ (8),
	**bpftool-link**\ (8),
	**bpftool-map**\ (8),
	**bpftool-net**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
@@ -71,35 +71,4 @@ DESCRIPTION

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-j, --json
		  Generate JSON output. For commands that cannot produce JSON, this
		  option has no effect.

	-p, --pretty
		  Generate human-readable JSON output. Implies **-j**.

	-d, --debug
		  Print all logs available from libbpf, including debug-level
		  information.

SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-cgroup**\ (8),
	**bpftool-gen**\ (8),
	**bpftool-iter**\ (8),
	**bpftool-link**\ (8),
	**bpftool-map**\ (8),
	**bpftool-net**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
	.. include:: common_options.rst
@@ -126,26 +126,12 @@ DESCRIPTION

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-j, --json
		  Generate JSON output. For commands that cannot produce JSON,
		  this option has no effect.

	-p, --pretty
		  Generate human-readable JSON output. Implies **-j**.

	-d, --debug
		  Print all logs available from libbpf, including debug-level
		  information.
	.. include:: common_options.rst

EXAMPLES
========
**$ cat example.c**

::

    #include <stdbool.h>

@@ -187,6 +173,7 @@ This is example BPF application with two BPF programs and a mix of BPF maps
and global variables.

**$ bpftool gen skeleton example.o**

::

    /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

@@ -241,6 +228,7 @@ and global variables.
    #endif /* __EXAMPLE_SKEL_H__ */

**$ cat example_user.c**

::

    #include "example.skel.h"

@@ -283,6 +271,7 @@ and global variables.
    }

**# ./example_user**

::

    my_map name: my_map

@@ -290,19 +279,3 @@ and global variables.
    my_static_var: 7

This is a stripped-out version of skeleton generated for above example code.

SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-cgroup**\ (8),
	**bpftool-feature**\ (8),
	**bpftool-iter**\ (8),
	**bpftool-link**\ (8),
	**bpftool-map**\ (8),
	**bpftool-net**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
@@ -51,16 +51,7 @@ DESCRIPTION

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-d, --debug
		  Print all logs available, even debug-level information. This
		  includes logs from libbpf as well as from the verifier, when
		  attempting to load programs.
	.. include:: common_options.rst

EXAMPLES
========

@@ -77,19 +68,3 @@ EXAMPLES

Create a file-based bpf iterator from bpf_iter_hashmap.o and map with
id 20, and pin it to /sys/fs/bpf/my_hashmap

SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-cgroup**\ (8),
	**bpftool-feature**\ (8),
	**bpftool-gen**\ (8),
	**bpftool-link**\ (8),
	**bpftool-map**\ (8),
	**bpftool-net**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
@@ -21,7 +21,7 @@ LINK COMMANDS

|	**bpftool** **link { show | list }**	[*LINK*]
|	**bpftool** **link pin**	*LINK* *FILE*
|	**bpftool** **link detach *LINK*
|	**bpftool** **link detach**	*LINK*
|	**bpftool** **link help**

|	*LINK* := { **id** *LINK_ID* | **pinned** *FILE* }

@@ -62,18 +62,7 @@ DESCRIPTION

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-j, --json
		  Generate JSON output. For commands that cannot produce JSON, this
		  option has no effect.

	-p, --pretty
		  Generate human-readable JSON output. Implies **-j**.
	.. include:: common_options.rst

	-f, --bpffs
		  When showing BPF links, show file names of pinned

@@ -83,10 +72,6 @@ OPTIONS
		  Do not automatically attempt to mount any virtual file system
		  (such as tracefs or BPF virtual file system) when necessary.

	-d, --debug
		  Print all logs available, even debug-level information. This
		  includes logs from libbpf.

EXAMPLES
========
**# bpftool link show**

@@ -121,20 +106,3 @@ EXAMPLES
::

    -rw------- 1 root root 0 Apr 23 21:39 link


SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-cgroup**\ (8),
	**bpftool-feature**\ (8),
	**bpftool-gen**\ (8),
	**bpftool-iter**\ (8),
	**bpftool-map**\ (8),
	**bpftool-net**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
@@ -23,7 +23,8 @@ MAP COMMANDS

|	**bpftool** **map** { **show** | **list** }   [*MAP*]
|	**bpftool** **map create**     *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
|		**entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
|		**entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \
|		[**dev** *NAME*]
|	**bpftool** **map dump**       *MAP*
|	**bpftool** **map update**     *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
|	**bpftool** **map lookup**     *MAP* [**key** *DATA*]

@@ -67,7 +68,7 @@ DESCRIPTION
		  maps. On such kernels bpftool will automatically emit this
		  information as well.

	**bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
	**bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**dev** *NAME*]
		  Create a new map with given parameters and pin it to *bpffs*
		  as *FILE*.

@@ -75,6 +76,11 @@
		  desired flags, e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h
		  UAPI header for existing flags).

		  To create maps of type array-of-maps or hash-of-maps, the
		  **inner_map** keyword must be used to pass an inner map. The
		  kernel needs it to collect metadata related to the inner maps
		  that the new map will work with.

		  Keyword **dev** expects a network interface name, and is used
		  to request hardware offload for the map.

@@ -155,18 +161,7 @@

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-j, --json
		  Generate JSON output. For commands that cannot produce JSON, this
		  option has no effect.

	-p, --pretty
		  Generate human-readable JSON output. Implies **-j**.
	.. include:: common_options.rst

	-f, --bpffs
		  Show file names of pinned maps.

@@ -175,13 +170,10 @@ OPTIONS
		  Do not automatically attempt to mount any virtual file system
		  (such as tracefs or BPF virtual file system) when necessary.

	-d, --debug
		  Print all logs available from libbpf, including debug-level
		  information.

EXAMPLES
========
**# bpftool map show**

::

    10: hash  name some_map  flags 0x0

@@ -203,6 +195,7 @@ The following three commands are equivalent:


**# bpftool map dump id 10**

::

    key: 00 01 02 03  value: 00 01 02 03 04 05 06 07

@@ -210,6 +203,7 @@ The following three commands are equivalent:
    Found 2 elements

**# bpftool map getnext id 10 key 0 1 2 3**

::

    key:

@@ -276,19 +270,3 @@ would be lost as soon as bpftool exits).

    key: 00 00 00 00  value: 22 02 00 00
    Found 1 element

SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-cgroup**\ (8),
	**bpftool-feature**\ (8),
	**bpftool-gen**\ (8),
	**bpftool-iter**\ (8),
	**bpftool-link**\ (8),
	**bpftool-net**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
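The new **inner_map** keyword documented above takes the usual *MAP* handles (**id**, **name**, **pinned**). A hypothetical invocation creating a hash-of-maps, assuming a pinned inner map already exists at the given path (value size is 4 because map-in-map values are map fds):

    **# bpftool map create /sys/fs/bpf/outer type hash_of_maps key 4 value 4 \
            entries 2 name outer inner_map pinned /sys/fs/bpf/inner**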
@@ -75,22 +75,7 @@ DESCRIPTION

OPTIONS
=======
	-h, --help
		  Print short generic help message (similar to **bpftool help**).

	-V, --version
		  Print version number (similar to **bpftool version**).

	-j, --json
		  Generate JSON output. For commands that cannot produce JSON, this
		  option has no effect.

	-p, --pretty
		  Generate human-readable JSON output. Implies **-j**.

	-d, --debug
		  Print all logs available from libbpf, including debug-level
		  information.
	.. include:: common_options.rst

EXAMPLES
========

@@ -187,20 +172,3 @@ EXAMPLES
::

    xdp:


SEE ALSO
========
	**bpf**\ (2),
	**bpf-helpers**\ (7),
	**bpftool**\ (8),
	**bpftool-btf**\ (8),
	**bpftool-cgroup**\ (8),
	**bpftool-feature**\ (8),
	**bpftool-gen**\ (8),
	**bpftool-iter**\ (8),
	**bpftool-link**\ (8),
	**bpftool-map**\ (8),
	**bpftool-perf**\ (8),
	**bpftool-prog**\ (8),
	**bpftool-struct_ops**\ (8)
|
@ -40,22 +40,7 @@ DESCRIPTION
|
|||
|
||||
OPTIONS
|
||||
=======
|
||||
-h, --help
|
||||
Print short generic help message (similar to **bpftool help**).
|
||||
|
||||
-V, --version
|
||||
Print version number (similar to **bpftool version**).
|
||||
|
||||
-j, --json
|
||||
Generate JSON output. For commands that cannot produce JSON, this
|
||||
option has no effect.
|
||||
|
||||
-p, --pretty
|
||||
Generate human-readable JSON output. Implies **-j**.
|
||||
|
||||
-d, --debug
|
||||
Print all logs available from libbpf, including debug-level
|
||||
information.
|
||||
.. include:: common_options.rst
|
||||
|
||||
EXAMPLES
|
||||
========
|
||||
|
@ -78,20 +63,3 @@ EXAMPLES
|
|||
{"pid":21765,"fd":5,"prog_id":7,"fd_type":"kretprobe","func":"__x64_sys_nanosleep","offset":0}, \
|
||||
{"pid":21767,"fd":5,"prog_id":8,"fd_type":"tracepoint","tracepoint":"sys_enter_nanosleep"}, \
|
||||
{"pid":21800,"fd":5,"prog_id":9,"fd_type":"uprobe","filename":"/home/yhs/a.out","offset":1159}]
|
||||
|
||||
|
||||
SEE ALSO
|
||||
========
|
||||
**bpf**\ (2),
|
||||
**bpf-helpers**\ (7),
|
||||
**bpftool**\ (8),
|
||||
**bpftool-btf**\ (8),
|
||||
**bpftool-cgroup**\ (8),
|
||||
**bpftool-feature**\ (8),
|
||||
**bpftool-gen**\ (8),
|
||||
**bpftool-iter**\ (8),
|
||||
**bpftool-link**\ (8),
|
||||
**bpftool-map**\ (8),
|
||||
**bpftool-net**\ (8),
|
||||
**bpftool-prog**\ (8),
|
||||
**bpftool-struct_ops**\ (8)
|
||||
|
|
|
@ -210,18 +210,7 @@ DESCRIPTION
|
|||
|
||||
OPTIONS
|
||||
=======
|
||||
-h, --help
|
||||
Print short generic help message (similar to **bpftool help**).
|
||||
|
||||
-V, --version
|
||||
Print version number (similar to **bpftool version**).
|
||||
|
||||
-j, --json
|
||||
Generate JSON output. For commands that cannot produce JSON, this
|
||||
option has no effect.
|
||||
|
||||
-p, --pretty
|
||||
Generate human-readable JSON output. Implies **-j**.
|
||||
.. include:: common_options.rst
|
||||
|
||||
-f, --bpffs
|
||||
When showing BPF programs, show file names of pinned
|
||||
|
@ -234,11 +223,6 @@ OPTIONS
|
|||
Do not automatically attempt to mount any virtual file system
|
||||
(such as tracefs or BPF virtual file system) when necessary.
|
||||
|
||||
-d, --debug
|
||||
Print all logs available, even debug-level information. This
|
||||
includes logs from libbpf as well as from the verifier, when
|
||||
attempting to load programs.
|
||||
|
||||
EXAMPLES
|
||||
========
|
||||
**# bpftool prog show**
|
||||
|
@ -342,19 +326,3 @@ EXAMPLES
|
|||
40176203 cycles (83.05%)
|
||||
42518139 instructions # 1.06 insns per cycle (83.39%)
|
||||
123 llc_misses # 2.89 LLC misses per million insns (83.15%)
|
||||
|
||||
SEE ALSO
|
||||
========
|
||||
**bpf**\ (2),
|
||||
**bpf-helpers**\ (7),
|
||||
**bpftool**\ (8),
|
||||
**bpftool-btf**\ (8),
|
||||
**bpftool-cgroup**\ (8),
|
||||
**bpftool-feature**\ (8),
|
||||
**bpftool-gen**\ (8),
|
||||
**bpftool-iter**\ (8),
|
||||
**bpftool-link**\ (8),
|
||||
**bpftool-map**\ (8),
|
||||
**bpftool-net**\ (8),
|
||||
**bpftool-perf**\ (8),
|
||||
**bpftool-struct_ops**\ (8)
|
||||
|
|
|
@ -60,23 +60,7 @@ DESCRIPTION
|
|||
|
||||
OPTIONS
|
||||
=======
|
||||
-h, --help
|
||||
Print short generic help message (similar to **bpftool help**).
|
||||
|
||||
-V, --version
|
||||
Print version number (similar to **bpftool version**).
|
||||
|
||||
-j, --json
|
||||
Generate JSON output. For commands that cannot produce JSON, this
|
||||
option has no effect.
|
||||
|
||||
-p, --pretty
|
||||
Generate human-readable JSON output. Implies **-j**.
|
||||
|
||||
-d, --debug
|
||||
Print all logs available, even debug-level information. This
|
||||
includes logs from libbpf as well as from the verifier, when
|
||||
attempting to load programs.
|
||||
.. include:: common_options.rst
|
||||
|
||||
EXAMPLES
|
||||
========
|
||||
|
@ -98,20 +82,3 @@ EXAMPLES
|
|||
::
|
||||
|
||||
Registered tcp_congestion_ops cubic id 110
|
||||
|
||||
|
||||
SEE ALSO
|
||||
========
|
||||
**bpf**\ (2),
|
||||
**bpf-helpers**\ (7),
|
||||
**bpftool**\ (8),
|
||||
**bpftool-btf**\ (8),
|
||||
**bpftool-cgroup**\ (8),
|
||||
**bpftool-feature**\ (8),
|
||||
**bpftool-gen**\ (8),
|
||||
**bpftool-iter**\ (8),
|
||||
**bpftool-link**\ (8),
|
||||
**bpftool-map**\ (8),
|
||||
**bpftool-net**\ (8),
|
||||
**bpftool-perf**\ (8),
|
||||
**bpftool-prog**\ (8)
|
||||
|
|
|
@ -46,18 +46,7 @@ DESCRIPTION
|
|||
|
||||
OPTIONS
|
||||
=======
|
||||
-h, --help
|
||||
Print short help message (similar to **bpftool help**).
|
||||
|
||||
-V, --version
|
||||
Print version number (similar to **bpftool version**).
|
||||
|
||||
-j, --json
|
||||
Generate JSON output. For commands that cannot produce JSON, this
|
||||
option has no effect.
|
||||
|
||||
-p, --pretty
|
||||
Generate human-readable JSON output. Implies **-j**.
|
||||
.. include:: common_options.rst
|
||||
|
||||
-m, --mapcompat
|
||||
Allow loading maps with unknown map definitions.
|
||||
|
@ -65,24 +54,3 @@ OPTIONS
|
|||
-n, --nomount
|
||||
Do not automatically attempt to mount any virtual file system
|
||||
(such as tracefs or BPF virtual file system) when necessary.
|
||||
|
||||
-d, --debug
|
||||
Print all logs available, even debug-level information. This
|
||||
includes logs from libbpf as well as from the verifier, when
|
||||
attempting to load programs.
|
||||
|
||||
SEE ALSO
|
||||
========
|
||||
**bpf**\ (2),
|
||||
**bpf-helpers**\ (7),
|
||||
**bpftool-btf**\ (8),
|
||||
**bpftool-cgroup**\ (8),
|
||||
**bpftool-feature**\ (8),
|
||||
**bpftool-gen**\ (8),
|
||||
**bpftool-iter**\ (8),
|
||||
**bpftool-link**\ (8),
|
||||
**bpftool-map**\ (8),
|
||||
**bpftool-net**\ (8),
|
||||
**bpftool-perf**\ (8),
|
||||
**bpftool-prog**\ (8),
|
||||
**bpftool-struct_ops**\ (8)
|
||||
|
|
|
@@ -0,0 +1,22 @@
-h, --help
	  Print short help message (similar to **bpftool help**).

-V, --version
	  Print version number (similar to **bpftool version**), and optional
	  features that were included when bpftool was compiled. Optional
	  features include linking against libbfd to provide the disassembler
	  for JIT-ted programs (**bpftool prog dump jited**) and usage of BPF
	  skeletons (some features like **bpftool prog profile** or showing
	  pids associated to BPF objects may rely on it).

-j, --json
	  Generate JSON output. For commands that cannot produce JSON, this
	  option has no effect.

-p, --pretty
	  Generate human-readable JSON output. Implies **-j**.

-d, --debug
	  Print all logs available, even debug-level information. This includes
	  logs from libbpf as well as from the verifier, when attempting to
	  load programs.
@@ -709,9 +709,26 @@ _bpftool()
                        "$cur" ) )
                    return 0
                    ;;
                key|value|flags|name|entries)
                key|value|flags|entries)
                    return 0
                    ;;
                inner_map)
                    COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
                    return 0
                    ;;
                id)
                    _bpftool_get_map_ids
                    ;;
                name)
                    case $pprev in
                        inner_map)
                            _bpftool_get_map_names
                            ;;
                        *)
                            return 0
                            ;;
                    esac
                    ;;
                *)
                    _bpftool_once_attr 'type'
                    _bpftool_once_attr 'key'

@@ -719,6 +736,9 @@ _bpftool()
                    _bpftool_once_attr 'entries'
                    _bpftool_once_attr 'name'
                    _bpftool_once_attr 'flags'
                    if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
                        _bpftool_once_attr 'inner_map'
                    fi
                    _bpftool_once_attr 'dev'
                    return 0
                    ;;
@@ -119,6 +119,12 @@ void jsonw_pretty(json_writer_t *self, bool on)
    self->pretty = on;
}

void jsonw_reset(json_writer_t *self)
{
    assert(self->depth == 0);
    self->sep = '\0';
}

/* Basic blocks */
static void jsonw_begin(json_writer_t *self, int c)
{

@@ -27,6 +27,9 @@ void jsonw_destroy(json_writer_t **self_p);
/* Cause output to have pretty whitespace */
void jsonw_pretty(json_writer_t *self, bool on);

/* Reset separator to create new JSON */
void jsonw_reset(json_writer_t *self);

/* Add property name */
void jsonw_name(json_writer_t *self, const char *name);
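A minimal stand-alone sketch of the intended use of jsonw_reset(): reuse one writer for several independent JSON values, clearing the separator state between them (show_prog_metadata() below does exactly this between plain-text BTF dumps).

    #include <stdio.h>
    #include "json_writer.h"    /* bpftool's private JSON writer */

    /* Emit two separate JSON documents with a single writer. */
    void emit_two(void)
    {
        json_writer_t *w = jsonw_new(stdout);

        jsonw_string(w, "first");
        jsonw_reset(w);     /* forget the separator, start fresh */
        jsonw_string(w, "second");
        jsonw_destroy(&w);
    }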
@@ -70,13 +70,42 @@ static int do_help(int argc, char **argv)

static int do_version(int argc, char **argv)
{
#ifdef HAVE_LIBBFD_SUPPORT
    const bool has_libbfd = true;
#else
    const bool has_libbfd = false;
#endif
#ifdef BPFTOOL_WITHOUT_SKELETONS
    const bool has_skeletons = false;
#else
    const bool has_skeletons = true;
#endif

    if (json_output) {
        jsonw_start_object(json_wtr);
        jsonw_start_object(json_wtr);   /* root object */

        jsonw_name(json_wtr, "version");
        jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
        jsonw_end_object(json_wtr);

        jsonw_name(json_wtr, "features");
        jsonw_start_object(json_wtr);   /* features */
        jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
        jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
        jsonw_end_object(json_wtr);     /* features */

        jsonw_end_object(json_wtr);     /* root object */
    } else {
        unsigned int nb_features = 0;

        printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
        printf("features:");
        if (has_libbfd) {
            printf(" libbfd");
            nb_features++;
        }
        if (has_skeletons)
            printf("%s skeletons", nb_features++ ? "," : "");
        printf("\n");
    }
    return 0;
}
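With this change, "bpftool version -p" reports the compiled-in optional features as well; the JSON output would look along these lines (version string illustrative):

    $ bpftool version -p
    {
        "version": "5.9.0",
        "features": {
            "libbfd": true,
            "skeletons": true
        }
    }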
@@ -213,8 +213,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
    jsonw_end_object(json_wtr);
}

static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
                              const char *error_msg)
static void
print_entry_error_msg(struct bpf_map_info *info, unsigned char *key,
                      const char *error_msg)
{
    int msg_size = strlen(error_msg);
    bool single_line, break_names;

@@ -232,6 +233,40 @@ static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
    printf("\n");
}

static void
print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno)
{
    /* For prog_array maps or arrays of maps, failure to lookup the value
     * means there is no entry for that key. Do not print an error message
     * in that case.
     */
    if ((map_is_map_of_maps(map_info->type) ||
         map_is_map_of_progs(map_info->type)) && lookup_errno == ENOENT)
        return;

    if (json_output) {
        jsonw_start_object(json_wtr);   /* entry */
        jsonw_name(json_wtr, "key");
        print_hex_data_json(key, map_info->key_size);
        jsonw_name(json_wtr, "value");
        jsonw_start_object(json_wtr);   /* error */
        jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
        jsonw_end_object(json_wtr);     /* error */
        jsonw_end_object(json_wtr);     /* entry */
    } else {
        const char *msg = NULL;

        if (lookup_errno == ENOENT)
            msg = "<no entry>";
        else if (lookup_errno == ENOSPC &&
                 map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
            msg = "<cannot read>";

        print_entry_error_msg(map_info, key,
                              msg ? : strerror(lookup_errno));
    }
}

static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
                              unsigned char *value)
{

@@ -713,56 +748,23 @@ static int dump_map_elem(int fd, void *key, void *value,
                         struct bpf_map_info *map_info, struct btf *btf,
                         json_writer_t *btf_wtr)
{
    int num_elems = 0;
    int lookup_errno;

    if (!bpf_map_lookup_elem(fd, key, value)) {
        if (json_output) {
            print_entry_json(map_info, key, value, btf);
        } else {
            if (btf) {
                struct btf_dumper d = {
                    .btf = btf,
                    .jw = btf_wtr,
                    .is_plain_text = true,
                };

                do_dump_btf(&d, map_info, key, value);
            } else {
                print_entry_plain(map_info, key, value);
            }
            num_elems++;
        }
        return num_elems;
    if (bpf_map_lookup_elem(fd, key, value)) {
        print_entry_error(map_info, key, errno);
        return -1;
    }

    /* lookup error handling */
    lookup_errno = errno;

    if (map_is_map_of_maps(map_info->type) ||
        map_is_map_of_progs(map_info->type))
        return 0;

    if (json_output) {
        jsonw_start_object(json_wtr);
        jsonw_name(json_wtr, "key");
        print_hex_data_json(key, map_info->key_size);
        jsonw_name(json_wtr, "value");
        jsonw_start_object(json_wtr);
        jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
        jsonw_end_object(json_wtr);
        jsonw_end_object(json_wtr);
        print_entry_json(map_info, key, value, btf);
    } else if (btf) {
        struct btf_dumper d = {
            .btf = btf,
            .jw = btf_wtr,
            .is_plain_text = true,
        };

        do_dump_btf(&d, map_info, key, value);
    } else {
        const char *msg = NULL;

        if (lookup_errno == ENOENT)
            msg = "<no entry>";
        else if (lookup_errno == ENOSPC &&
                 map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
            msg = "<cannot read>";

        print_entry_error(map_info, key,
                          msg ? : strerror(lookup_errno));
        print_entry_plain(map_info, key, value);
    }

    return 0;

@@ -873,7 +875,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
            err = 0;
            break;
        }
        num_elems += dump_map_elem(fd, key, value, info, btf, wtr);
        if (!dump_map_elem(fd, key, value, info, btf, wtr))
            num_elems++;
        prev_key = key;
    }

@@ -1247,7 +1250,7 @@ static int do_create(int argc, char **argv)
{
    struct bpf_create_map_attr attr = { NULL, };
    const char *pinfile;
    int err, fd;
    int err = -1, fd;

    if (!REQ_ARGS(7))
        return -1;

@@ -1262,13 +1265,13 @@ static int do_create(int argc, char **argv)

            if (attr.map_type) {
                p_err("map type already specified");
                return -1;
                goto exit;
            }

            attr.map_type = map_type_from_str(*argv);
            if ((int)attr.map_type < 0) {
                p_err("unrecognized map type: %s", *argv);
                return -1;
                goto exit;
            }
            NEXT_ARG();
        } else if (is_prefix(*argv, "name")) {

@@ -1277,43 +1280,56 @@ static int do_create(int argc, char **argv)
        } else if (is_prefix(*argv, "key")) {
            if (parse_u32_arg(&argc, &argv, &attr.key_size,
                              "key size"))
                return -1;
                goto exit;
        } else if (is_prefix(*argv, "value")) {
            if (parse_u32_arg(&argc, &argv, &attr.value_size,
                              "value size"))
                return -1;
                goto exit;
        } else if (is_prefix(*argv, "entries")) {
            if (parse_u32_arg(&argc, &argv, &attr.max_entries,
                              "max entries"))
                return -1;
                goto exit;
        } else if (is_prefix(*argv, "flags")) {
            if (parse_u32_arg(&argc, &argv, &attr.map_flags,
                              "flags"))
                return -1;
                goto exit;
        } else if (is_prefix(*argv, "dev")) {
            NEXT_ARG();

            if (attr.map_ifindex) {
                p_err("offload device already specified");
                return -1;
                goto exit;
            }

            attr.map_ifindex = if_nametoindex(*argv);
            if (!attr.map_ifindex) {
                p_err("unrecognized netdevice '%s': %s",
                      *argv, strerror(errno));
                return -1;
                goto exit;
            }
            NEXT_ARG();
        } else if (is_prefix(*argv, "inner_map")) {
            struct bpf_map_info info = {};
            __u32 len = sizeof(info);
            int inner_map_fd;

            NEXT_ARG();
            if (!REQ_ARGS(2))
                usage();
            inner_map_fd = map_parse_fd_and_info(&argc, &argv,
                                                 &info, &len);
            if (inner_map_fd < 0)
                return -1;
            attr.inner_map_fd = inner_map_fd;
        } else {
            p_err("unknown arg %s", *argv);
            return -1;
            goto exit;
        }
    }

    if (!attr.name) {
        p_err("map name not specified");
        return -1;
        goto exit;
    }

    set_max_rlimit();

@@ -1321,17 +1337,22 @@ static int do_create(int argc, char **argv)
    fd = bpf_create_map_xattr(&attr);
    if (fd < 0) {
        p_err("map create failed: %s", strerror(errno));
        return -1;
        goto exit;
    }

    err = do_pin_fd(fd, pinfile);
    close(fd);
    if (err)
        return err;
        goto exit;

    if (json_output)
        jsonw_null(json_wtr);
    return 0;

exit:
    if (attr.inner_map_fd > 0)
        close(attr.inner_map_fd);

    return err;
}

static int do_pop_dequeue(int argc, char **argv)

@@ -1417,7 +1438,7 @@ static int do_help(int argc, char **argv)
        "Usage: %1$s %2$s { show | list }   [MAP]\n"
        "       %1$s %2$s create     FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
        "                                  entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
        "                                  [dev NAME]\n"
        "                                  [inner_map MAP] [dev NAME]\n"
        "       %1$s %2$s dump       MAP\n"
        "       %1$s %2$s update     MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
        "       %1$s %2$s lookup     MAP [key DATA]\n"
@@ -29,6 +29,9 @@
#include "main.h"
#include "xlated_dumper.h"

#define BPF_METADATA_PREFIX "bpf_metadata_"
#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)

const char * const prog_type_name[] = {
    [BPF_PROG_TYPE_UNSPEC]          = "unspec",
    [BPF_PROG_TYPE_SOCKET_FILTER]   = "socket_filter",
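bpftool matches this prefix against read-only global variables in the program's .rodata map (see find_metadata() in the next hunk). A program could expose metadata with a declaration along these lines (a sketch; the name after the prefix and the value are free-form):

    /* In the BPF program: lands in the .rodata map, where bpftool looks
     * for variables named bpf_metadata_<something>. */
    volatile const char bpf_metadata_version[] SEC(".rodata") = "1.2.3";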
@@ -151,6 +154,198 @@ static void show_prog_maps(int fd, __u32 num_maps)
    }
}

static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
{
    struct bpf_prog_info prog_info;
    __u32 prog_info_len;
    __u32 map_info_len;
    void *value = NULL;
    __u32 *map_ids;
    int nr_maps;
    int key = 0;
    int map_fd;
    int ret;
    __u32 i;

    memset(&prog_info, 0, sizeof(prog_info));
    prog_info_len = sizeof(prog_info);
    ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
    if (ret)
        return NULL;

    if (!prog_info.nr_map_ids)
        return NULL;

    map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
    if (!map_ids)
        return NULL;

    nr_maps = prog_info.nr_map_ids;
    memset(&prog_info, 0, sizeof(prog_info));
    prog_info.nr_map_ids = nr_maps;
    prog_info.map_ids = ptr_to_u64(map_ids);
    prog_info_len = sizeof(prog_info);

    ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
    if (ret)
        goto free_map_ids;

    for (i = 0; i < prog_info.nr_map_ids; i++) {
        map_fd = bpf_map_get_fd_by_id(map_ids[i]);
        if (map_fd < 0)
            goto free_map_ids;

        memset(map_info, 0, sizeof(*map_info));
        map_info_len = sizeof(*map_info);
        ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
        if (ret < 0) {
            close(map_fd);
            goto free_map_ids;
        }

        if (map_info->type != BPF_MAP_TYPE_ARRAY ||
            map_info->key_size != sizeof(int) ||
            map_info->max_entries != 1 ||
            !map_info->btf_value_type_id ||
            !strstr(map_info->name, ".rodata")) {
            close(map_fd);
            continue;
        }

        value = malloc(map_info->value_size);
        if (!value) {
            close(map_fd);
            goto free_map_ids;
        }

        if (bpf_map_lookup_elem(map_fd, &key, value)) {
            close(map_fd);
            free(value);
            value = NULL;
            goto free_map_ids;
        }

        close(map_fd);
        break;
    }

free_map_ids:
    free(map_ids);
    return value;
}

static bool has_metadata_prefix(const char *s)
{
    return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
}

static void show_prog_metadata(int fd, __u32 num_maps)
{
    const struct btf_type *t_datasec, *t_var;
    struct bpf_map_info map_info;
    struct btf_var_secinfo *vsi;
    bool printed_header = false;
    struct btf *btf = NULL;
    unsigned int i, vlen;
    void *value = NULL;
    const char *name;
    int err;

    if (!num_maps)
        return;

    memset(&map_info, 0, sizeof(map_info));
    value = find_metadata(fd, &map_info);
    if (!value)
        return;

    err = btf__get_from_id(map_info.btf_id, &btf);
    if (err || !btf)
        goto out_free;

    t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
    if (!btf_is_datasec(t_datasec))
        goto out_free;

    vlen = btf_vlen(t_datasec);
    vsi = btf_var_secinfos(t_datasec);

    /* We don't proceed to check the kinds of the elements of the DATASEC.
     * The verifier enforces them to be BTF_KIND_VAR.
     */

    if (json_output) {
        struct btf_dumper d = {
            .btf = btf,
            .jw = json_wtr,
            .is_plain_text = false,
        };

        for (i = 0; i < vlen; i++, vsi++) {
            t_var = btf__type_by_id(btf, vsi->type);
            name = btf__name_by_offset(btf, t_var->name_off);

            if (!has_metadata_prefix(name))
                continue;

            if (!printed_header) {
                jsonw_name(json_wtr, "metadata");
                jsonw_start_object(json_wtr);
                printed_header = true;
            }

            jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
            err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
            if (err) {
                p_err("btf dump failed: %d", err);
                break;
            }
        }
        if (printed_header)
            jsonw_end_object(json_wtr);
    } else {
        json_writer_t *btf_wtr = jsonw_new(stdout);
        struct btf_dumper d = {
            .btf = btf,
            .jw = btf_wtr,
            .is_plain_text = true,
        };

        if (!btf_wtr) {
            p_err("jsonw alloc failed");
            goto out_free;
        }

        for (i = 0; i < vlen; i++, vsi++) {
            t_var = btf__type_by_id(btf, vsi->type);
            name = btf__name_by_offset(btf, t_var->name_off);

            if (!has_metadata_prefix(name))
                continue;

            if (!printed_header) {
                printf("\tmetadata:");
                printed_header = true;
            }

            printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);

            jsonw_reset(btf_wtr);
            err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
            if (err) {
                p_err("btf dump failed: %d", err);
                break;
            }
        }
        if (printed_header)
            jsonw_destroy(&btf_wtr);
    }

out_free:
    btf__free(btf);
    free(value);
}

static void print_prog_header_json(struct bpf_prog_info *info)
{
    jsonw_uint_field(json_wtr, "id", info->id);
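Plain "bpftool prog show" output then gains a metadata block at the end of the entry; roughly like this (IDs, tag, and values are illustrative, not from the tree):

    # bpftool prog show id 6
    6: socket_filter  name prog  tag e3491e70ac9e8d92  gpl
            xlated 104B  jited 70B  memlock 4096B  map_ids 2
            metadata:
                    version = "1.2.3"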
@@ -228,6 +423,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)

    emit_obj_refs_json(&refs_table, info->id, json_wtr);

    show_prog_metadata(fd, info->nr_map_ids);

    jsonw_end_object(json_wtr);
}

@@ -297,6 +494,8 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
    emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");

    printf("\n");

    show_prog_metadata(fd, info->nr_map_ids);
}

static int show_prog(int fd)

@@ -1304,7 +1503,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
        enum bpf_prog_type prog_type = common_prog_type;

        if (prog_type == BPF_PROG_TYPE_UNSPEC) {
            const char *sec_name = bpf_program__title(pos, false);
            const char *sec_name = bpf_program__section_name(pos);

            err = get_prog_type_by_name(sec_name, &prog_type,
                                        &expected_attach_type);

@@ -1398,7 +1597,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
            err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
            if (err) {
                p_err("failed to pin program %s",
                      bpf_program__title(prog, false));
                      bpf_program__section_name(prog));
                goto err_close_obj;
            }
        } else {
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
include ../../scripts/Makefile.include
include ../../scripts/Makefile.arch

ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))

@@ -29,6 +30,7 @@ endif
AR       = $(HOSTAR)
CC       = $(HOSTCC)
LD       = $(HOSTLD)
ARCH     = $(HOSTARCH)

OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
@@ -76,6 +76,13 @@ extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \
__BTF_ID_LIST(name, globl)

/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
 * a single entry.
 */
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
	BTF_ID_LIST(name) \
	BTF_ID(prefix, typename)

/*
 * The BTF_ID_UNUSED macro defines 4 zero bytes.
 * It's used when we want to define 'unused' entry

@@ -140,6 +147,7 @@ extern struct btf_id_set name;
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name)
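For reference, a sketch of how a single-entry list is used in kernel code that needs exactly one BTF type ID (the seq_file example mirrors the pattern used elsewhere in this series; it is an illustration, not part of this hunk):

    #include <linux/btf_ids.h>

    /* Resolves to the BTF type ID of struct seq_file at link time;
     * shorthand for BTF_ID_LIST(name) followed by a single BTF_ID(). */
    BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)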
@@ -124,6 +124,7 @@ enum bpf_cmd {
	BPF_ENABLE_STATS,
	BPF_ITER_CREATE,
	BPF_LINK_DETACH,
	BPF_PROG_BIND_MAP,
};

enum bpf_map_type {

@@ -658,6 +659,12 @@ union bpf_attr {
		__u32		flags;
	} iter_create;

	struct { /* struct used by BPF_PROG_BIND_MAP command */
		__u32		prog_fd;
		__u32		map_fd;
		__u32		flags;		/* extra flags */
	} prog_bind_map;

} __attribute__((aligned(8)));

/* The description below is an attempt at providing documentation to eBPF
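A minimal user-space sketch of driving the new command directly through the bpf(2) syscall (the two fds are assumed to come from elsewhere; libbpf grew a wrapper for this in the same series):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Bind an already-loaded map to a program so the map stays alive
     * (and shows up in the prog's map_ids) even when the program never
     * references it directly, e.g. a pure metadata map. */
    static int prog_bind_map(int prog_fd, int map_fd)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_bind_map.prog_fd = prog_fd;
        attr.prog_bind_map.map_fd = map_fd;

        return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
    }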
@ -1447,8 +1454,8 @@ union bpf_attr {
|
|||
* Return
|
||||
* The return value depends on the result of the test, and can be:
|
||||
*
|
||||
* * 0, if the *skb* task belongs to the cgroup2.
|
||||
* * 1, if the *skb* task does not belong to the cgroup2.
|
||||
* * 0, if current task belongs to the cgroup2.
|
||||
* * 1, if current task does not belong to the cgroup2.
|
||||
* * A negative error code, if an error occurred.
|
||||
*
|
||||
* long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
|
||||
|
@@ -3349,38 +3356,38 @@ union bpf_attr {
 * Description
 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
 * Return
 * *sk* if casting is valid, or NULL otherwise.
 * *sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
 * Description
 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
 * Return
 * *sk* if casting is valid, or NULL otherwise.
 * *sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
 * Description
 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
 * Return
 * *sk* if casting is valid, or NULL otherwise.
 * *sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
 * Description
 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
 * Return
 * *sk* if casting is valid, or NULL otherwise.
 * *sk* if casting is valid, or **NULL** otherwise.
 *
 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
 * Description
 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
 * Return
 * *sk* if casting is valid, or NULL otherwise.
 * *sk* if casting is valid, or **NULL** otherwise.
 *
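
As a hedged illustration of these casts, a fragment of a TCP iterator program; struct bpf_iter__tcp and BPF_SEQ_PRINTF are assumed to be available as in the kernel's bpf_iter selftests:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/tcp")
int dump_tcp(struct bpf_iter__tcp *ctx)
{
        struct sock_common *skc = ctx->sk_common;
        struct tcp_sock *tp;

        if (!skc)
                return 0;
        /* NULL means the socket is not a full tcp_sock (e.g. timewait) */
        tp = bpf_skc_to_tcp_sock(skc);
        if (!tp)
                return 0;
        BPF_SEQ_PRINTF(ctx->meta->seq, "snd_cwnd %u\n", tp->snd_cwnd);
        return 0;
}
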
 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
 * Description
 * Return a user or a kernel stack in bpf program provided buffer.
 * To achieve this, the helper needs *task*, which is a valid
 * pointer to struct task_struct. To store the stacktrace, the
 * bpf program provides *buf* with a nonnegative *size*.
 * pointer to **struct task_struct**. To store the stacktrace, the
 * bpf program provides *buf* with a nonnegative *size*.
 *
 * The last argument, *flags*, holds the number of stack frames to
 * skip (from 0 to 255), masked with
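
And a minimal task-iterator sketch for bpf_get_task_stack, under the same header assumptions; the buffer size and flags are arbitrary:

SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
        static __u64 entries[32]; /* scratch buffer for the stack trace */
        struct task_struct *task = ctx->task;
        long n;

        if (!task)
                return 0;
        /* flags = 0: kernel stack, skip no frames */
        n = bpf_get_task_stack(task, entries, sizeof(entries), 0);
        if (n < 0)
                return 0;
        /* n bytes of stack entries are now in 'entries' */
        return 0;
}
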
@@ -3410,12 +3417,12 @@ union bpf_attr {
 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
 * Description
 * Load header option. Support reading a particular TCP header
 * option for bpf program (BPF_PROG_TYPE_SOCK_OPS).
 * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
 *
 * If *flags* is 0, it will search the option from the
 * sock_ops->skb_data. The comment in "struct bpf_sock_ops"
 * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
 * has details on what skb_data contains under different
 * sock_ops->op.
 * *skops*\ **->op**.
 *
 * The first byte of the *searchby_res* specifies the
 * kind that it wants to search.
@@ -3435,7 +3442,7 @@ union bpf_attr {
 * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
 *
 * To search for the standard window scale option (3),
 * the searchby_res should be [ 3, 0, 0, .... 0 ].
 * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
 * Note, kind-length must be 0 for regular option.
 *
 * Searching for No-Op (0) and End-of-Option-List (1) are
@@ -3445,27 +3452,30 @@ union bpf_attr {
 * of a header option.
 *
 * Supported flags:
 *
 * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
 * saved_syn packet or the just-received syn packet.
 *
 * Return
 * >0 when found, the header option is copied to *searchby_res*.
 * The return value is the total length copied.
 * > 0 when found, the header option is copied to *searchby_res*.
 * The return value is the total length copied. On failure, a
 * negative error code is returned:
 *
 * **-EINVAL** If param is invalid
 * **-EINVAL** if a parameter is invalid.
 *
 * **-ENOMSG** The option is not found
 * **-ENOMSG** if the option is not found.
 *
 * **-ENOENT** No syn packet available when
 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used
 * **-ENOENT** if no syn packet is available when
 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
 *
 * **-ENOSPC** Not enough space. Only *len* number of
 * bytes are copied.
 * **-ENOSPC** if there is not enough space. Only *len* number of
 * bytes are copied.
 *
 * **-EFAULT** Cannot parse the header options in the packet
 * **-EFAULT** on failure to parse the header options in the
 * packet.
 *
 * **-EPERM** This helper cannot be used under the
 * current sock_ops->op.
 * **-EPERM** if the helper cannot be used under the current
 * *skops*\ **->op**.
 *
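
A hedged sock_ops fragment that performs the window-scale lookup described above (headers as in the previous sketches):

SEC("sockops")
int search_wscale(struct bpf_sock_ops *skops)
{
        __u8 opt[4] = { 3 }; /* kind 3; kind-length stays 0 for a regular option */
        int ret;

        ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), 0);
        if (ret > 0) {
                /* ret bytes copied: opt[0] = kind, opt[1] = len, opt[2] = shift */
        }
        return 1;
}
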
 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
 * Description
@@ -3483,44 +3493,44 @@ union bpf_attr {
 * by searching the same option in the outgoing skb.
 *
 * This helper can only be called during
 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
 *
 * Return
 * 0 on success, or negative error in case of failure:
 *
 * **-EINVAL** If param is invalid
 * **-EINVAL** If param is invalid.
 *
 * **-ENOSPC** Not enough space in the header.
 * Nothing has been written
 * **-ENOSPC** if there is not enough space in the header.
 * Nothing has been written
 *
 * **-EEXIST** The option has already existed
 * **-EEXIST** if the option already exists.
 *
 * **-EFAULT** Cannot parse the existing header options
 * **-EFAULT** on failure to parse the existing header options.
 *
 * **-EPERM** This helper cannot be used under the
 * current sock_ops->op.
 * **-EPERM** if the helper cannot be used under the current
 * *skops*\ **->op**.
 *
 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
 * Description
 * Reserve *len* bytes for the bpf header option. The
 * space will be used by bpf_store_hdr_opt() later in
 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
 * space will be used by **bpf_store_hdr_opt**\ () later in
 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
 *
 * If bpf_reserve_hdr_opt() is called multiple times,
 * If **bpf_reserve_hdr_opt**\ () is called multiple times,
 * the total number of bytes will be reserved.
 *
 * This helper can only be called during
 * BPF_SOCK_OPS_HDR_OPT_LEN_CB.
 * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
 *
 * Return
 * 0 on success, or negative error in case of failure:
 *
 * **-EINVAL** if param is invalid
 * **-EINVAL** if a parameter is invalid.
 *
 * **-ENOSPC** Not enough space in the header.
 * **-ENOSPC** if there is not enough space in the header.
 *
 * **-EPERM** This helper cannot be used under the
 * current sock_ops->op.
 * **-EPERM** if the helper cannot be used under the current
 * *skops*\ **->op**.
 *
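
Tying the two callbacks together, a hedged sketch of the documented reserve-then-write flow; kind 254 with the 0xeB9F magic mirrors the experimental-option example above:

SEC("sockops")
int toy_hdr_opt(struct bpf_sock_ops *skops)
{
        __u8 opt[4] = { 254, 4, 0xeB, 0x9F };

        switch (skops->op) {
        case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
                /* ask the stack to leave room for our 4 bytes */
                bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
                break;
        case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
                /* write into the space reserved above */
                bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
                break;
        }
        return 1;
}
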
 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
 * Description
@@ -3560,9 +3570,9 @@ union bpf_attr {
 *
 * long bpf_d_path(struct path *path, char *buf, u32 sz)
 * Description
 * Return full path for given 'struct path' object, which
 * needs to be the kernel BTF 'path' object. The path is
 * returned in the provided buffer 'buf' of size 'sz' and
 * Return full path for given **struct path** object, which
 * needs to be the kernel BTF *path* object. The path is
 * returned in the provided buffer *buf* of size *sz* and
 * is zero terminated.
 *
 * Return
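
A hedged tracing sketch for bpf_d_path; filp_close is one of the functions the helper is allowlisted for (the d_path selftest further below exercises it):

SEC("fentry/filp_close")
int BPF_PROG(trace_close, struct file *file)
{
        char buf[256];
        long len;

        len = bpf_d_path(&file->f_path, buf, sizeof(buf));
        if (len > 0)
                bpf_printk("close: %s", buf);
        return 0;
}
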
@@ -3573,7 +3583,7 @@ union bpf_attr {
 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
 * Description
 * Read *size* bytes from user space address *user_ptr* and store
 * the data in *dst*. This is a wrapper of copy_from_user().
 * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
 * Return
 * 0 on success, or a negative error in case of failure.
 */
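
For bpf_copy_from_user, a sketch under the assumption of a sleepable tracing program (the helper may fault user pages in); the attach point and the way the user pointer is obtained are purely illustrative:

SEC("fentry.s/security_file_open") /* '.s' marks the program sleepable */
int BPF_PROG(copy_user_buf, struct file *file)
{
        char buf[64];
        const void *user_ptr = (const void *)0; /* placeholder user address */

        if (bpf_copy_from_user(buf, sizeof(buf), user_ptr) == 0)
                bpf_printk("copied %d bytes", (int)sizeof(buf));
        return 0;
}
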
@@ -872,3 +872,19 @@ int bpf_enable_stats(enum bpf_stats_type type)

return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
const struct bpf_prog_bind_opts *opts)
{
union bpf_attr attr;

if (!OPTS_VALID(opts, bpf_prog_bind_opts))
return -EINVAL;

memset(&attr, 0, sizeof(attr));
attr.prog_bind_map.prog_fd = prog_fd;
attr.prog_bind_map.map_fd = map_fd;
attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}
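
From the userspace side, usage is a one-liner once the program and map are loaded; DECLARE_LIBBPF_OPTS comes from libbpf_common.h:

#include <bpf/bpf.h>

static int bind_metadata_map(int prog_fd, int map_fd)
{
        DECLARE_LIBBPF_OPTS(bpf_prog_bind_opts, opts,
                .flags = 0, /* no flags defined for this command yet */
        );

        /* on success the map stays alive for as long as the program does */
        return bpf_prog_bind_map(prog_fd, map_fd, &opts);
}
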
@@ -243,6 +243,14 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);

struct bpf_prog_bind_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 flags;
};
#define bpf_prog_bind_opts__last_field flags

LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
const struct bpf_prog_bind_opts *opts);
#ifdef __cplusplus
} /* extern "C" */
#endif
@@ -57,14 +57,16 @@ LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
__u32 *size);
LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *cnt);
LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **line_info, __u32 *cnt);
LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
int btf_ext__reloc_func_info(const struct btf *btf,
const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *cnt);
LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
int btf_ext__reloc_line_info(const struct btf *btf,
const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **line_info, __u32 *cnt);
LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
File diff suppressed because it is too large
@@ -198,8 +198,9 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
__u32 ifindex);

LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
bool needs_copy);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead")
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
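
For callers the migration is mechanical; a short sketch of the replacement call:

#include <stdio.h>
#include <bpf/libbpf.h>

static void print_section(const struct bpf_program *prog)
{
        /* was: bpf_program__title(prog, false), now flagged by LIBBPF_DEPRECATED */
        fprintf(stderr, "prog section: %s\n", bpf_program__section_name(prog));
}
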
@@ -302,6 +302,8 @@ LIBBPF_0.1.0 {

LIBBPF_0.2.0 {
global:
bpf_prog_bind_map;
bpf_program__section_name;
perf_buffer__buffer_cnt;
perf_buffer__buffer_fd;
perf_buffer__epoll_fd;
@@ -15,6 +15,8 @@
#define LIBBPF_API __attribute__((visibility("default")))
#endif

#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))

/* Helper macro to declare and initialize libbpf options struct
 *
 * This dance with uninitialized declaration, followed by memset to zero,
@@ -20,6 +20,7 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sockios.h>
#include <net/if.h>
@@ -328,12 +328,6 @@ config_bpf_program(struct bpf_program *prog)
probe_conf.no_inlines = false;
probe_conf.force_add = false;

config_str = bpf_program__title(prog, false);
if (IS_ERR(config_str)) {
pr_debug("bpf: unable to get title for program\n");
return PTR_ERR(config_str);
}

priv = calloc(sizeof(*priv), 1);
if (!priv) {
pr_debug("bpf: failed to alloc priv\n");

@@ -341,6 +335,7 @@ config_bpf_program(struct bpf_program *prog)
}
pev = &priv->pev;

config_str = bpf_program__section_name(prog);
pr_debug("bpf: config program '%s'\n", config_str);
err = parse_prog_config(config_str, &main_str, &is_tp, pev);
if (err)

@@ -454,10 +449,7 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
if (err) {
const char *title;

title = bpf_program__title(prog, false);
if (!title)
title = "[unknown]";

title = bpf_program__section_name(prog);
pr_debug("Failed to generate prologue for program %s\n",
title);
return err;
@@ -15,7 +15,6 @@ test_sock
test_sock_addr
test_sock_fields
urandom_read
test_btf
test_sockmap
test_lirc_mode2_user
get_cgroup_id_user
@@ -33,7 +33,7 @@ LDLIBS += -lcap -lelf -lz -lrt -lpthread
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_sock test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \
test_progs-no_alu32 \

@@ -68,7 +68,8 @@ TEST_PROGS := test_kmod.sh \
test_tc_edt.sh \
test_xdping.sh \
test_bpftool_build.sh \
test_bpftool.sh
test_bpftool.sh \
test_bpftool_metadata.sh \

TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \

@@ -176,6 +177,11 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
OUTPUT=$(BUILD_DIR)/bpftool/ \
prefix= DESTDIR=$(SCRATCH_DIR)/ install
$(Q)mkdir -p $(BUILD_DIR)/bpftool/Documentation
$(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \
-C $(BPFTOOLDIR)/Documentation \
OUTPUT=$(BUILD_DIR)/bpftool/Documentation/ \
prefix= DESTDIR=$(SCRATCH_DIR)/ install

$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
@@ -23,7 +23,13 @@ static inline int bpf_flow_load(struct bpf_object **obj,
if (ret)
return ret;

main_prog = bpf_object__find_program_by_title(*obj, section_name);
main_prog = NULL;
bpf_object__for_each_program(prog, *obj) {
if (strcmp(section_name, bpf_program__section_name(prog)) == 0) {
main_prog = prog;
break;
}
}
if (!main_prog)
return -1;
@@ -132,17 +132,38 @@ static void test_task_stack(void)
bpf_iter_task_stack__destroy(skel);
}

static void *do_nothing(void *arg)
{
pthread_exit(arg);
}

static void test_task_file(void)
{
struct bpf_iter_task_file *skel;
pthread_t thread_id;
void *ret;

skel = bpf_iter_task_file__open_and_load();
if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
"skeleton open_and_load failed\n"))
return;

skel->bss->tgid = getpid();

if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
"pthread_create", "pthread_create failed\n"))
goto done;

do_dummy_read(skel->progs.dump_task_file);

if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
"pthread_join", "pthread_join failed\n"))
goto done;

CHECK(skel->bss->count != 0, "check_count",
"invalid non pthread file visit count %d\n", skel->bss->count);

done:
bpf_iter_task_file__destroy(skel);
}
@@ -49,6 +49,7 @@ void test_bpf_verif_scale(void)
{ "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS },

{ "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
{ "pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },

/* full unroll by llvm */
{ "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT },

@@ -86,6 +87,9 @@ void test_bpf_verif_scale(void)
{ "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
{ "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },

/* non-inlined subprogs */
{ "strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },

{ "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
{ "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
@@ -24,40 +24,17 @@

#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../test_btf.h"
#include "test_progs.h"

#define MAX_INSNS 512
#define MAX_SUBPROGS 16

static uint32_t pass_cnt;
static uint32_t error_cnt;
static uint32_t skip_cnt;
static int duration = 0;
static bool always_log;

#define CHECK(condition, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \
fprintf(stderr, format); \
} \
__ret; \
})

static int count_result(int err)
{
if (err)
error_cnt++;
else
pass_cnt++;

fprintf(stderr, "\n");
return err;
}

static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
const char *format, va_list args)
{
return vfprintf(stderr, format, args);
}
#undef CHECK
#define CHECK(condition, format...) _CHECK(condition, "check", duration, format)

#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
@@ -69,21 +46,6 @@ static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535

static struct args {
unsigned int raw_test_num;
unsigned int file_test_num;
unsigned int get_info_test_num;
unsigned int info_raw_test_num;
unsigned int dedup_test_num;
bool raw_test;
bool file_test;
bool get_info_test;
bool pprint_test;
bool always_log;
bool info_raw_test;
bool dedup_test;
} args;

static char btf_log_buf[BTF_LOG_BUF_SIZE];

static struct btf_header hdr_tmpl = {
@@ -3664,7 +3626,7 @@ done:
return raw_btf;
}

static int do_test_raw(unsigned int test_num)
static void do_test_raw(unsigned int test_num)
{
struct btf_raw_test *test = &raw_tests[test_num - 1];
struct bpf_create_map_attr create_attr = {};

@@ -3674,15 +3636,16 @@ static int do_test_raw(unsigned int test_num)
void *raw_btf;
int err;

fprintf(stderr, "BTF raw test[%u] (%s): ", test_num, test->descr);
if (!test__start_subtest(test->descr))
return;

raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
test->str_sec,
test->str_sec_size,
&raw_btf_size, NULL);

if (!raw_btf)
return -1;
return;

hdr = raw_btf;

@@ -3694,7 +3657,7 @@ static int do_test_raw(unsigned int test_num)
*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
args.always_log);
always_log);
free(raw_btf);

err = ((btf_fd == -1) != test->btf_load_err);

@@ -3725,32 +3688,12 @@
map_fd, test->map_create_err);

done:
if (!err)
fprintf(stderr, "OK");

if (*btf_log_buf && (err || args.always_log))
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);

if (btf_fd != -1)
close(btf_fd);
if (map_fd != -1)
close(map_fd);

return err;
}

static int test_raw(void)
{
unsigned int i;
int err = 0;

if (args.raw_test_num)
return count_result(do_test_raw(args.raw_test_num));

for (i = 1; i <= ARRAY_SIZE(raw_tests); i++)
err |= count_result(do_test_raw(i));

return err;
}

struct btf_get_info_test {
@@ -3814,11 +3757,6 @@ const struct btf_get_info_test get_info_tests[] = {
},
};

static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64)(unsigned long)ptr;
}

static int test_big_btf_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];

@@ -3851,7 +3789,7 @@ static int test_big_btf_info(unsigned int test_num)

btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
args.always_log);
always_log);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
err = -1;
goto done;

@@ -3892,7 +3830,7 @@ static int test_big_btf_info(unsigned int test_num)
fprintf(stderr, "OK");

done:
if (*btf_log_buf && (err || args.always_log))
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);

free(raw_btf);

@@ -3939,7 +3877,7 @@ static int test_btf_id(unsigned int test_num)

btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
args.always_log);
always_log);
if (CHECK(btf_fd[0] == -1, "errno:%d", errno)) {
err = -1;
goto done;

@@ -4024,7 +3962,7 @@ static int test_btf_id(unsigned int test_num)
fprintf(stderr, "OK");

done:
if (*btf_log_buf && (err || args.always_log))
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);

free(raw_btf);

@@ -4039,7 +3977,7 @@ done:
return err;
}

static int do_test_get_info(unsigned int test_num)
static void do_test_get_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
unsigned int raw_btf_size, user_btf_size, expected_nbytes;
@@ -4048,11 +3986,14 @@ static int do_test_get_info(unsigned int test_num)
int btf_fd = -1, err, ret;
uint32_t info_len;

fprintf(stderr, "BTF GET_INFO test[%u] (%s): ",
test_num, test->descr);
if (!test__start_subtest(test->descr))
return;

if (test->special_test)
return test->special_test(test_num);
if (test->special_test) {
err = test->special_test(test_num);
if (CHECK(err, "failed: %d\n", err))
return;
}

raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,

@@ -4061,7 +4002,7 @@ static int do_test_get_info(unsigned int test_num)
&raw_btf_size, NULL);

if (!raw_btf)
return -1;
return;

*btf_log_buf = '\0';

@@ -4073,7 +4014,7 @@ static int do_test_get_info(unsigned int test_num)

btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
args.always_log);
always_log);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
err = -1;
goto done;

@@ -4114,7 +4055,7 @@ static int do_test_get_info(unsigned int test_num)
fprintf(stderr, "OK");

done:
if (*btf_log_buf && (err || args.always_log))
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);

free(raw_btf);
@@ -4122,22 +4063,6 @@ done:

if (btf_fd != -1)
close(btf_fd);

return err;
}

static int test_get_info(void)
{
unsigned int i;
int err = 0;

if (args.get_info_test_num)
return count_result(do_test_get_info(args.get_info_test_num));

for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++)
err |= count_result(do_test_get_info(i));

return err;
}

struct btf_file_test {

@@ -4151,7 +4076,7 @@ static struct btf_file_test file_tests[] = {
{ .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
};

static int do_test_file(unsigned int test_num)
static void do_test_file(unsigned int test_num)
{
const struct btf_file_test *test = &file_tests[test_num - 1];
const char *expected_fnames[] = {"_dummy_tracepoint",

@@ -4169,17 +4094,17 @@ static int do_test_file(unsigned int test_num)
struct bpf_map *map;
int i, err, prog_fd;

fprintf(stderr, "BTF libbpf test[%u] (%s): ", test_num,
test->file);
if (!test__start_subtest(test->file))
return;

btf = btf__parse_elf(test->file, &btf_ext);
if (IS_ERR(btf)) {
if (PTR_ERR(btf) == -ENOENT) {
fprintf(stderr, "SKIP. No ELF %s found", BTF_ELF_SEC);
skip_cnt++;
return 0;
printf("%s:SKIP: No ELF %s found", __func__, BTF_ELF_SEC);
test__skip();
return;
}
return PTR_ERR(btf);
return;
}
btf__free(btf);

@@ -4188,7 +4113,7 @@ static int do_test_file(unsigned int test_num)

obj = bpf_object__open(test->file);
if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj)))
return PTR_ERR(obj);
return;

prog = bpf_program__next(NULL, obj);
if (CHECK(!prog, "Cannot find bpf_prog")) {

@@ -4310,21 +4235,6 @@ skip:
done:
free(func_info);
bpf_object__close(obj);
return err;
}

static int test_file(void)
{
unsigned int i;
int err = 0;

if (args.file_test_num)
return count_result(do_test_file(args.file_test_num));

for (i = 1; i <= ARRAY_SIZE(file_tests); i++)
err |= count_result(do_test_file(i));

return err;
}

const char *pprint_enum_str[] = {
@@ -4428,7 +4338,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
.max_entries = 128 * 1024,
.max_entries = 128,
},

{

@@ -4493,7 +4403,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
.max_entries = 128 * 1024,
.max_entries = 128,
},

{

@@ -4564,7 +4474,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
.max_entries = 128 * 1024,
.max_entries = 128,
},

#ifdef __SIZEOF_INT128__

@@ -4591,7 +4501,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv_int128),
.key_type_id = 1,
.value_type_id = 4,
.max_entries = 128 * 1024,
.max_entries = 128,
.mapv_kind = PPRINT_MAPV_KIND_INT128,
},
#endif

@@ -4790,7 +4700,7 @@ static int check_line(const char *expected_line, int nexpected_line,
}

static int do_test_pprint(int test_num)
static void do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
@@ -4809,18 +4719,20 @@ static int do_test_pprint(int test_num)
uint8_t *raw_btf;
ssize_t nread;

fprintf(stderr, "%s(#%d)......", test->descr, test_num);
if (!test__start_subtest(test->descr))
return;

raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, NULL);

if (!raw_btf)
return -1;
return;

*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
args.always_log);
always_log);
free(raw_btf);

if (CHECK(btf_fd == -1, "errno:%d", errno)) {

@@ -4971,7 +4883,7 @@ done:
free(mapv);
if (!err)
fprintf(stderr, "OK");
if (*btf_log_buf && (err || args.always_log))
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd != -1)
close(btf_fd);

@@ -4981,14 +4893,11 @@ done:
fclose(pin_file);
unlink(pin_path);
free(line);

return err;
}

static int test_pprint(void)
static void test_pprint(void)
{
unsigned int i;
int err = 0;

/* test various maps with the first test template */
for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) {

@@ -4999,7 +4908,7 @@ static int test_pprint(void)
pprint_test_template[0].lossless_map = pprint_tests_meta[i].lossless_map;
pprint_test_template[0].percpu_map = pprint_tests_meta[i].percpu_map;

err |= count_result(do_test_pprint(0));
do_test_pprint(0);
}

/* test rest test templates with the first map */

@@ -5010,10 +4919,8 @@ static int test_pprint(void)
pprint_test_template[i].ordered_map = pprint_tests_meta[0].ordered_map;
pprint_test_template[i].lossless_map = pprint_tests_meta[0].lossless_map;
pprint_test_template[i].percpu_map = pprint_tests_meta[0].percpu_map;
err |= count_result(do_test_pprint(i));
do_test_pprint(i);
}

return err;
}

#define BPF_LINE_INFO_ENC(insn_off, file_off, line_off, line_num, line_col) \
@@ -6178,7 +6085,7 @@ done:
return err;
}

static int do_test_info_raw(unsigned int test_num)
static void do_test_info_raw(unsigned int test_num)
{
const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1];
unsigned int raw_btf_size, linfo_str_off, linfo_size;

@@ -6187,18 +6094,19 @@ static int do_test_info_raw(unsigned int test_num)
const char *ret_next_str;
union bpf_attr attr = {};

fprintf(stderr, "BTF prog info raw test[%u] (%s): ", test_num, test->descr);
if (!test__start_subtest(test->descr))
return;

raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, &ret_next_str);

if (!raw_btf)
return -1;
return;

*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
args.always_log);
always_log);
free(raw_btf);

if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {

@@ -6206,7 +6114,7 @@ static int do_test_info_raw(unsigned int test_num)
goto done;
}

if (*btf_log_buf && args.always_log)
if (*btf_log_buf && always_log)
fprintf(stderr, "\n%s", btf_log_buf);
*btf_log_buf = '\0';

@@ -6261,10 +6169,7 @@ static int do_test_info_raw(unsigned int test_num)
goto done;

done:
if (!err)
fprintf(stderr, "OK");

if (*btf_log_buf && (err || args.always_log))
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);

if (btf_fd != -1)

@@ -6274,22 +6179,6 @@ done:

if (!IS_ERR(patched_linfo))
free(patched_linfo);

return err;
}

static int test_info_raw(void)
{
unsigned int i;
int err = 0;

if (args.info_raw_test_num)
return count_result(do_test_info_raw(args.info_raw_test_num));

for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
err |= count_result(do_test_info_raw(i));

return err;
}

struct btf_raw_data {
@@ -6754,7 +6643,7 @@ static void dump_btf_strings(const char *strs, __u32 len)
}
}

static int do_test_dedup(unsigned int test_num)
static void do_test_dedup(unsigned int test_num)
{
const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
__u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;

@@ -6769,13 +6658,15 @@ static int do_test_dedup(unsigned int test_num)
void *raw_btf;
int err = 0, i;

fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
if (!test__start_subtest(test->descr))
return;

raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
test->input.str_sec, test->input.str_sec_size,
&raw_btf_size, &ret_test_next_str);
if (!raw_btf)
return -1;
return;

test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",

@@ -6789,7 +6680,7 @@ static int do_test_dedup(unsigned int test_num)
test->expect.str_sec_size,
&raw_btf_size, &ret_expect_next_str);
if (!raw_btf)
return -1;
return;
expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",

@@ -6894,174 +6785,27 @@ static int do_test_dedup(unsigned int test_num)
}

done:
if (!err)
fprintf(stderr, "OK");
if (!IS_ERR(test_btf))
btf__free(test_btf);
if (!IS_ERR(expect_btf))
btf__free(expect_btf);

return err;
}

static int test_dedup(void)
void test_btf(void)
{
unsigned int i;
int err = 0;
int i;

if (args.dedup_test_num)
return count_result(do_test_dedup(args.dedup_test_num));
always_log = env.verbosity > VERBOSE_NONE;

for (i = 1; i <= ARRAY_SIZE(raw_tests); i++)
do_test_raw(i);
for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++)
do_test_get_info(i);
for (i = 1; i <= ARRAY_SIZE(file_tests); i++)
do_test_file(i);
for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
do_test_info_raw(i);
for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
err |= count_result(do_test_dedup(i));

return err;
}

static void usage(const char *cmd)
{
fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
"\t[-g btf_get_info_test_num (1 - %zu)] |\n"
"\t[-f btf_file_test_num (1 - %zu)] |\n"
"\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
"\t[-p (pretty print test)] |\n"
"\t[-d btf_dedup_test_num (1 - %zu)]]\n",
cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests),
ARRAY_SIZE(dedup_tests));
}

static int parse_args(int argc, char **argv)
{
const char *optstr = "hlpk:f:r:g:d:";
int opt;

while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
case 'l':
args.always_log = true;
break;
case 'f':
args.file_test_num = atoi(optarg);
args.file_test = true;
break;
case 'r':
args.raw_test_num = atoi(optarg);
args.raw_test = true;
break;
case 'g':
args.get_info_test_num = atoi(optarg);
args.get_info_test = true;
break;
case 'p':
args.pprint_test = true;
break;
case 'k':
args.info_raw_test_num = atoi(optarg);
args.info_raw_test = true;
break;
case 'd':
args.dedup_test_num = atoi(optarg);
args.dedup_test = true;
break;
case 'h':
usage(argv[0]);
exit(0);
default:
usage(argv[0]);
return -1;
}
}

if (args.raw_test_num &&
(args.raw_test_num < 1 ||
args.raw_test_num > ARRAY_SIZE(raw_tests))) {
fprintf(stderr, "BTF raw test number must be [1 - %zu]\n",
ARRAY_SIZE(raw_tests));
return -1;
}

if (args.file_test_num &&
(args.file_test_num < 1 ||
args.file_test_num > ARRAY_SIZE(file_tests))) {
fprintf(stderr, "BTF file test number must be [1 - %zu]\n",
ARRAY_SIZE(file_tests));
return -1;
}

if (args.get_info_test_num &&
(args.get_info_test_num < 1 ||
args.get_info_test_num > ARRAY_SIZE(get_info_tests))) {
fprintf(stderr, "BTF get info test number must be [1 - %zu]\n",
ARRAY_SIZE(get_info_tests));
return -1;
}

if (args.info_raw_test_num &&
(args.info_raw_test_num < 1 ||
args.info_raw_test_num > ARRAY_SIZE(info_raw_tests))) {
fprintf(stderr, "BTF prog info raw test number must be [1 - %zu]\n",
ARRAY_SIZE(info_raw_tests));
return -1;
}

if (args.dedup_test_num &&
(args.dedup_test_num < 1 ||
args.dedup_test_num > ARRAY_SIZE(dedup_tests))) {
fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n",
ARRAY_SIZE(dedup_tests));
return -1;
}

return 0;
}

static void print_summary(void)
{
fprintf(stderr, "PASS:%u SKIP:%u FAIL:%u\n",
pass_cnt - skip_cnt, skip_cnt, error_cnt);
}

int main(int argc, char **argv)
{
int err = 0;

err = parse_args(argc, argv);
if (err)
return err;

if (args.always_log)
libbpf_set_print(__base_pr);

if (args.raw_test)
err |= test_raw();

if (args.get_info_test)
err |= test_get_info();

if (args.file_test)
err |= test_file();

if (args.pprint_test)
err |= test_pprint();

if (args.info_raw_test)
err |= test_info_raw();

if (args.dedup_test)
err |= test_dedup();

if (args.raw_test || args.get_info_test || args.file_test ||
args.pprint_test || args.info_raw_test || args.dedup_test)
goto done;

err |= test_raw();
err |= test_get_info();
err |= test_file();
err |= test_info_raw();
err |= test_dedup();

done:
print_summary();
return err;
do_test_dedup(i);
test_pprint();
}
@@ -12,10 +12,13 @@

#include "progs/test_cls_redirect.h"
#include "test_cls_redirect.skel.h"
#include "test_cls_redirect_subprogs.skel.h"

#define ENCAP_IP INADDR_LOOPBACK
#define ENCAP_PORT (1234)

static int duration = 0;

struct addr_port {
in_port_t port;
union {

@@ -361,30 +364,18 @@ static void close_fds(int *fds, int n)
close(fds[i]);
}

void test_cls_redirect(void)
static void test_cls_redirect_common(struct bpf_program *prog)
{
struct test_cls_redirect *skel = NULL;
struct bpf_prog_test_run_attr tattr = {};
int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss;
struct sockaddr *addr;
socklen_t slen;
int i, j, err;

int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];

skel = test_cls_redirect__open();
if (CHECK_FAIL(!skel))
return;

skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);

if (CHECK_FAIL(test_cls_redirect__load(skel)))
goto cleanup;

addr = (struct sockaddr *)&ss;
for (i = 0; i < ARRAY_SIZE(families); i++) {
slen = prepare_addr(&ss, families[i]);

@@ -402,7 +393,7 @@ void test_cls_redirect(void)
goto cleanup;
}

tattr.prog_fd = bpf_program__fd(skel->progs.cls_redirect);
tattr.prog_fd = bpf_program__fd(prog);
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct test_cfg *test = &tests[i];

@@ -450,7 +441,58 @@ void test_cls_redirect(void)
}

cleanup:
test_cls_redirect__destroy(skel);
close_fds((int *)servers, sizeof(servers) / sizeof(servers[0][0]));
close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0]));
}

static void test_cls_redirect_inlined(void)
{
struct test_cls_redirect *skel;
int err;

skel = test_cls_redirect__open();
if (CHECK(!skel, "skel_open", "failed\n"))
return;

skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);

err = test_cls_redirect__load(skel);
if (CHECK(err, "skel_load", "failed: %d\n", err))
goto cleanup;

test_cls_redirect_common(skel->progs.cls_redirect);

cleanup:
test_cls_redirect__destroy(skel);
}

static void test_cls_redirect_subprogs(void)
{
struct test_cls_redirect_subprogs *skel;
int err;

skel = test_cls_redirect_subprogs__open();
if (CHECK(!skel, "skel_open", "failed\n"))
return;

skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);

err = test_cls_redirect_subprogs__load(skel);
if (CHECK(err, "skel_load", "failed: %d\n", err))
goto cleanup;

test_cls_redirect_common(skel->progs.cls_redirect);

cleanup:
test_cls_redirect_subprogs__destroy(skel);
}

void test_cls_redirect(void)
{
if (test__start_subtest("cls_redirect_inlined"))
test_cls_redirect_inlined();
if (test__start_subtest("cls_redirect_subprogs"))
test_cls_redirect_subprogs();
}
@@ -120,6 +120,16 @@ void test_d_path(void)
if (err < 0)
goto cleanup;

if (CHECK(!bss->called_stat,
"stat",
"trampoline for security_inode_getattr was not called\n"))
goto cleanup;

if (CHECK(!bss->called_close,
"close",
"trampoline for filp_close was not called\n"))
goto cleanup;

for (int i = 0; i < MAX_FILES; i++) {
CHECK(strncmp(src.paths[i], bss->paths_stat[i], MAX_PATH_LEN),
"check",
@@ -208,11 +208,18 @@ static void test_func_map_prog_compatibility(void)

void test_fexit_bpf2bpf(void)
{
test_target_no_callees();
test_target_yes_callees();
test_func_replace();
test_func_replace_verify();
test_func_sockmap_update();
test_func_replace_return_code();
test_func_map_prog_compatibility();
if (test__start_subtest("target_no_callees"))
test_target_no_callees();
if (test__start_subtest("target_yes_callees"))
test_target_yes_callees();
if (test__start_subtest("func_replace"))
test_func_replace();
if (test__start_subtest("func_replace_verify"))
test_func_replace_verify();
if (test__start_subtest("func_sockmap_update"))
test_func_sockmap_update();
if (test__start_subtest("func_replace_return_code"))
test_func_replace_return_code();
if (test__start_subtest("func_map_prog_compatibility"))
test_func_map_prog_compatibility();
}
@@ -12,7 +12,8 @@ void test_global_data_init(void)
size_t sz;

obj = bpf_object__open_file(file, NULL);
if (CHECK_FAIL(!obj))
err = libbpf_get_error(obj);
if (CHECK_FAIL(err))
return;

map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
@@ -32,6 +32,7 @@ out:

void test_ksyms(void)
{
__u64 per_cpu_start_addr = kallsyms_find("__per_cpu_start");
__u64 link_fops_addr = kallsyms_find("bpf_link_fops");
const char *btf_path = "/sys/kernel/btf/vmlinux";
struct test_ksyms *skel;

@@ -63,8 +64,9 @@ void test_ksyms(void)
"got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
CHECK(data->out__btf_size != btf_size, "btf_size",
"got %llu, exp %llu\n", data->out__btf_size, btf_size);
CHECK(data->out__per_cpu_start != 0, "__per_cpu_start",
"got %llu, exp %llu\n", data->out__per_cpu_start, (__u64)0);
CHECK(data->out__per_cpu_start != per_cpu_start_addr, "__per_cpu_start",
"got %llu, exp %llu\n", data->out__per_cpu_start,
per_cpu_start_addr);

cleanup:
test_ksyms__destroy(skel);
|
@ -80,9 +80,8 @@ out:
|
|||
|
||||
void test_l4lb_all(void)
|
||||
{
|
||||
const char *file1 = "./test_l4lb.o";
|
||||
const char *file2 = "./test_l4lb_noinline.o";
|
||||
|
||||
test_l4lb(file1);
|
||||
test_l4lb(file2);
|
||||
if (test__start_subtest("l4lb_inline"))
|
||||
test_l4lb("test_l4lb.o");
|
||||
if (test__start_subtest("l4lb_noinline"))
|
||||
test_l4lb("test_l4lb_noinline.o");
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,141 @@
// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright 2020 Google LLC.
 */

#include <test_progs.h>
#include <cgroup_helpers.h>
#include <network_helpers.h>

#include "metadata_unused.skel.h"
#include "metadata_used.skel.h"

static int duration;

static int prog_holds_map(int prog_fd, int map_fd)
{
struct bpf_prog_info prog_info = {};
struct bpf_map_info map_info = {};
__u32 prog_info_len;
__u32 map_info_len;
__u32 *map_ids;
int nr_maps;
int ret;
int i;

map_info_len = sizeof(map_info);
ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
if (ret)
return -errno;

prog_info_len = sizeof(prog_info);
ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
return -errno;

map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
if (!map_ids)
return -ENOMEM;

nr_maps = prog_info.nr_map_ids;
memset(&prog_info, 0, sizeof(prog_info));
prog_info.nr_map_ids = nr_maps;
prog_info.map_ids = ptr_to_u64(map_ids);
prog_info_len = sizeof(prog_info);

ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret) {
ret = -errno;
goto free_map_ids;
}

ret = -ENOENT;
for (i = 0; i < prog_info.nr_map_ids; i++) {
if (map_ids[i] == map_info.id) {
ret = 0;
break;
}
}

free_map_ids:
free(map_ids);
return ret;
}

static void test_metadata_unused(void)
{
struct metadata_unused *obj;
int err;

obj = metadata_unused__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;

err = prog_holds_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata));
if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
return;

/* Assert that we can access the metadata in skel and the values are
 * what we expect.
 */
if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "foo",
sizeof(obj->rodata->bpf_metadata_a)),
"bpf_metadata_a", "expected \"foo\", value differ"))
goto close_bpf_object;
if (CHECK(obj->rodata->bpf_metadata_b != 1, "bpf_metadata_b",
"expected 1, got %d", obj->rodata->bpf_metadata_b))
goto close_bpf_object;

/* Assert that binding metadata map to prog again succeeds. */
err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata), NULL);
CHECK(err, "rebind_map", "errno %d, expected 0", errno);

close_bpf_object:
metadata_unused__destroy(obj);
}

static void test_metadata_used(void)
{
struct metadata_used *obj;
int err;

obj = metadata_used__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;

err = prog_holds_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata));
if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
return;

/* Assert that we can access the metadata in skel and the values are
 * what we expect.
 */
if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "bar",
sizeof(obj->rodata->bpf_metadata_a)),
"metadata_a", "expected \"bar\", value differ"))
goto close_bpf_object;
if (CHECK(obj->rodata->bpf_metadata_b != 2, "metadata_b",
"expected 2, got %d", obj->rodata->bpf_metadata_b))
goto close_bpf_object;

/* Assert that binding metadata map to prog again succeeds. */
err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata), NULL);
CHECK(err, "rebind_map", "errno %d, expected 0", errno);

close_bpf_object:
metadata_used__destroy(obj);
}

void test_metadata(void)
{
if (test__start_subtest("unused"))
test_metadata_unused();

if (test__start_subtest("used"))
test_metadata_used();
}
@@ -27,7 +27,7 @@ void test_reference_tracking(void)
const char *title;

/* Ignore .text sections */
title = bpf_program__title(prog, false);
title = bpf_program__section_name(prog);
if (strstr(title, ".text") != NULL)
continue;
@@ -265,7 +265,7 @@ void test_sk_assign(void)
TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
};
int server = -1;
__s64 server = -1;
int server_map;
int self_net;
int i;
@@ -6,6 +6,9 @@
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "bpf_iter_sockmap.skel.h"

#include "progs/bpf_iter_sockmap.h"

#define TCP_REPAIR 19 /* TCP sock is under repair right now */

@@ -171,6 +174,88 @@ static void test_sockmap_invalid_update(void)
test_sockmap_invalid_update__destroy(skel);
}

static void test_sockmap_iter(enum bpf_map_type map_type)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
int err, len, src_fd, iter_fd, duration = 0;
union bpf_iter_link_info linfo = {0};
__s64 sock_fd[SOCKMAP_MAX_ENTRIES];
__u32 i, num_sockets, max_elems;
struct bpf_iter_sockmap *skel;
struct bpf_link *link;
struct bpf_map *src;
char buf[64];

skel = bpf_iter_sockmap__open_and_load();
if (CHECK(!skel, "bpf_iter_sockmap__open_and_load", "skeleton open_and_load failed\n"))
return;

for (i = 0; i < ARRAY_SIZE(sock_fd); i++)
sock_fd[i] = -1;

/* Make sure we have at least one "empty" entry to test iteration of
 * an empty slot.
 */
num_sockets = ARRAY_SIZE(sock_fd) - 1;

if (map_type == BPF_MAP_TYPE_SOCKMAP) {
src = skel->maps.sockmap;
max_elems = bpf_map__max_entries(src);
} else {
src = skel->maps.sockhash;
max_elems = num_sockets;
}

src_fd = bpf_map__fd(src);

for (i = 0; i < num_sockets; i++) {
sock_fd[i] = connected_socket_v4();
if (CHECK(sock_fd[i] == -1, "connected_socket_v4", "cannot connect\n"))
goto out;

err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
if (CHECK(err, "map_update", "failed: %s\n", strerror(errno)))
goto out;
}

linfo.map.map_fd = src_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.count_elems, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;

iter_fd = bpf_iter_create(bpf_link__fd(link));
if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
goto free_link;

/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "failed: %s\n", strerror(errno)))
goto close_iter;

/* test results */
if (CHECK(skel->bss->elems != max_elems, "elems", "got %u expected %u\n",
skel->bss->elems, max_elems))
goto close_iter;

if (CHECK(skel->bss->socks != num_sockets, "socks", "got %u expected %u\n",
skel->bss->socks, num_sockets))
goto close_iter;

close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
for (i = 0; i < num_sockets; i++) {
if (sock_fd[i] >= 0)
close(sock_fd[i]);
}
bpf_iter_sockmap__destroy(skel);
}

void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))

@@ -187,4 +272,8 @@ void test_sockmap_basic(void)
test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap update in unsafe context"))
test_sockmap_invalid_update();
if (test__start_subtest("sockmap iter"))
test_sockmap_iter(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash iter"))
test_sockmap_iter(BPF_MAP_TYPE_SOCKHASH);
}
@@ -45,9 +45,9 @@ static int getsetsockopt(void)
goto err;
}

if (*(int *)big_buf != 0x08) {
if (*big_buf != 0x08) {
log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
*(int *)big_buf);
(int)*big_buf);
goto err;
}
@@ -0,0 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <time.h>
#include "test_subprogs.skel.h"

static int duration;

void test_subprogs(void)
{
struct test_subprogs *skel;
int err;

skel = test_subprogs__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;

err = test_subprogs__attach(skel);
if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err))
goto cleanup;

usleep(1);

CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12);
CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17);
CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19);
CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36);

cleanup:
test_subprogs__destroy(skel);
}
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -472,6 +473,329 @@ out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tailcalls are working
 * correctly in combination with BPF subprograms
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != 1, "tailcall",
	      "err %d errno %d retval %d\n", err, errno, retval);

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != sizeof(pkt_v4) * 2,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tailcall is preceded by a
 * bpf2bpf call.
 */
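(The expected counter value of 33 checked in this test is consistent with the kernel's tail call limit: assuming the usual MAX_TAIL_CALL_CNT of 32, the looping program body executes once on entry plus once per permitted tail call, i.e. 1 + 32 = 33.)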
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char buff[128] = {};

	err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_title(obj, "classifier/0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have tailcalls
 * in them
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 3,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4),
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 2,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tailcalls combined with bpf2bpf calls. To make sure the
 * tailcall counter behaves correctly, the bpf program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter equals 31: the tailcall counter includes the first two tailcalls,
 * whereas the global counter is incremented only in the loop shown above.
 */
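(Worked out from the numbers in this file: test_tailcall_bpf2bpf_2 above observes 33 executions under the tail call limit, and here the first two tailcalls are taken before the counting loop is entered, so the global counter is expected to end at 33 - 2 = 31.)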
static void test_tailcall_bpf2bpf_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

out:
	bpf_object__close(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
@@ -484,4 +808,12 @@ void test_tailcalls(void)
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4();
}
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
+#include "test_xdp_noinline.skel.h"

void test_xdp_noinline(void)
{
-	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct test_xdp_noinline *skel;
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
@@ -25,58 +26,42 @@ void test_xdp_noinline(void)
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
-	int err, i, prog_fd, map_fd;
+	int err, i;
	__u64 bytes = 0, pkts = 0;
-	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;

-	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (CHECK_FAIL(err))
+	skel = test_xdp_noinline__open_and_load();
+	if (CHECK(!skel, "skel_open_and_load", "failed\n"))
		return;

-	map_fd = bpf_find_map(__func__, obj, "vip_map");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &key, &value, 0);
+	bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0);
+	bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0);
+	bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0);

-	map_fd = bpf_find_map(__func__, obj, "ch_rings");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

-	map_fd = bpf_find_map(__func__, obj, "reals");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

-	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+	err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v4),
+				NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

-	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+	err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v6),
+				NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

-	map_fd = bpf_find_map(__func__, obj, "stats");
-	if (map_fd < 0)
-		goto out;
-	bpf_map_lookup_elem(map_fd, &stats_key, stats);
+	bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
-	if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 ||
-		       pkts != NUM_ITER * 2)) {
-		printf("test_xdp_noinline:FAIL:stats %lld %lld\n",
-		       bytes, pkts);
-	}
-out:
-	bpf_object__close(obj);
+	CHECK(bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2,
+	      "stats", "bytes %lld pkts %lld\n",
+	      (unsigned long long)bytes, (unsigned long long)pkts);
+	test_xdp_noinline__destroy(skel);
}
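(Net effect of the conversion above: the string-based bpf_find_map() lookups and the out: cleanup label are replaced by the generated skeleton's typed map and program accessors, so a misspelled map name can no longer fail at runtime.)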
@@ -13,6 +13,7 @@
#define udp6_sock udp6_sock___not_used
#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used
#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used
#define bpf_iter__sockmap bpf_iter__sockmap___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__bpf_map
@@ -26,6 +27,7 @@
#undef udp6_sock
#undef bpf_iter__bpf_map_elem
#undef bpf_iter__bpf_sk_storage_map
#undef bpf_iter__sockmap

struct bpf_iter_meta {
	struct seq_file *seq;
@@ -96,3 +98,10 @@ struct bpf_iter__bpf_sk_storage_map {
	struct sock *sk;
	void *value;
};

struct bpf_iter__sockmap {
	struct bpf_iter_meta *meta;
	struct bpf_map *map;
	void *key;
	struct sock *sk;
};
@@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Cloudflare */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include "bpf_iter_sockmap.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, SOCKMAP_MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} sockmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, SOCKMAP_MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} sockhash SEC(".maps");

__u32 elems = 0;
__u32 socks = 0;

SEC("iter/sockmap")
int count_elems(struct bpf_iter__sockmap *ctx)
{
	struct sock *sk = ctx->sk;
	__u32 tmp, *key = ctx->key;
	int ret;

	if (key)
		elems++;

	if (sk)
		socks++;

	return 0;
}
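For orientation, a minimal userspace consumer of this iterator program would look roughly like the selftest earlier in this diff; a sketch, with error handling elided and names taken from the code above:

	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};
	struct bpf_link *link;
	char buf[64];
	int iter_fd;

	linfo.map.map_fd = bpf_map__fd(skel->maps.sockmap);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.count_elems, &opts);
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	/* draining the iterator fd drives count_elems over every entry */
	while (read(iter_fd, buf, sizeof(buf)) > 0)
		;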
@@ -0,0 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 */

#define SOCKMAP_MAX_ENTRIES (64)
@@ -6,6 +6,9 @@

char _license[] SEC("license") = "GPL";

int count = 0;
int tgid = 0;

SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{
@@ -17,8 +20,13 @@ int dump_task_file(struct bpf_iter__task_file *ctx)
	if (task == (void *)0 || file == (void *)0)
		return 0;

-	if (ctx->meta->seq_num == 0)
+	if (ctx->meta->seq_num == 0) {
+		count = 0;
		BPF_SEQ_PRINTF(seq, "    tgid      gid       fd      file\n");
+	}

+	if (tgid == task->tgid && task->tgid != task->pid)
+		count++;

	BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
		       (long)file->f_op);
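(The added count/tgid pair appears to track how many files get dumped for extra threads of the chosen tgid, with count reset at seq_num == 0 so the userspace side of the test can read a stable value after a full pass of the iterator.)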
@@ -82,6 +82,14 @@ static inline int check_default(struct bpf_map *indirect,
	return 1;
}

static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;
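(The __noinline variant presumably keeps check_default_noinline as a separate BPF subprogram, so the map pointers are validated as genuine function arguments instead of being checked after inlining.)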
@@ -107,7 +115,7 @@ static inline int check_hash(void)
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

-	VERIFY(check_default(&hash->map, map));
+	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
	VERIFY(hash->elem_size == 64);