bpf/selftests: Add bpf_get_task_stack retval bounds test_prog
Add a libbpf test prog which feeds bpf_get_task_stack's return value into seq_write after confirming it is positive. No attempt is made to bound the value from above. The load will fail if the verifier does not refine the retval range based on the buf sz input to bpf_get_task_stack.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20210416204704.2816874-4-davemarchevsky@fb.com
commit c77cec5c20 (parent bdc4e36945)
@@ -147,6 +147,7 @@ static void test_task_stack(void)
 		return;
 
 	do_dummy_read(skel->progs.dump_task_stack);
+	do_dummy_read(skel->progs.get_task_user_stacks);
 
 	bpf_iter_task_stack__destroy(skel);
 }
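For context, do_dummy_read is a helper defined earlier in bpf_iter.c and is not part of this diff; it drives the iterator so the new program actually runs. A minimal standalone sketch of that flow, assuming the standard libbpf iterator API (bpf_program__attach_iter, bpf_iter_create) and using an illustrative name rather than the selftest's own:

#include <unistd.h>
#include <bpf/libbpf.h>

/* Hypothetical equivalent of do_dummy_read: attach the iter program,
 * create an iterator fd, and read it to EOF so the program executes for
 * every task and its bpf_seq_write output is consumed.
 */
static void drive_iter(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[64];
	int iter_fd;
	ssize_t len;

	link = bpf_program__attach_iter(prog, NULL);
	if (libbpf_get_error(link))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd >= 0) {
		/* Contents are not checked; the read just has to finish
		 * without error.
		 */
		while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
			;
		close(iter_fd);
	}
	bpf_link__destroy(link);
}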
@@ -35,3 +35,30 @@ int dump_task_stack(struct bpf_iter__task *ctx)
 
 	return 0;
 }
+
+SEC("iter/task")
+int get_task_user_stacks(struct bpf_iter__task *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct task_struct *task = ctx->task;
+	uint64_t buf_sz = 0;
+	int64_t res;
+
+	if (task == (void *)0)
+		return 0;
+
+	res = bpf_get_task_stack(task, entries,
+				 MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK);
+	if (res <= 0)
+		return 0;
+
+	buf_sz += res;
+
+	/* If the verifier doesn't refine bpf_get_task_stack res, and instead
+	 * assumes res is entirely unknown, this program will fail to load as
+	 * the verifier will believe that max buf_sz value allows reading
+	 * past the end of entries in bpf_seq_write call
+	 */
+	bpf_seq_write(seq, &entries, buf_sz);
+	return 0;
+}
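The new program reads into the entries buffer declared near the top of bpf_iter_task_stack.c, which this hunk does not show. Presumably the declarations look roughly like the following sketch; the exact depth value is an assumption based on the existing dump_task_stack program, not part of this diff:

/* Assumed declarations from earlier in bpf_iter_task_stack.c (not shown
 * in this hunk); the depth value is illustrative.
 */
#define MAX_STACK_TRACE_DEPTH	64
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
#define SIZE_OF_ULONG (sizeof(unsigned long))

Because the buf size passed to bpf_get_task_stack is exactly sizeof(entries), a refined return-value range of [0, MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG] is what lets the verifier prove that the bpf_seq_write(seq, &entries, buf_sz) call never reads past the end of entries.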