bpf: Move lnode list node to struct bpf_ksym
Add an lnode list node to the 'struct bpf_ksym' object, so that struct bpf_ksym itself can be chained into lists and reused by other objects such as bpf_trampoline and bpf_dispatcher. Change the iterator in the bpf_get_kallsym function to walk bpf_ksym objects. Since ksym->start holds the prog->bpf_func value, it is safe to use it as the value in bpf_get_kallsym. Signed-off-by: Jiri Olsa <jolsa@kernel.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Song Liu <songliubraving@fb.com> Link: https://lore.kernel.org/bpf/20200312195610.346362-6-jolsa@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
bfea9a8574
commit
ecb60d1c67
|
@ -476,6 +476,7 @@ struct bpf_ksym {
|
|||
unsigned long start;
|
||||
unsigned long end;
|
||||
char name[KSYM_NAME_LEN];
|
||||
struct list_head lnode;
|
||||
};
|
||||
|
||||
enum bpf_tramp_prog_type {
|
||||
|
@ -659,7 +660,6 @@ struct bpf_prog_aux {
|
|||
struct bpf_jit_poke_descriptor *poke_tab;
|
||||
u32 size_poke_tab;
|
||||
struct latch_tree_node ksym_tnode;
|
||||
struct list_head ksym_lnode;
|
||||
struct bpf_ksym ksym;
|
||||
const struct bpf_prog_ops *ops;
|
||||
struct bpf_map **used_maps;
|
||||
|
|
|
@ -97,7 +97,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
|
|||
fp->aux->prog = fp;
|
||||
fp->jit_requested = ebpf_jit_enabled();
|
||||
|
||||
INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
|
||||
INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
|
||||
|
||||
return fp;
|
||||
}
|
||||
|
@ -613,18 +613,18 @@ static struct latch_tree_root bpf_tree __cacheline_aligned;
|
|||
|
||||
static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
|
||||
{
|
||||
WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
|
||||
list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
|
||||
WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
|
||||
list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
|
||||
latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
|
||||
}
|
||||
|
||||
static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
|
||||
{
|
||||
if (list_empty(&aux->ksym_lnode))
|
||||
if (list_empty(&aux->ksym.lnode))
|
||||
return;
|
||||
|
||||
latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
|
||||
list_del_rcu(&aux->ksym_lnode);
|
||||
list_del_rcu(&aux->ksym.lnode);
|
||||
}
|
||||
|
||||
static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
|
||||
|
@ -634,8 +634,8 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
|
|||
|
||||
static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
|
||||
{
|
||||
return list_empty(&fp->aux->ksym_lnode) ||
|
||||
fp->aux->ksym_lnode.prev == LIST_POISON2;
|
||||
return list_empty(&fp->aux->ksym.lnode) ||
|
||||
fp->aux->ksym.lnode.prev == LIST_POISON2;
|
||||
}
|
||||
|
||||
void bpf_prog_kallsyms_add(struct bpf_prog *fp)
|
||||
|
@ -729,7 +729,7 @@ out:
|
|||
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
||||
char *sym)
|
||||
{
|
||||
struct bpf_prog_aux *aux;
|
||||
struct bpf_ksym *ksym;
|
||||
unsigned int it = 0;
|
||||
int ret = -ERANGE;
|
||||
|
||||
|
@ -737,13 +737,13 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
|||
return ret;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
|
||||
list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
|
||||
if (it++ != symnum)
|
||||
continue;
|
||||
|
||||
strncpy(sym, aux->ksym.name, KSYM_NAME_LEN);
|
||||
strncpy(sym, ksym->name, KSYM_NAME_LEN);
|
||||
|
||||
*value = (unsigned long)aux->prog->bpf_func;
|
||||
*value = ksym->start;
|
||||
*type = BPF_SYM_ELF_TYPE;
|
||||
|
||||
ret = 0;
|
||||
|
|
Loading…
Reference in New Issue