bpf: mark all registered map/prog types as __ro_after_init
All map types and prog types are registered to the BPF core through bpf_register_map_type() and bpf_register_prog_type() during init and remain unchanged thereafter. As by design we don't (and never will) have any pluggable code that can register to that at any later point in time, let's mark all the existing bpf_{map,prog}_type_list objects in the tree as __ro_after_init, so they can be moved to a read-only section from then onwards. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
afcb50ba7f
commit
c78f8bdfa1
|
@ -269,7 +269,7 @@ static const struct bpf_map_ops array_ops = {
|
|||
.map_delete_elem = array_map_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list array_type __read_mostly = {
|
||||
static struct bpf_map_type_list array_type __ro_after_init = {
|
||||
.ops = &array_ops,
|
||||
.type = BPF_MAP_TYPE_ARRAY,
|
||||
};
|
||||
|
@ -283,7 +283,7 @@ static const struct bpf_map_ops percpu_array_ops = {
|
|||
.map_delete_elem = array_map_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list percpu_array_type __read_mostly = {
|
||||
static struct bpf_map_type_list percpu_array_type __ro_after_init = {
|
||||
.ops = &percpu_array_ops,
|
||||
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
|
||||
};
|
||||
|
@ -409,7 +409,7 @@ static const struct bpf_map_ops prog_array_ops = {
|
|||
.map_fd_put_ptr = prog_fd_array_put_ptr,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list prog_array_type __read_mostly = {
|
||||
static struct bpf_map_type_list prog_array_type __ro_after_init = {
|
||||
.ops = &prog_array_ops,
|
||||
.type = BPF_MAP_TYPE_PROG_ARRAY,
|
||||
};
|
||||
|
@ -522,7 +522,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
|
|||
.map_release = perf_event_fd_array_release,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list perf_event_array_type __read_mostly = {
|
||||
static struct bpf_map_type_list perf_event_array_type __ro_after_init = {
|
||||
.ops = &perf_event_array_ops,
|
||||
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
|
||||
};
|
||||
|
@ -564,7 +564,7 @@ static const struct bpf_map_ops cgroup_array_ops = {
|
|||
.map_fd_put_ptr = cgroup_fd_array_put_ptr,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list cgroup_array_type __read_mostly = {
|
||||
static struct bpf_map_type_list cgroup_array_type __ro_after_init = {
|
||||
.ops = &cgroup_array_ops,
|
||||
.type = BPF_MAP_TYPE_CGROUP_ARRAY,
|
||||
};
|
||||
|
|
|
@ -1023,7 +1023,7 @@ static const struct bpf_map_ops htab_ops = {
|
|||
.map_delete_elem = htab_map_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list htab_type __read_mostly = {
|
||||
static struct bpf_map_type_list htab_type __ro_after_init = {
|
||||
.ops = &htab_ops,
|
||||
.type = BPF_MAP_TYPE_HASH,
|
||||
};
|
||||
|
@ -1037,7 +1037,7 @@ static const struct bpf_map_ops htab_lru_ops = {
|
|||
.map_delete_elem = htab_lru_map_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list htab_lru_type __read_mostly = {
|
||||
static struct bpf_map_type_list htab_lru_type __ro_after_init = {
|
||||
.ops = &htab_lru_ops,
|
||||
.type = BPF_MAP_TYPE_LRU_HASH,
|
||||
};
|
||||
|
@ -1124,7 +1124,7 @@ static const struct bpf_map_ops htab_percpu_ops = {
|
|||
.map_delete_elem = htab_map_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list htab_percpu_type __read_mostly = {
|
||||
static struct bpf_map_type_list htab_percpu_type __ro_after_init = {
|
||||
.ops = &htab_percpu_ops,
|
||||
.type = BPF_MAP_TYPE_PERCPU_HASH,
|
||||
};
|
||||
|
@ -1138,7 +1138,7 @@ static const struct bpf_map_ops htab_lru_percpu_ops = {
|
|||
.map_delete_elem = htab_lru_map_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = {
|
||||
static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = {
|
||||
.ops = &htab_lru_percpu_ops,
|
||||
.type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
|
||||
};
|
||||
|
|
|
@ -508,7 +508,7 @@ static const struct bpf_map_ops trie_ops = {
|
|||
.map_delete_elem = trie_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list trie_type __read_mostly = {
|
||||
static struct bpf_map_type_list trie_type __ro_after_init = {
|
||||
.ops = &trie_ops,
|
||||
.type = BPF_MAP_TYPE_LPM_TRIE,
|
||||
};
|
||||
|
|
|
@ -273,7 +273,7 @@ static const struct bpf_map_ops stack_map_ops = {
|
|||
.map_delete_elem = stack_map_delete_elem,
|
||||
};
|
||||
|
||||
static struct bpf_map_type_list stack_map_type __read_mostly = {
|
||||
static struct bpf_map_type_list stack_map_type __ro_after_init = {
|
||||
.ops = &stack_map_ops,
|
||||
.type = BPF_MAP_TYPE_STACK_TRACE,
|
||||
};
|
||||
|
|
|
@ -506,7 +506,7 @@ static const struct bpf_verifier_ops kprobe_prog_ops = {
|
|||
.is_valid_access = kprobe_prog_is_valid_access,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list kprobe_tl = {
|
||||
static struct bpf_prog_type_list kprobe_tl __ro_after_init = {
|
||||
.ops = &kprobe_prog_ops,
|
||||
.type = BPF_PROG_TYPE_KPROBE,
|
||||
};
|
||||
|
@ -589,7 +589,7 @@ static const struct bpf_verifier_ops tracepoint_prog_ops = {
|
|||
.is_valid_access = tp_prog_is_valid_access,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list tracepoint_tl = {
|
||||
static struct bpf_prog_type_list tracepoint_tl __ro_after_init = {
|
||||
.ops = &tracepoint_prog_ops,
|
||||
.type = BPF_PROG_TYPE_TRACEPOINT,
|
||||
};
|
||||
|
@ -648,7 +648,7 @@ static const struct bpf_verifier_ops perf_event_prog_ops = {
|
|||
.convert_ctx_access = pe_prog_convert_ctx_access,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list perf_event_tl = {
|
||||
static struct bpf_prog_type_list perf_event_tl __ro_after_init = {
|
||||
.ops = &perf_event_prog_ops,
|
||||
.type = BPF_PROG_TYPE_PERF_EVENT,
|
||||
};
|
||||
|
|
|
@ -3296,47 +3296,47 @@ static const struct bpf_verifier_ops cg_sock_ops = {
|
|||
.convert_ctx_access = sock_filter_convert_ctx_access,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list sk_filter_type __read_mostly = {
|
||||
static struct bpf_prog_type_list sk_filter_type __ro_after_init = {
|
||||
.ops = &sk_filter_ops,
|
||||
.type = BPF_PROG_TYPE_SOCKET_FILTER,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list sched_cls_type __read_mostly = {
|
||||
static struct bpf_prog_type_list sched_cls_type __ro_after_init = {
|
||||
.ops = &tc_cls_act_ops,
|
||||
.type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list sched_act_type __read_mostly = {
|
||||
static struct bpf_prog_type_list sched_act_type __ro_after_init = {
|
||||
.ops = &tc_cls_act_ops,
|
||||
.type = BPF_PROG_TYPE_SCHED_ACT,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list xdp_type __read_mostly = {
|
||||
static struct bpf_prog_type_list xdp_type __ro_after_init = {
|
||||
.ops = &xdp_ops,
|
||||
.type = BPF_PROG_TYPE_XDP,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list cg_skb_type __read_mostly = {
|
||||
static struct bpf_prog_type_list cg_skb_type __ro_after_init = {
|
||||
.ops = &cg_skb_ops,
|
||||
.type = BPF_PROG_TYPE_CGROUP_SKB,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list lwt_in_type __read_mostly = {
|
||||
static struct bpf_prog_type_list lwt_in_type __ro_after_init = {
|
||||
.ops = &lwt_inout_ops,
|
||||
.type = BPF_PROG_TYPE_LWT_IN,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list lwt_out_type __read_mostly = {
|
||||
static struct bpf_prog_type_list lwt_out_type __ro_after_init = {
|
||||
.ops = &lwt_inout_ops,
|
||||
.type = BPF_PROG_TYPE_LWT_OUT,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list lwt_xmit_type __read_mostly = {
|
||||
static struct bpf_prog_type_list lwt_xmit_type __ro_after_init = {
|
||||
.ops = &lwt_xmit_ops,
|
||||
.type = BPF_PROG_TYPE_LWT_XMIT,
|
||||
};
|
||||
|
||||
static struct bpf_prog_type_list cg_sock_type __read_mostly = {
|
||||
static struct bpf_prog_type_list cg_sock_type __ro_after_init = {
|
||||
.ops = &cg_sock_ops,
|
||||
.type = BPF_PROG_TYPE_CGROUP_SOCK
|
||||
};
|
||||
|
|
Loading…
Reference in New Issue