/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H

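/*
 * A btf_id_set is the in-kernel view of a set emitted by the
 * BTF_SET_START/END macros below: cnt BTF IDs in ids[], sorted by
 * resolve_btfids.
 */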
struct btf_id_set {
	u32 cnt;
	u32 ids[];
};
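
/*
 * A btf_id_set8 additionally pairs each BTF ID with a flags word (see
 * the BTF_SET8_START/END macros below); the leading cnt and flags
 * fields correspond to the 8 zero bytes emitted at the start of the set.
 */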
struct btf_id_set8 {
	u32 cnt;
	u32 flags;
	struct {
		u32 id;
		u32 flags;
	} pairs[];
};

#ifdef CONFIG_DEBUG_INFO_BTF

#include <linux/compiler.h> /* for __PASTE */
#include <linux/compiler_attributes.h> /* for __maybe_unused */

/*
 * The following macros help to define lists of BTF IDs placed
 * in the .BTF_ids section. They are initially filled with zeros
 * (during compilation) and resolved later during the
 * linking phase by the resolve_btfids tool.
 *
 * Any change in the list layout must be reflected in the
 * resolve_btfids tool logic.
 */

#define BTF_IDS_SECTION ".BTF_ids"

#define ____BTF_ID(symbol, word) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
".local " #symbol " ; \n" \
".type " #symbol ", STT_OBJECT; \n" \
".size " #symbol ", 4; \n" \
#symbol ": \n" \
".zero 4 \n" \
word \
".popsection; \n");

#define __BTF_ID(symbol, word) \
	____BTF_ID(symbol, word)

#define __ID(prefix) \
	__PASTE(prefix, __COUNTER__)

/*
 * The BTF_ID macro defines a unique symbol for each ID, pointing
 * to 4 zero bytes.
 */
#define BTF_ID(prefix, name) \
	__BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), "")
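
/*
 * For example, BTF_ID(struct, task_struct) emits a local 4-byte object
 * named __BTF_ID__struct__task_struct__<N> into .BTF_ids (the <N>
 * suffix comes from __COUNTER__), which resolve_btfids later fills with
 * the BTF ID of struct task_struct.
 */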

#define ____BTF_ID_FLAGS(prefix, name, flags) \
	__BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), ".long " #flags "\n")
#define __BTF_ID_FLAGS(prefix, name, flags, ...) \
	____BTF_ID_FLAGS(prefix, name, flags)
#define BTF_ID_FLAGS(prefix, name, ...) \
	__BTF_ID_FLAGS(prefix, name, ##__VA_ARGS__, 0)
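
/*
 * BTF_ID_FLAGS() emits the same 4-byte ID slot as BTF_ID(), followed by
 * a .long holding the flags word; the trailing 0 in the expansion makes
 * the flags argument optional, so BTF_ID_FLAGS(func, name) records a
 * flags value of 0.
 */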

/*
 * The BTF_ID_LIST macro defines a pure (unsorted) list
 * of BTF IDs, with the following layout:
 *
 * BTF_ID_LIST(list1)
 * BTF_ID(type1, name1)
 * BTF_ID(type2, name2)
 *
 * list1:
 * __BTF_ID__type1__name1__1:
 * .zero 4
 * __BTF_ID__type2__name2__2:
 * .zero 4
 *
 */
#define __BTF_ID_LIST(name, scope) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " " #name "; \n" \
#name ":; \n" \
".popsection; \n");

#define BTF_ID_LIST(name) \
__BTF_ID_LIST(name, local) \
extern u32 name[];
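
/*
 * Once resolve_btfids has processed the final object, name[0], name[1],
 * ... hold the resolved BTF IDs in the order the BTF_ID() entries
 * appear.
 */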

#define BTF_ID_LIST_GLOBAL(name, n) \
__BTF_ID_LIST(name, globl)

/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
 * a single entry.
 */
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
	BTF_ID_LIST(name) \
	BTF_ID(prefix, typename)
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
	BTF_ID_LIST_GLOBAL(name, 1) \
	BTF_ID(prefix, typename)
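
/*
 * For example, BTF_ID_LIST_SINGLE(btf_foo_ids, struct, foo) (an
 * illustrative name) expands to a one-entry list whose only element,
 * btf_foo_ids[0], resolves to the BTF ID of struct foo.
 */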

/*
 * The BTF_ID_UNUSED macro defines 4 zero bytes.
 * It's used when we want to define an 'unused' entry
 * in BTF_ID_LIST, like:
 *
 * BTF_ID_LIST(bpf_skb_output_btf_ids)
 * BTF_ID(struct, sk_buff)
 * BTF_ID_UNUSED
 * BTF_ID(struct, task_struct)
 */

#define BTF_ID_UNUSED \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
".zero 4 \n" \
".popsection; \n");

/*
 * The BTF_SET_START/END macro pair defines a sorted list of
 * BTF IDs plus its member count, with the following layout:
 *
 * BTF_SET_START(list)
 * BTF_ID(type1, name1)
 * BTF_ID(type2, name2)
 * BTF_SET_END(list)
 *
 * __BTF_ID__set__list:
 * .zero 4
 * list:
 * __BTF_ID__type1__name1__3:
 * .zero 4
 * __BTF_ID__type2__name2__4:
 * .zero 4
 *
 */
#define __BTF_SET_START(name, scope) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " __BTF_ID__set__" #name "; \n" \
"__BTF_ID__set__" #name ":; \n" \
".zero 4 \n" \
".popsection; \n");

#define BTF_SET_START(name) \
__BTF_ID_LIST(name, local) \
__BTF_SET_START(name, local)

#define BTF_SET_START_GLOBAL(name) \
__BTF_ID_LIST(name, globl) \
__BTF_SET_START(name, globl)

#define BTF_SET_END(name) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
".size __BTF_ID__set__" #name ", .-" #name " \n" \
".popsection; \n"); \
extern struct btf_id_set name;
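
/*
 * A typical use (illustrative name):
 *
 * BTF_SET_START(bpf_foo_set)
 * BTF_ID(struct, sk_buff)
 * BTF_ID(struct, task_struct)
 * BTF_SET_END(bpf_foo_set)
 *
 * After linking, bpf_foo_set.cnt is the number of entries and
 * bpf_foo_set.ids[] holds the resolved, sorted BTF IDs.
 */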

/*
 * The BTF_SET8_START/END macro pair defines a sorted list of
 * BTF IDs and their flags plus its member count, with the
 * following layout:
 *
 * BTF_SET8_START(list)
 * BTF_ID_FLAGS(type1, name1, flags)
 * BTF_ID_FLAGS(type2, name2, flags)
 * BTF_SET8_END(list)
 *
 * __BTF_ID__set8__list:
 * .zero 8
 * list:
 * __BTF_ID__type1__name1__3:
 * .zero 4
 * .word (1 << 0) | (1 << 2)
 * __BTF_ID__type2__name2__5:
 * .zero 4
 * .word (1 << 3) | (1 << 1) | (1 << 2)
 *
 */
#define __BTF_SET8_START(name, scope) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " __BTF_ID__set8__" #name "; \n" \
"__BTF_ID__set8__" #name ":; \n" \
".zero 8 \n" \
".popsection; \n");

#define BTF_SET8_START(name) \
__BTF_ID_LIST(name, local) \
__BTF_SET8_START(name, local)

#define BTF_SET8_END(name) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
".size __BTF_ID__set8__" #name ", .-" #name " \n" \
".popsection; \n"); \
extern struct btf_id_set8 name;
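
/*
 * As with BTF_SET_START/END, the resulting object is readable through
 * struct btf_id_set8: cnt entries in pairs[], where pairs[i].id is
 * resolved by resolve_btfids and pairs[i].flags comes from the
 * BTF_ID_FLAGS() argument.
 */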

#else

#define BTF_ID_LIST(name) static u32 __maybe_unused name[16];
#define BTF_ID(prefix, name)
#define BTF_ID_FLAGS(prefix, name, ...)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_END(name)
#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
#define BTF_SET8_END(name)

#endif /* CONFIG_DEBUG_INFO_BTF */

#ifdef CONFIG_NET
/* Define a list of socket types which can be the argument for
 * skc_to_*_sock() helpers. All these sockets should have
 * sock_common as the first member of their memory layout.
 */
#define BTF_SOCK_TYPE_xxx \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
	BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)

enum {
#define BTF_SOCK_TYPE(name, str) name,
BTF_SOCK_TYPE_xxx
#undef BTF_SOCK_TYPE
MAX_BTF_SOCK_TYPE,
};

extern u32 btf_sock_ids[];
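
/*
 * btf_sock_ids[] is indexed by the BTF_SOCK_TYPE_* enumerators above;
 * e.g. btf_sock_ids[BTF_SOCK_TYPE_TCP] holds the BTF ID of struct
 * tcp_sock once resolve_btfids has filled the list in.
 */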
#endif

#define BTF_TRACING_TYPE_xxx \
	BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
	BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
	BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)

enum {
#define BTF_TRACING_TYPE(name, type) name,
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE
MAX_BTF_TRACING_TYPE,
};

extern u32 btf_tracing_ids[];
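
/*
 * Like btf_sock_ids[], btf_tracing_ids[] is indexed by the
 * BTF_TRACING_TYPE_* enumerators, e.g.
 * btf_tracing_ids[BTF_TRACING_TYPE_TASK] for struct task_struct.
 */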

extern u32 bpf_cgroup_btf_id[];
extern u32 bpf_local_storage_map_btf_id[];

#endif