Daniel Borkmann says:

====================
pull-request: bpf-next 2022-02-09

We've added 126 non-merge commits during the last 16 day(s) which contain
a total of 201 files changed, 4049 insertions(+), 2215 deletions(-).

The main changes are:

1) Add custom BPF allocator for JITs that pack multiple programs into a
   huge page to reduce iTLB pressure, from Song Liu.

2) Add __user tagging support in vmlinux BTF and utilize it from BPF
   verifier when generating loads, from Yonghong Song.

3) Add per-socket fast path check guarding from cgroup/BPF overhead when
   used by only some sockets, from Pavel Begunkov.

4) Continued libbpf deprecation work of APIs/features and removal of their
   usage from samples, selftests, libbpf & bpftool, from Andrii Nakryiko
   and various others.

5) Improve BPF instruction set documentation by adding byte swap
   instructions and cleaning up load/store section, from Christoph Hellwig.

6) Switch BPF preload infra to light skeleton and remove libbpf dependency
   from it, from Alexei Starovoitov.

7) Fix architecture-agnostic macros in libbpf for accessing syscall
   arguments from BPF progs for non-x86 architectures, from Ilya Leoshkevich.

8) Rework port members in struct bpf_sk_lookup and struct bpf_sock to be
   of 16-bit field with anonymous zero padding, from Jakub Sitnicki.

9) Add new bpf_copy_from_user_task() helper to read memory from a
   different task than current. Add ability to create sleepable BPF
   iterator progs, from Kenny Yu.

10) Implement XSK batching for ice's zero-copy driver used by AF_XDP and
    utilize TX batching API from XSK buffer pool, from Maciej Fijalkowski.

11) Generate temporary netns names for BPF selftests to avoid naming
    collisions, from Hangbin Liu.

12) Implement bpf_core_types_are_compat() with limited recursion for
    in-kernel usage, from Matteo Croce.

13) Simplify pahole version detection and finally enable
    CONFIG_DEBUG_INFO_DWARF5 to be selected with CONFIG_DEBUG_INFO_BTF,
    from Nathan Chancellor.

14) Misc minor fixes to libbpf and selftests from various folks.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (126 commits)
  selftests/bpf: Cover 4-byte load from remote_port in bpf_sk_lookup
  bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide
  libbpf: Fix compilation warning due to mismatched printf format
  selftests/bpf: Test BPF_KPROBE_SYSCALL macro
  libbpf: Add BPF_KPROBE_SYSCALL macro
  libbpf: Fix accessing the first syscall argument on s390
  libbpf: Fix accessing the first syscall argument on arm64
  libbpf: Allow overriding PT_REGS_PARM1{_CORE}_SYSCALL
  selftests/bpf: Skip test_bpf_syscall_macro's syscall_arg1 on arm64 and s390
  libbpf: Fix accessing syscall arguments on riscv
  libbpf: Fix riscv register names
  libbpf: Fix accessing syscall arguments on powerpc
  selftests/bpf: Use PT_REGS_SYSCALL_REGS in bpf_syscall_macro
  libbpf: Add PT_REGS_SYSCALL_REGS macro
  selftests/bpf: Fix an endianness issue in bpf_syscall_macro test
  bpf: Fix bpf_prog_pack build HPAGE_PMD_SIZE
  bpf: Fix leftover header->pages in sparc and powerpc code.
  libbpf: Fix signedness bug in btf_dump_array_data()
  selftests/bpf: Do not export subtest as standalone test
  bpf, x86_64: Fail gracefully on bpf_jit_binary_pack_finalize failures
  ...
====================

Link: https://lore.kernel.org/r/20220209210050.8425-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -503,6 +503,19 @@ valid index (starting from 0) pointing to a member or an argument.
  * ``info.vlen``: 0
  * ``type``: the type with ``btf_type_tag`` attribute
 
+Currently, ``BTF_KIND_TYPE_TAG`` is only emitted for pointer types.
+It has the following btf type chain:
+::
+
+  ptr -> [type_tag]*
+      -> [const | volatile | restrict | typedef]*
+      -> base_type
+
+Basically, a pointer type points to zero or more
+type_tag, then zero or more const/volatile/restrict/typedef
+and finally the base type. The base type is one of
+int, ptr, array, struct, union, enum, func_proto and float types.
+
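A minimal sketch of a declaration that produces such a chain (not part of this diff; it assumes a clang with the ``btf_type_tag`` attribute, which the ``__user`` tagging in this series relies on)::

  #define __tag_user __attribute__((btf_type_tag("user")))

  const int __tag_user *p;
  /* BTF chain: ptr -> type_tag("user") -> const -> int */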
 3. BTF Kernel API
 =================
@@ -22,7 +22,13 @@ necessary across calls.
 Instruction encoding
 ====================
 
-eBPF uses 64-bit instructions with the following encoding:
+eBPF has two instruction encodings:
+
+* the basic instruction encoding, which uses 64 bits to encode an instruction
+* the wide instruction encoding, which appends a second 64-bit immediate value
+  (imm64) after the basic instruction for a total of 128 bits.
+
+The basic instruction encoding looks as follows:
 
 ============= ======= =============== ==================== ============
 32 bits (MSB) 16 bits 4 bits          4 bits               8 bits (LSB)
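For orientation, the 64-bit basic encoding maps onto ``struct bpf_insn`` from the UAPI header; a sketch with field comments added here::

  struct bpf_insn {
          __u8    code;           /* 8 bits (LSB): opcode */
          __u8    dst_reg:4;      /* 4 bits: destination register */
          __u8    src_reg:4;      /* 4 bits: source register */
          __s16   off;            /* 16 bits: signed offset */
          __s32   imm;            /* 32 bits (MSB): signed immediate */
  };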
@@ -82,9 +88,9 @@ BPF_ALU uses 32-bit wide operands while BPF_ALU64 uses 64-bit wide operands for
 otherwise identical operations.
 The code field encodes the operation as below:
 
-======== ===== ==========================
+======== ===== =================================================
 code     value description
-======== ===== ==========================
+======== ===== =================================================
 BPF_ADD  0x00  dst += src
 BPF_SUB  0x10  dst -= src
 BPF_MUL  0x20  dst \*= src

@@ -98,8 +104,8 @@ The code field encodes the operation as below:
 BPF_XOR  0xa0  dst ^= src
 BPF_MOV  0xb0  dst = src
 BPF_ARSH 0xc0  sign extending shift right
-BPF_END  0xd0  endianness conversion
-======== ===== ==========================
+BPF_END  0xd0  byte swap operations (see separate section below)
+======== ===== =================================================
 
 BPF_ADD | BPF_X | BPF_ALU means::
@@ -118,6 +124,42 @@ BPF_XOR | BPF_K | BPF_ALU64 means::
 
   src_reg = src_reg ^ imm32
 
+
+Byte swap instructions
+----------------------
+
+The byte swap instructions use an instruction class of ``BPF_ALU`` and a 4-bit
+code field of ``BPF_END``.
+
+The byte swap instructions operate on the destination register
+only and do not use a separate source register or immediate value.
+
+The 1-bit source operand field in the opcode is used to select what byte
+order the operation converts from or to:
+
+========= ===== =================================================
+source    value description
+========= ===== =================================================
+BPF_TO_LE 0x00  convert between host byte order and little endian
+BPF_TO_BE 0x08  convert between host byte order and big endian
+========= ===== =================================================
+
+The imm field encodes the width of the swap operations. The following widths
+are supported: 16, 32 and 64.
+
+Examples:
+
+``BPF_ALU | BPF_TO_LE | BPF_END`` with imm = 16 means::
+
+  dst_reg = htole16(dst_reg)
+
+``BPF_ALU | BPF_TO_BE | BPF_END`` with imm = 64 means::
+
+  dst_reg = htobe64(dst_reg)
+
+``BPF_FROM_LE`` and ``BPF_FROM_BE`` exist as aliases for ``BPF_TO_LE`` and
+``BPF_TO_BE`` respectively.
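A sketch of assembling one of these with the UAPI constants (not part of the diff; ``BPF_TO_BE`` reuses the encoding of the source bit)::

  /* dst_reg r1 = htobe32(r1) */
  struct bpf_insn swap = {
          .code    = BPF_ALU | BPF_END | BPF_TO_BE,
          .dst_reg = BPF_REG_1,
          .imm     = 32,  /* swap width in bits */
  };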
 Jump instructions
 -----------------
@@ -176,63 +218,96 @@ The mode modifier is one of:
 ============= ===== ====================================
 mode modifier value description
 ============= ===== ====================================
-BPF_IMM       0x00  used for 64-bit mov
-BPF_ABS       0x20  legacy BPF packet access
-BPF_IND       0x40  legacy BPF packet access
-BPF_MEM       0x60  all normal load and store operations
+BPF_IMM       0x00  64-bit immediate instructions
+BPF_ABS       0x20  legacy BPF packet access (absolute)
+BPF_IND       0x40  legacy BPF packet access (indirect)
+BPF_MEM       0x60  regular load and store operations
 BPF_ATOMIC    0xc0  atomic operations
 ============= ===== ====================================
 
-BPF_MEM | <size> | BPF_STX means::
+
+Regular load and store operations
+---------------------------------
+
+The ``BPF_MEM`` mode modifier is used to encode regular load and store
+instructions that transfer data between a register and memory.
+
+``BPF_MEM | <size> | BPF_STX`` means::
 
   *(size *) (dst_reg + off) = src_reg
 
-BPF_MEM | <size> | BPF_ST means::
+``BPF_MEM | <size> | BPF_ST`` means::
 
   *(size *) (dst_reg + off) = imm32
 
-BPF_MEM | <size> | BPF_LDX means::
+``BPF_MEM | <size> | BPF_LDX`` means::
 
   dst_reg = *(size *) (src_reg + off)
 
-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW.
+Where size is one of: ``BPF_B``, ``BPF_H``, ``BPF_W``, or ``BPF_DW``.
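For reference, the in-kernel instruction macros from ``include/linux/filter.h`` spell these out as, e.g. (a sketch, not part of the diff)::

  /* *(u32 *)(r10 - 4) = r1,  then  r2 = *(u32 *)(r10 - 4) */
  BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
  BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),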
 Atomic operations
 -----------------
 
-eBPF includes atomic operations, which use the immediate field for extra
-encoding::
+Atomic operations are operations that operate on memory and can not be
+interrupted or corrupted by other access to the same memory region
+by other eBPF programs or means outside of this specification.
 
-  .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
-  .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
+All atomic operations supported by eBPF are encoded as store operations
+that use the ``BPF_ATOMIC`` mode modifier as follows:
 
-The basic atomic operations supported are::
+* ``BPF_ATOMIC | BPF_W | BPF_STX`` for 32-bit operations
+* ``BPF_ATOMIC | BPF_DW | BPF_STX`` for 64-bit operations
+* 8-bit and 16-bit wide atomic operations are not supported.
 
-  BPF_ADD
-  BPF_AND
-  BPF_OR
-  BPF_XOR
+The imm field is used to encode the actual atomic operation.
+Simple atomic operations use a subset of the values defined to encode
+arithmetic operations in the imm field to encode the atomic operation:
 
-Each having equivalent semantics with the ``BPF_ADD`` example, that is: the
-memory location addressed by ``dst_reg + off`` is atomically modified, with
-``src_reg`` as the other operand. If the ``BPF_FETCH`` flag is set in the
-immediate, then these operations also overwrite ``src_reg`` with the
-value that was in memory before it was modified.
+======== ===== ===========
+imm      value description
+======== ===== ===========
+BPF_ADD  0x00  atomic add
+BPF_OR   0x40  atomic or
+BPF_AND  0x50  atomic and
+BPF_XOR  0xa0  atomic xor
+======== ===== ===========
 
-The more special operations are::
+``BPF_ATOMIC | BPF_W | BPF_STX`` with imm = BPF_ADD means::
 
-  BPF_XCHG
+  *(u32 *)(dst_reg + off16) += src_reg
 
-This atomically exchanges ``src_reg`` with the value addressed by ``dst_reg +
-off``. ::
+``BPF_ATOMIC | BPF_DW | BPF_STX`` with imm = BPF_ADD means::
 
-  BPF_CMPXCHG
+  *(u64 *)(dst_reg + off16) += src_reg
 
-This atomically compares the value addressed by ``dst_reg + off`` with
-``R0``. If they match it is replaced with ``src_reg``. In either case, the
-value that was there before is zero-extended and loaded back to ``R0``.
-
-Note that 1 and 2 byte atomic operations are not supported.
+``BPF_XADD`` is a deprecated name for ``BPF_ATOMIC | BPF_ADD``.
+
+In addition to the simple atomic operations, there also is a modifier and
+two complex atomic operations:
+
+=========== ================ ===========================
+imm         value            description
+=========== ================ ===========================
+BPF_FETCH   0x01             modifier: return old value
+BPF_XCHG    0xe0 | BPF_FETCH atomic exchange
+BPF_CMPXCHG 0xf0 | BPF_FETCH atomic compare and exchange
+=========== ================ ===========================
+
+The ``BPF_FETCH`` modifier is optional for simple atomic operations, and
+always set for the complex atomic operations. If the ``BPF_FETCH`` flag
+is set, then the operation also overwrites ``src_reg`` with the value that
+was in memory before it was modified.
+
+The ``BPF_XCHG`` operation atomically exchanges ``src_reg`` with the value
+addressed by ``dst_reg + off``.
+
+The ``BPF_CMPXCHG`` operation atomically compares the value addressed by
+``dst_reg + off`` with ``R0``. If they match, the value addressed by
+``dst_reg + off`` is replaced with ``src_reg``. In either case, the
+value that was at ``dst_reg + off`` before the operation is zero-extended
+and loaded back to ``R0``.
|
||||
enabled. If a lower version for ``-mcpu`` is set, the only atomic instruction
|
||||
|
@ -240,40 +315,52 @@ Clang can generate is ``BPF_ADD`` *without* ``BPF_FETCH``. If you need to enable
|
|||
the atomics features, while keeping a lower ``-mcpu`` version, you can use
|
||||
``-Xclang -target-feature -Xclang +alu32``.
|
||||
|
||||
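A typical invocation then looks like this (a sketch; ``prog.c`` is a placeholder)::

  clang -O2 -target bpf -mcpu=v2 \
        -Xclang -target-feature -Xclang +alu32 \
        -c prog.c -o prog.o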
-You may encounter ``BPF_XADD`` - this is a legacy name for ``BPF_ATOMIC``,
-referring to the exclusive-add operation encoded when the immediate field is
-zero.
-
-16-byte instructions
---------------------
+64-bit immediate instructions
+-----------------------------
 
-eBPF has one 16-byte instruction: ``BPF_LD | BPF_DW | BPF_IMM`` which consists
-of two consecutive ``struct bpf_insn`` 8-byte blocks and interpreted as single
-instruction that loads 64-bit immediate value into a dst_reg.
+Instructions with the ``BPF_IMM`` mode modifier use the wide instruction
+encoding for an extra imm64 value.
+
+There is currently only one such instruction.
 
-Packet access instructions
---------------------------
-
-eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
-(BPF_IND | <size> | BPF_LD) which are used to access packet data.
+``BPF_LD | BPF_DW | BPF_IMM`` means::
+
+  dst_reg = imm64
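Loaders split the 64-bit constant across the two consecutive instruction slots; roughly (a sketch mirroring the in-kernel ``BPF_LD_IMM64`` helper macro)::

  /* r0 = 0x1122334455667788 */
  struct bpf_insn ld64[2] = {
          { .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_0,
            .imm = 0x55667788 },  /* low 32 bits */
          { .imm = 0x11223344 },  /* high 32 bits */
  };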
-They had to be carried over from classic BPF to have strong performance of
-socket filters running in eBPF interpreter. These instructions can only
-be used when interpreter context is a pointer to ``struct sk_buff`` and
-have seven implicit operands. Register R6 is an implicit input that must
-contain pointer to sk_buff. Register R0 is an implicit output which contains
-the data fetched from the packet. Registers R1-R5 are scratch registers
-and must not be used to store the data across BPF_ABS | BPF_LD or
-BPF_IND | BPF_LD instructions.
-
-These instructions have implicit program exit condition as well. When
-eBPF program is trying to access the data beyond the packet boundary,
-the interpreter will abort the execution of the program. JIT compilers
-therefore must preserve this property. src_reg and imm32 fields are
-explicit inputs to these instructions.
-
-For example, BPF_IND | BPF_W | BPF_LD means::
+
+Legacy BPF Packet access instructions
+-------------------------------------
+
+eBPF has special instructions for access to packet data that have been
+carried over from classic BPF to retain the performance of legacy socket
+filters running in the eBPF interpreter.
+
+The instructions come in two forms: ``BPF_ABS | <size> | BPF_LD`` and
+``BPF_IND | <size> | BPF_LD``.
+
+These instructions are used to access packet data and can only be used when
+the program context is a pointer to networking packet. ``BPF_ABS``
+accesses packet data at an absolute offset specified by the immediate data
+and ``BPF_IND`` accesses packet data at an offset that includes the value of
+a register in addition to the immediate data.
+
+These instructions have seven implicit operands:
+
+* Register R6 is an implicit input that must contain a pointer to a
+  struct sk_buff.
+* Register R0 is an implicit output which contains the data fetched from
+  the packet.
+* Registers R1-R5 are scratch registers that are clobbered after a call to
+  ``BPF_ABS | BPF_LD`` or ``BPF_IND | BPF_LD`` instructions.
+
+These instructions have an implicit program exit condition as well. When an
+eBPF program is trying to access the data beyond the packet boundary, the
+program execution will be aborted.
+
+``BPF_ABS | BPF_W | BPF_LD`` means::
+
+  R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + imm32))
+
+``BPF_IND | BPF_W | BPF_LD`` means::
+
+  R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32))
-
-and R1 - R5 are clobbered.
@@ -3523,6 +3523,8 @@ F: net/sched/act_bpf.c
 F: net/sched/cls_bpf.c
 F: samples/bpf/
 F: scripts/bpf_doc.py
+F: scripts/pahole-flags.sh
+F: scripts/pahole-version.sh
 F: tools/bpf/
 F: tools/lib/bpf/
 F: tools/testing/selftests/bpf/
@@ -1143,6 +1143,11 @@ out:
 	return prog;
 }
 
+bool bpf_jit_supports_kfunc_call(void)
+{
+	return true;
+}
+
 u64 bpf_jit_alloc_exec_limit(void)
 {
 	return VMALLOC_END - VMALLOC_START;

@@ -264,7 +264,7 @@ skip_codegen_passes:
 	fp->jited = 1;
 	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
 
-	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
+	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
 
 	if (!fp->is_func || extra_pass) {
 		bpf_jit_binary_lock_ro(bpf_hdr);
 		bpf_prog_fill_jited_linfo(fp, addrs);

@@ -1599,7 +1599,7 @@ skip_init_ctx:
 	if (bpf_jit_enable > 1)
 		bpf_jit_dump(prog->len, image_size, pass, ctx.image);
 
-	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
+	bpf_flush_icache(header, (u8 *)header + header->size);
 
 	if (!prog->is_func || extra_pass) {
 		bpf_jit_binary_lock_ro(header);
@@ -158,6 +158,7 @@ config X86
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
+	select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if X86_64

@@ -44,6 +44,7 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len);
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void text_poke_sync(void);
 extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
+extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
 extern int poke_int3_handler(struct pt_regs *regs);
 extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
@@ -1102,6 +1102,40 @@ void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
 	return __text_poke(addr, opcode, len);
 }
 
+/**
+ * text_poke_copy - Copy instructions into (an unused part of) RX memory
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy, could be more than 2x PAGE_SIZE
+ *
+ * Not safe against concurrent execution; useful for JITs to dump
+ * new code blocks into unused regions of RX memory. Can be used in
+ * conjunction with synchronize_rcu_tasks() to wait for existing
+ * execution to quiesce after having made sure no existing functions
+ * pointers are live.
+ */
+void *text_poke_copy(void *addr, const void *opcode, size_t len)
+{
+	unsigned long start = (unsigned long)addr;
+	size_t patched = 0;
+
+	if (WARN_ON_ONCE(core_kernel_text(start)))
+		return NULL;
+
+	mutex_lock(&text_mutex);
+	while (patched < len) {
+		unsigned long ptr = start + patched;
+		size_t s;
+
+		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
+
+		__text_poke((void *)ptr, opcode + patched, s);
+		patched += s;
+	}
+	mutex_unlock(&text_mutex);
+	return addr;
+}
+
 static void do_sync_core(void *info)
 {
 	sync_core();
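The loop is bounded by the two-page window that ``__text_poke()`` can map at a time; a quick worked example of the chunking arithmetic (not from the diff)::

  /* addr page offset 0x800, len 10000:
   *   1st chunk: s = 2 * 4096 - 0x800        = 6144
   *   2nd chunk: s = min(8192, 10000 - 6144) = 3856
   */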
@@ -330,8 +330,7 @@ static int emit_jump(u8 **pprog, void *func, void *ip)
 }
 
 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
-				void *old_addr, void *new_addr,
-				const bool text_live)
+				void *old_addr, void *new_addr)
 {
 	const u8 *nop_insn = x86_nops[5];
 	u8 old_insn[X86_PATCH_SIZE];

@@ -365,10 +364,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 		goto out;
 	ret = 1;
 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
-		if (text_live)
-			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
-		else
-			memcpy(ip, new_insn, X86_PATCH_SIZE);
+		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
 		ret = 0;
 	}
 out:

@@ -384,7 +380,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 		/* BPF poking in modules is not supported */
 		return -EINVAL;
 
-	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
+	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
 }
 
 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
@@ -558,24 +554,15 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 		mutex_lock(&array->aux->poke_mutex);
 		target = array->ptrs[poke->tail_call.key];
 		if (target) {
-			/* Plain memcpy is used when image is not live yet
-			 * and still not locked as read-only. Once poke
-			 * location is active (poke->tailcall_target_stable),
-			 * any parallel bpf_arch_text_poke() might occur
-			 * still on the read-write image until we finally
-			 * locked it as read-only. Both modifications on
-			 * the given image are under text_mutex to avoid
-			 * interference.
-			 */
 			ret = __bpf_arch_text_poke(poke->tailcall_target,
 						   BPF_MOD_JUMP, NULL,
 						   (u8 *)target->bpf_func +
-						   poke->adj_off, false);
+						   poke->adj_off);
 			BUG_ON(ret < 0);
 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
 						   BPF_MOD_JUMP,
 						   (u8 *)poke->tailcall_target +
-						   X86_PATCH_SIZE, NULL, false);
+						   X86_PATCH_SIZE, NULL);
 			BUG_ON(ret < 0);
 		}
 		WRITE_ONCE(poke->tailcall_target_stable, true);
@@ -787,7 +774,6 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
 	/* emit opcode */
 	switch (atomic_op) {
 	case BPF_ADD:
-	case BPF_SUB:
 	case BPF_AND:
 	case BPF_OR:
 	case BPF_XOR:

@@ -867,7 +853,7 @@ static void emit_nops(u8 **pprog, int len)
 
 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
 
-static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
 {
 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;

@@ -894,8 +880,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	push_callee_regs(&prog, callee_regs_used);
 
 	ilen = prog - temp;
-	if (image)
-		memcpy(image + proglen, temp, ilen);
+	if (rw_image)
+		memcpy(rw_image + proglen, temp, ilen);
 	proglen += ilen;
 	addrs[0] = proglen;
 	prog = temp;

@@ -1324,6 +1310,9 @@ st: if (is_imm8(insn->off))
 				pr_err("extable->insn doesn't fit into 32-bit\n");
 				return -EFAULT;
 			}
+			/* switch ex to rw buffer for writes */
+			ex = (void *)rw_image + ((void *)ex - (void *)image);
+
 			ex->insn = delta;
 
 			ex->data = EX_TYPE_BPF;
@@ -1706,7 +1695,7 @@ emit_jmp:
 				pr_err("bpf_jit: fatal error\n");
 				return -EFAULT;
 			}
-			memcpy(image + proglen, temp, ilen);
+			memcpy(rw_image + proglen, temp, ilen);
 		}
 		proglen += ilen;
 		addrs[i] = proglen;

@@ -2247,6 +2236,7 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
 }
 
 struct x64_jit_data {
+	struct bpf_binary_header *rw_header;
 	struct bpf_binary_header *header;
 	int *addrs;
 	u8 *image;

@@ -2259,6 +2249,7 @@ struct x64_jit_data {
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	struct bpf_binary_header *rw_header = NULL;
 	struct bpf_binary_header *header = NULL;
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct x64_jit_data *jit_data;

@@ -2267,6 +2258,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	bool tmp_blinded = false;
 	bool extra_pass = false;
 	bool padding = false;
+	u8 *rw_image = NULL;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;

@@ -2302,6 +2294,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		oldproglen = jit_data->proglen;
 		image = jit_data->image;
 		header = jit_data->header;
+		rw_header = jit_data->rw_header;
+		rw_image = (void *)rw_header + ((void *)image - (void *)header);
 		extra_pass = true;
 		padding = true;
 		goto skip_init_addrs;
@@ -2332,12 +2326,12 @@ skip_init_addrs:
 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
 		if (!padding && pass >= PADDING_PASSES)
 			padding = true;
-		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
+		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
 		if (proglen <= 0) {
 out_image:
 			image = NULL;
-			if (header)
-				bpf_jit_binary_free(header);
+			bpf_jit_binary_pack_free(header, rw_header);
 			prog = orig_prog;
 			goto out_addrs;
 		}

@@ -2361,8 +2355,9 @@ out_image:
 		       sizeof(struct exception_table_entry);
 
 		/* allocate module memory for x86 insns and extable */
-		header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
-					      &image, align, jit_fill_hole);
+		header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
+						   &image, align, &rw_header, &rw_image,
+						   jit_fill_hole);
 		if (!header) {
 			prog = orig_prog;
 			goto out_addrs;
@@ -2378,14 +2373,26 @@ out_image:
 
 	if (image) {
 		if (!prog->is_func || extra_pass) {
+			/*
+			 * bpf_jit_binary_pack_finalize fails in two scenarios:
+			 *   1) header is not pointing to proper module memory;
+			 *   2) the arch doesn't support bpf_arch_text_copy().
+			 *
+			 * Both cases are serious bugs and justify WARN_ON.
+			 */
+			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
+				prog = orig_prog;
+				goto out_addrs;
+			}
+
 			bpf_tail_call_direct_fixup(prog);
-			bpf_jit_binary_lock_ro(header);
 		} else {
 			jit_data->addrs = addrs;
 			jit_data->ctx = ctx;
 			jit_data->proglen = proglen;
 			jit_data->image = image;
 			jit_data->header = header;
+			jit_data->rw_header = rw_header;
 		}
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;

@@ -2413,3 +2420,10 @@ bool bpf_jit_supports_kfunc_call(void)
 {
 	return true;
 }
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+	if (text_poke_copy(dst, src, len) == NULL)
+		return ERR_PTR(-EINVAL);
+	return dst;
+}
@@ -830,8 +830,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 	i40e_clean_tx_ring(tx_ring);
 	kfree(tx_ring->tx_bi);
 	tx_ring->tx_bi = NULL;
-	kfree(tx_ring->xsk_descs);
-	tx_ring->xsk_descs = NULL;
 
 	if (tx_ring->desc) {
 		dma_free_coherent(tx_ring->dev, tx_ring->size,

@@ -1431,13 +1429,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	if (!tx_ring->tx_bi)
 		goto err;
 
-	if (ring_is_xdp(tx_ring)) {
-		tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
-					     GFP_KERNEL);
-		if (!tx_ring->xsk_descs)
-			goto err;
-	}
-
 	u64_stats_init(&tx_ring->syncp);
 
 	/* round up to nearest 4K */

@@ -1461,8 +1452,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	return 0;
 
 err:
-	kfree(tx_ring->xsk_descs);
-	tx_ring->xsk_descs = NULL;
 	kfree(tx_ring->tx_bi);
 	tx_ring->tx_bi = NULL;
 	return -ENOMEM;

@@ -392,7 +392,6 @@ struct i40e_ring {
 	u16 rx_offset;
 	struct xdp_rxq_info xdp_rxq;
 	struct xsk_buff_pool *xsk_pool;
-	struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -471,11 +471,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
  **/
 static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 {
-	struct xdp_desc *descs = xdp_ring->xsk_descs;
+	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
 	u32 nb_pkts, nb_processed = 0;
 	unsigned int total_bytes = 0;
 
-	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
 	if (!nb_pkts)
 		return true;
@@ -2803,6 +2803,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
 		/* clone ring and setup updated count */
 		xdp_rings[i] = *vsi->xdp_rings[i];
 		xdp_rings[i].count = new_tx_cnt;
+		xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
+		xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
 		xdp_rings[i].desc = NULL;
 		xdp_rings[i].tx_buf = NULL;
 		err = ice_setup_tx_ring(&xdp_rings[i]);

@@ -2495,10 +2495,10 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
 		xdp_ring->vsi = vsi;
 		xdp_ring->netdev = NULL;
-		xdp_ring->next_dd = ICE_TX_THRESH - 1;
-		xdp_ring->next_rs = ICE_TX_THRESH - 1;
 		xdp_ring->dev = dev;
 		xdp_ring->count = vsi->num_tx_desc;
+		xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
+		xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
 		if (ice_setup_tx_ring(xdp_ring))
 			goto free_xdp_rings;
@@ -173,6 +173,8 @@ tx_skip_free:
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
+	tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
+	tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
 
 	if (!tx_ring->netdev)
 		return;

@@ -1467,7 +1469,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 		bool wd;
 
 		if (tx_ring->xsk_pool)
-			wd = ice_clean_tx_irq_zc(tx_ring, budget);
+			wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
 		else if (ice_ring_is_xdp(tx_ring))
 			wd = true;
 		else

@@ -1520,7 +1522,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 	/* Exit the polling mode, but don't re-enable interrupts if stack might
 	 * poll us due to busy-polling
 	 */
-	if (likely(napi_complete_done(napi, work_done))) {
+	if (napi_complete_done(napi, work_done)) {
 		ice_net_dim(q_vector);
 		ice_enable_interrupt(q_vector);
 	} else {
@@ -13,7 +13,6 @@
 #define ICE_MAX_CHAINED_RX_BUFS 5
 #define ICE_MAX_BUF_TXD 8
 #define ICE_MIN_TX_LEN 17
-#define ICE_TX_THRESH 32
 
 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
  * In order to align with the read requests we will align the value to

@@ -111,6 +110,8 @@ static inline int ice_skb_pad(void)
 	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 	      (R)->next_to_clean - (R)->next_to_use - 1)
 
+#define ICE_RING_QUARTER(R) ((R)->count >> 2)
+
 #define ICE_TX_FLAGS_TSO	BIT(0)
 #define ICE_TX_FLAGS_HW_VLAN	BIT(1)
 #define ICE_TX_FLAGS_SW_VLAN	BIT(2)
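The quarter-of-ring threshold replaces the fixed 32-descriptor ``ICE_TX_THRESH``; a quick worked example (not from the diff)::

  /* ICE_RING_QUARTER(R) == (R)->count >> 2 */
  /* ring->count == 1024  =>  threshold == 256 */
  /* ring->count == 512   =>  threshold == 128 */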
@@ -321,17 +322,18 @@ struct ice_tx_ring {
 	u16 count;			/* Number of descriptors */
 	u16 q_index;			/* Queue number of ring */
 	/* stats structs */
-	struct ice_txq_stats tx_stats;
-	/* CL3 - 3rd cacheline starts here */
 	struct ice_q_stats stats;
 	struct u64_stats_sync syncp;
+	struct ice_txq_stats tx_stats;
 
+	/* CL3 - 3rd cacheline starts here */
 	struct rcu_head rcu;		/* to avoid race on free */
 	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
 	struct ice_channel *ch;
 	struct ice_ptp_tx *tx_tstamps;
+	spinlock_t tx_lock;
 	u32 txq_teid;			/* Added Tx queue TEID */
+	/* CL4 - 4th cacheline starts here */
 	u16 xdp_tx_active;
 #define ICE_TX_FLAGS_RING_XDP BIT(0)
 	u8 flags;
 	u8 dcb_tc;			/* Traffic class of ring */
@@ -222,6 +222,7 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
 static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
 {
 	unsigned int total_bytes = 0, total_pkts = 0;
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u16 ntc = xdp_ring->next_to_clean;
 	struct ice_tx_desc *next_dd_desc;
 	u16 next_dd = xdp_ring->next_dd;

@@ -233,7 +234,7 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
 	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
 		return;
 
-	for (i = 0; i < ICE_TX_THRESH; i++) {
+	for (i = 0; i < tx_thresh; i++) {
 		tx_buf = &xdp_ring->tx_buf[ntc];
 
 		total_bytes += tx_buf->bytecount;

@@ -254,9 +255,9 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
 	}
 
 	next_dd_desc->cmd_type_offset_bsz = 0;
-	xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+	xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
 	if (xdp_ring->next_dd > xdp_ring->count)
-		xdp_ring->next_dd = ICE_TX_THRESH - 1;
+		xdp_ring->next_dd = tx_thresh - 1;
 	xdp_ring->next_to_clean = ntc;
 	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
 }

@@ -269,12 +270,13 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
 */
 int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
 {
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u16 i = xdp_ring->next_to_use;
 	struct ice_tx_desc *tx_desc;
 	struct ice_tx_buf *tx_buf;
 	dma_addr_t dma;
 
-	if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+	if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
 		ice_clean_xdp_irq(xdp_ring);
 
 	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {

@@ -300,13 +302,14 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
 	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
 						      size, 0);
 
+	xdp_ring->xdp_tx_active++;
 	i++;
 	if (i == xdp_ring->count) {
 		i = 0;
 		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
 		tx_desc->cmd_type_offset_bsz |=
 			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs = ICE_TX_THRESH - 1;
+		xdp_ring->next_rs = tx_thresh - 1;
 	}
 	xdp_ring->next_to_use = i;

@@ -314,7 +317,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
 		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
 		tx_desc->cmd_type_offset_bsz |=
 			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs += ICE_TX_THRESH;
+		xdp_ring->next_rs += tx_thresh;
 	}
 
 	return ICE_XDP_TX;
@@ -327,6 +327,13 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	bool if_running, pool_present = !!pool;
 	int ret = 0, pool_failure = 0;
 
+	if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
+	    !is_power_of_2(vsi->tx_rings[qid]->count)) {
+		netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
+		pool_failure = -EINVAL;
+		goto failure;
+	}
+
 	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
 	if (if_running) {

@@ -349,6 +356,7 @@ xsk_pool_if_up:
 			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
 	}
 
+failure:
 	if (pool_failure) {
 		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
 			   pool_present ? "en" : "dis", pool_failure);
@@ -359,33 +367,28 @@ xsk_pool_if_up:
 }
 
 /**
- * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
- * @rx_ring: Rx ring
+ * ice_fill_rx_descs - pick buffers from XSK buffer pool and use it
+ * @pool: XSK Buffer pool to pull the buffers from
+ * @xdp: SW ring of xdp_buff that will hold the buffers
+ * @rx_desc: Pointer to Rx descriptors that will be filled
  * @count: The number of buffers to allocate
  *
- * This function allocates a number of Rx buffers from the fill ring
- * or the internal recycle mechanism and places them on the Rx ring.
- *
- * Returns true if all allocations were successful, false if any fail.
+ * Note that ring wrap should be handled by caller of this function.
+ *
+ * Returns the amount of allocated Rx descriptors
 */
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
 {
-	union ice_32b_rx_flex_desc *rx_desc;
-	u16 ntu = rx_ring->next_to_use;
-	struct xdp_buff **xdp;
-	u32 nb_buffs, i;
 	dma_addr_t dma;
+	u16 buffs;
+	int i;
 
-	rx_desc = ICE_RX_DESC(rx_ring, ntu);
-	xdp = ice_xdp_buf(rx_ring, ntu);
-
-	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
-	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
-	if (!nb_buffs)
-		return false;
-
-	i = nb_buffs;
-	while (i--) {
+	buffs = xsk_buff_alloc_batch(pool, xdp, count);
+	for (i = 0; i < buffs; i++) {
 		dma = xsk_buff_xdp_get_dma(*xdp);
 		rx_desc->read.pkt_addr = cpu_to_le64(dma);
 		rx_desc->wb.status_error0 = 0;
|
|||
xdp++;
|
||||
}
|
||||
|
||||
return buffs;
|
||||
}
|
||||
|
||||
/**
|
||||
* __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
|
||||
* @rx_ring: Rx ring
|
||||
* @count: The number of buffers to allocate
|
||||
*
|
||||
* Place the @count of descriptors onto Rx ring. Handle the ring wrap
|
||||
* for case where space from next_to_use up to the end of ring is less
|
||||
* than @count. Finally do a tail bump.
|
||||
*
|
||||
* Returns true if all allocations were successful, false if any fail.
|
||||
*/
|
||||
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
|
||||
{
|
||||
union ice_32b_rx_flex_desc *rx_desc;
|
||||
u32 nb_buffs_extra = 0, nb_buffs;
|
||||
u16 ntu = rx_ring->next_to_use;
|
||||
u16 total_count = count;
|
||||
struct xdp_buff **xdp;
|
||||
|
||||
rx_desc = ICE_RX_DESC(rx_ring, ntu);
|
||||
xdp = ice_xdp_buf(rx_ring, ntu);
|
||||
|
||||
if (ntu + count >= rx_ring->count) {
|
||||
nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
|
||||
rx_desc,
|
||||
rx_ring->count - ntu);
|
||||
rx_desc = ICE_RX_DESC(rx_ring, 0);
|
||||
xdp = ice_xdp_buf(rx_ring, 0);
|
||||
ntu = 0;
|
||||
count -= nb_buffs_extra;
|
||||
ice_release_rx_desc(rx_ring, 0);
|
||||
}
|
||||
|
||||
nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
|
||||
|
||||
ntu += nb_buffs;
|
||||
if (ntu == rx_ring->count)
|
||||
ntu = 0;
|
||||
|
||||
ice_release_rx_desc(rx_ring, ntu);
|
||||
if (rx_ring->next_to_use != ntu)
|
||||
ice_release_rx_desc(rx_ring, ntu);
|
||||
|
||||
return count == nb_buffs;
|
||||
return total_count == (nb_buffs_extra + nb_buffs);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
|
||||
* @rx_ring: Rx ring
|
||||
* @count: The number of buffers to allocate
|
||||
*
|
||||
* Wrapper for internal allocation routine; figure out how many tail
|
||||
* bumps should take place based on the given threshold
|
||||
*
|
||||
* Returns true if all calls to internal alloc routine succeeded
|
||||
*/
|
||||
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
|
||||
{
|
||||
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
|
||||
u16 batched, leftover, i, tail_bumps;
|
||||
|
||||
batched = ALIGN_DOWN(count, rx_thresh);
|
||||
tail_bumps = batched / rx_thresh;
|
||||
leftover = count & (rx_thresh - 1);
|
||||
|
||||
for (i = 0; i < tail_bumps; i++)
|
||||
if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
|
||||
return false;
|
||||
return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -615,58 +682,6 @@ construct_skb:
|
|||
return failure ? budget : (int)total_rx_packets;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
|
||||
* @xdp_ring: XDP Tx ring
|
||||
* @budget: max number of frames to xmit
|
||||
*
|
||||
* Returns true if cleanup/transmission is done.
|
||||
*/
|
||||
static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
|
||||
{
|
||||
struct ice_tx_desc *tx_desc = NULL;
|
||||
bool work_done = true;
|
||||
struct xdp_desc desc;
|
||||
dma_addr_t dma;
|
||||
|
||||
while (likely(budget-- > 0)) {
|
||||
struct ice_tx_buf *tx_buf;
|
||||
|
||||
if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
|
||||
xdp_ring->tx_stats.tx_busy++;
|
||||
work_done = false;
|
||||
break;
|
||||
}
|
||||
|
||||
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
|
||||
|
||||
if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
|
||||
break;
|
||||
|
||||
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
|
||||
xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
|
||||
desc.len);
|
||||
|
||||
tx_buf->bytecount = desc.len;
|
||||
|
||||
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
|
||||
tx_desc->buf_addr = cpu_to_le64(dma);
|
||||
tx_desc->cmd_type_offset_bsz =
|
||||
ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
|
||||
|
||||
xdp_ring->next_to_use++;
|
||||
if (xdp_ring->next_to_use == xdp_ring->count)
|
||||
xdp_ring->next_to_use = 0;
|
||||
}
|
||||
|
||||
if (tx_desc) {
|
||||
ice_xdp_ring_update_tail(xdp_ring);
|
||||
xsk_tx_release(xdp_ring->xsk_pool);
|
||||
}
|
||||
|
||||
return budget > 0 && work_done;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
|
||||
* @xdp_ring: XDP Tx ring
|
||||
|
@@ -676,74 +691,213 @@ static void
 ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
 {
 	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+	xdp_ring->xdp_tx_active--;
 	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
 			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
 	dma_unmap_len_set(tx_buf, len, 0);
 }
 
 /**
- * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
- * @xdp_ring: XDP Tx ring
- * @budget: NAPI budget
+ * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ * @napi_budget: amount of descriptors that NAPI allows us to clean
 *
- * Returns true if cleanup/transmission is done.
+ * Returns count of cleaned descriptors
 */
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
+static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
 {
-	int total_packets = 0, total_bytes = 0;
-	s16 ntc = xdp_ring->next_to_clean;
-	struct ice_tx_desc *tx_desc;
-	struct ice_tx_buf *tx_buf;
-	u32 xsk_frames = 0;
-	bool xmit_done;
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+	int budget = napi_budget / tx_thresh;
+	u16 next_dd = xdp_ring->next_dd;
+	u16 ntc, cleared_dds = 0;
 
-	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
-	tx_buf = &xdp_ring->tx_buf[ntc];
-	ntc -= xdp_ring->count;
 	do {
-		if (!(tx_desc->cmd_type_offset_bsz &
-		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+		struct ice_tx_desc *next_dd_desc;
+		u16 desc_cnt = xdp_ring->count;
+		struct ice_tx_buf *tx_buf;
+		u32 xsk_frames;
+		u16 i;
+
+		next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+		if (!(next_dd_desc->cmd_type_offset_bsz &
+		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
 			break;
 
-		total_bytes += tx_buf->bytecount;
-		total_packets++;
-
-		if (tx_buf->raw_buf) {
-			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
-			tx_buf->raw_buf = NULL;
-		} else {
-			xsk_frames++;
-		}
-
-		tx_desc->cmd_type_offset_bsz = 0;
-		tx_buf++;
-		tx_desc++;
-		ntc++;
-
-		if (unlikely(!ntc)) {
-			ntc -= xdp_ring->count;
-			tx_buf = xdp_ring->tx_buf;
-			tx_desc = ICE_TX_DESC(xdp_ring, 0);
-		}
-
-		prefetch(tx_desc);
-
-	} while (likely(--budget));
+		cleared_dds++;
+		xsk_frames = 0;
+		if (likely(!xdp_ring->xdp_tx_active)) {
+			xsk_frames = tx_thresh;
+			goto skip;
+		}
+
+		ntc = xdp_ring->next_to_clean;
+
+		for (i = 0; i < tx_thresh; i++) {
+			tx_buf = &xdp_ring->tx_buf[ntc];
+
+			if (tx_buf->raw_buf) {
+				ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+				tx_buf->raw_buf = NULL;
+			} else {
+				xsk_frames++;
+			}
+
+			ntc++;
+			if (ntc >= xdp_ring->count)
+				ntc = 0;
+		}
+skip:
+		xdp_ring->next_to_clean += tx_thresh;
+		if (xdp_ring->next_to_clean >= desc_cnt)
+			xdp_ring->next_to_clean -= desc_cnt;
+		if (xsk_frames)
+			xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+		next_dd_desc->cmd_type_offset_bsz = 0;
+		next_dd = next_dd + tx_thresh;
+		if (next_dd >= desc_cnt)
+			next_dd = tx_thresh - 1;
+	} while (budget--);
+
+	xdp_ring->next_dd = next_dd;
+
+	return cleared_dds * tx_thresh;
 }
-	ntc += xdp_ring->count;
-	xdp_ring->next_to_clean = ntc;
-
-	if (xsk_frames)
-		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+/**
+ * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
+ * @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @desc: AF_XDP descriptor to pull the DMA address and length from
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+			 unsigned int *total_bytes)
+{
+	struct ice_tx_desc *tx_desc;
+	dma_addr_t dma;
+
+	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+
+	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+	tx_desc->buf_addr = cpu_to_le64(dma);
+	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+						      0, desc->len, 0);
+
+	*total_bytes += desc->len;
+}
+
+/**
+ * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+			       unsigned int *total_bytes)
+{
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+	u16 ntu = xdp_ring->next_to_use;
+	struct ice_tx_desc *tx_desc;
+	u32 i;
+
+	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+		dma_addr_t dma;
+
+		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
+		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
+
+		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
+		tx_desc->buf_addr = cpu_to_le64(dma);
+		tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+							      0, descs[i].len, 0);
+
+		*total_bytes += descs[i].len;
+	}
+
+	xdp_ring->next_to_use = ntu;
+
+	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
+		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+		tx_desc->cmd_type_offset_bsz |=
+			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+		xdp_ring->next_rs += tx_thresh;
+	}
+}
+/**
+ * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @nb_pkts: count of packets to be send
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+				u32 nb_pkts, unsigned int *total_bytes)
+{
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+	u32 batched, leftover, i;
+
+	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
+	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+
+	for (i = 0; i < batched; i += PKTS_PER_BATCH)
+		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+	for (; i < batched + leftover; i++)
+		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+
+	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
+		struct ice_tx_desc *tx_desc;
+
+		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+		tx_desc->cmd_type_offset_bsz |=
+			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+		xdp_ring->next_rs += tx_thresh;
+	}
+}
+
+/**
+ * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @budget: number of free descriptors on HW Tx ring that can be used
+ * @napi_budget: amount of descriptors that NAPI allows us to clean
+ *
+ * Returns true if there is no more work that needs to be done, false otherwise
+ */
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+{
+	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+	u32 nb_pkts, nb_processed = 0;
+	unsigned int total_bytes = 0;
+
+	if (budget < tx_thresh)
+		budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+
+	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+	if (!nb_pkts)
+		return true;
+
+	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+		struct ice_tx_desc *tx_desc;
+
+		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+		tx_desc->cmd_type_offset_bsz |=
+			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+		xdp_ring->next_rs = tx_thresh - 1;
+		xdp_ring->next_to_use = 0;
+	}
+
+	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+			    &total_bytes);
+
+	ice_xdp_ring_update_tail(xdp_ring);
+	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
+
+	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
 
-	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
-	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
-
-	return budget > 0 && xmit_done;
+	return nb_pkts < budget;
 }
 
 /**
@@ -6,19 +6,37 @@
 #include "ice_txrx.h"
 #include "ice.h"
 
+#define PKTS_PER_BATCH 8
+
+#ifdef __clang__
+#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
+#elif __GNUC__ >= 4
+#define loop_unrolled_for _Pragma("GCC unroll 8") for
+#else
+#define loop_unrolled_for for
+#endif
+
 struct ice_vsi;
 
 #ifdef CONFIG_XDP_SOCKETS
 int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
 		       u16 qid);
 int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
 int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
 bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
 #else
+static inline bool
+ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+	    u32 __always_unused budget,
+	    int __always_unused napi_budget)
+{
+	return false;
+}
+
 static inline int
 ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
 		   struct xsk_buff_pool __always_unused *pool,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
|
||||
int __always_unused budget)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
|
||||
u16 __always_unused count)
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include <linux/jump_label.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <net/sock.h>
|
||||
#include <uapi/linux/bpf.h>
|
||||
|
||||
struct sock;
|
||||
|
@ -165,11 +166,23 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
|
|||
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags);
|
||||
|
||||
/* Opportunistic check to see whether we have any BPF program attached*/
|
||||
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
|
||||
enum cgroup_bpf_attach_type type)
|
||||
{
|
||||
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
|
||||
struct bpf_prog_array *array;
|
||||
|
||||
array = rcu_access_pointer(cgrp->bpf.effective[type]);
|
||||
return array != &bpf_empty_prog_array.hdr;
|
||||
}
|
||||
|
||||
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
|
||||
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
|
||||
({ \
|
||||
int __ret = 0; \
|
||||
if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \
|
||||
if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
|
||||
cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
|
||||
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
|
||||
CGROUP_INET_INGRESS); \
|
||||
\
|
||||
|
@@ -181,7 +194,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk);			\
		if (sk_fullsock(__sk))					\
		if (sk_fullsock(__sk) &&				\
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	\
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	\
						      CGROUP_INET_EGRESS); \
	}								\

@@ -347,7 +361,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				       kernel_optval)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT))			\
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			\
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		\
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	\
							   optname, optval, \
							   optlen,	\

@@ -367,7 +382,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				       max_optlen, retval)		\
({									\
	int __ret = retval;						\
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			\
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			\
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		\
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		\
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt,	\

@@ -332,7 +332,10 @@ enum bpf_type_flag {
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_LAST_FLAG	= MEM_ALLOC,
	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_LAST_FLAG	= MEM_USER,
};

/* Max number of base types. */

@@ -588,7 +591,7 @@ struct bpf_verifier_ops {
				const struct btf *btf,
				const struct btf_type *t, int off, int size,
				enum bpf_access_type atype,
				u32 *next_btf_id);
				u32 *next_btf_id, enum bpf_type_flag *flag);
};

struct bpf_prog_offload_ops {

@@ -843,8 +846,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,

@@ -950,6 +953,7 @@ struct bpf_prog_aux {
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool use_bpf_prog_pack;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
@@ -1233,6 +1237,19 @@ struct bpf_prog_array {
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use the one global 'bpf_empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0), and
 * that pointer should be 'freed' by bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
@@ -1767,7 +1784,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size,
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
		      u32 *next_btf_id, enum bpf_type_flag *flag);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id);

@@ -1875,11 +1892,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
	return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

@@ -1943,11 +1955,6 @@ static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
	return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{

@@ -2243,6 +2250,7 @@ extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

@@ -2355,6 +2363,8 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

@@ -238,6 +238,11 @@ static inline bool btf_type_is_var(const struct btf_type *t)
	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
}

static inline bool btf_type_is_type_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
}

/* union is only a special case of struct:
 * all its offsetof(member) == 0
 */

@@ -322,6 +327,11 @@ static inline const struct btf_var_secinfo *btf_type_var_secinfo(
	return (const struct btf_var_secinfo *)(t + 1);
}

static inline struct btf_param *btf_params(const struct btf_type *t)
{
	return (struct btf_param *)(t + 1);
}

#ifdef CONFIG_BPF_SYSCALL
struct bpf_prog;

@@ -31,6 +31,9 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __kernel
# ifdef STRUCTLEAK_PLUGIN
#  define __user	__attribute__((user))
# elif defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
	__has_attribute(btf_type_tag)
#  define __user	__attribute__((btf_type_tag("user")))
# else
#  define __user
# endif
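For illustration (hypothetical struct, not from this patch): with the btf_type_tag-based definition above, a tagged pointer member is encoded in vmlinux BTF as a PTR to a TYPE_TAG("user") node, which the verifier changes below translate into the MEM_USER flag:

	struct foo {
		int __user *uptr;	/* BTF: PTR -> TYPE_TAG("user") -> int */
	};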
@@ -548,7 +548,7 @@ struct sock_fprog_kern {
#define BPF_IMAGE_ALIGNMENT 8

struct bpf_binary_header {
	u32 pages;
	u32 size;
	u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};

@@ -886,17 +886,8 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	set_vm_flush_reset_perms(hdr);
	set_memory_ro((unsigned long)hdr, hdr->pages);
	set_memory_x((unsigned long)hdr, hdr->pages);
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr = real_start & PAGE_MASK;

	return (void *)addr;
	set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
	set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);

@@ -1068,6 +1059,18 @@ void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);

struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_hdr,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns);
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
				 struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header);
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header);

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke);

@@ -170,11 +170,6 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
#define sk_msg_iter_next(msg, which)		\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_clear_meta(struct sk_msg *msg)
{
	memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
}

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);

@@ -13,7 +13,7 @@

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);

@@ -142,8 +142,7 @@ static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc,
						 u32 max)
static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

@@ -60,6 +60,7 @@ struct xsk_buff_pool {
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;

@@ -5076,6 +5076,16 @@ union bpf_attr {
 *		associated to *xdp_md*, at *offset*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
 *	Description
 *		Read *size* bytes from user space address *user_ptr* in *tsk*'s
 *		address space, and store the data in *dst*. *flags* is not
 *		used yet and is provided for future extensibility. This helper
 *		can only be used by sleepable programs.
 *	Return
 *		0 on success, or a negative error in case of failure. On error
 *		*dst* buffer is zeroed out.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
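A minimal sketch of calling the helper documented above from a sleepable task iterator (the program body and its source address are illustrative assumptions, not part of this patch):

	SEC("iter.s/task")	/* the ".s" suffix marks the program sleepable */
	int dump_task_env(struct bpf_iter__task *ctx)
	{
		struct task_struct *task = ctx->task;
		char buf[16] = {};

		if (!task || !task->mm)
			return 0;
		/* on failure, buf is zeroed and a negative error is returned */
		if (bpf_copy_from_user_task(buf, sizeof(buf),
					    (void *)task->mm->env_start, task, 0))
			return 0;
		BPF_SEQ_PRINTF(ctx->meta->seq, "%s\n", buf);
		return 0;
	}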
@@ -5269,6 +5279,7 @@ union bpf_attr {
	FN(xdp_get_buff_len),		\
	FN(xdp_load_bytes),		\
	FN(xdp_store_bytes),		\
	FN(copy_from_user_task),	\
	/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper

@@ -5563,7 +5574,8 @@ struct bpf_sock {
	__u32 src_ip4;
	__u32 src_ip6[4];
	__u32 src_port;		/* host byte order */
	__u32 dst_port;		/* network byte order */
	__be16 dst_port;	/* network byte order */
	__u16 :16;		/* zero padding */
	__u32 dst_ip4;
	__u32 dst_ip6[4];
	__u32 state;

@@ -6441,7 +6453,8 @@ struct bpf_sk_lookup {
	__u32 protocol;		/* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
	__u32 remote_ip4;	/* Network byte order */
	__u32 remote_ip6[4];	/* Network byte order */
	__u32 remote_port;	/* Network byte order */
	__be16 remote_port;	/* Network byte order */
	__u16 :16;		/* Zero padding */
	__u32 local_ip4;	/* Network byte order */
	__u32 local_ip6[4];	/* Network byte order */
	__u32 local_port;	/* Host byte order */

@@ -86,6 +86,10 @@ config CC_HAS_ASM_INLINE
config CC_HAS_NO_PROFILE_FN_ATTR
	def_bool $(success,echo '__attribute__((no_profile_instrument_function)) int x();' | $(CC) -x c - -c -o /dev/null -Werror)

config PAHOLE_VERSION
	int
	default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE))

config CONSTRUCTORS
	bool

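The point of the new symbol is that other Kconfig entries can compare pahole versions numerically instead of re-running shell probes; roughly (an illustrative sketch — the actual DWARF5 entry is not in this hunk and its exact threshold may differ):

	config DEBUG_INFO_DWARF5
		bool "Generate DWARF Version 5 debuginfo"
		depends on !DEBUG_INFO_BTF || PAHOLE_VERSION >= 121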
@@ -5,6 +5,7 @@
#include <linux/anon_inodes.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/rcupdate_trace.h>

struct bpf_iter_target_info {
	struct list_head list;

@@ -684,11 +685,20 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
{
	int ret;

	rcu_read_lock();
	migrate_disable();
	ret = bpf_prog_run(prog, ctx);
	migrate_enable();
	rcu_read_unlock();
	if (prog->aux->sleepable) {
		/* sleepable programs run under the tasks-trace flavor of
		 * RCU, which tolerates the program blocking mid-run
		 */
		rcu_read_lock_trace();
		migrate_disable();
		might_fault();
		ret = bpf_prog_run(prog, ctx);
		migrate_enable();
		rcu_read_unlock_trace();
	} else {
		rcu_read_lock();
		migrate_disable();
		ret = bpf_prog_run(prog, ctx);
		migrate_enable();
		rcu_read_unlock();
	}

	/* bpf program can only return 0 or 1:
	 *  0 : okay
kernel/bpf/btf.c
@@ -419,6 +419,9 @@ static struct btf_type btf_void;
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers
@@ -595,6 +598,7 @@ static bool btf_type_needs_resolve(const struct btf_type *t)
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

@@ -3571,9 +3575,24 @@ static s32 btf_func_check_meta(struct btf_verifier_env *env,
	return 0;
}

static int btf_func_resolve(struct btf_verifier_env *env,
			    const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	int err;

	err = btf_func_check(env, t);
	if (err)
		return err;

	env_stack_pop_resolved(env, next_type_id, 0);
	return 0;
}

static struct btf_kind_operations func_ops = {
	.check_meta = btf_func_check_meta,
	.resolve = btf_df_resolve,
	.resolve = btf_func_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_ref_type_log,

@@ -4194,7 +4213,7 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
		return !btf_resolved_type_id(btf, type_id) &&
		       !btf_resolved_type_size(btf, type_id);

	if (btf_type_is_decl_tag(t))
	if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
		return btf_resolved_type_id(btf, type_id) &&
		       !btf_resolved_type_size(btf, type_id);

@@ -4284,12 +4303,6 @@ static int btf_check_all_types(struct btf_verifier_env *env)
			if (err)
				return err;
		}

		if (btf_type_is_func(t)) {
			err = btf_func_check(env, t);
			if (err)
				return err;
		}
	}

	return 0;

@@ -4886,6 +4899,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
	const char *tname = prog->aux->attach_func_name;
	struct bpf_verifier_log *log = info->log;
	const struct btf_param *args;
	const char *tag_value;
	u32 nr_args, arg;
	int i, ret;

@@ -5038,6 +5052,13 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
	info->btf = btf;
	info->btf_id = t->type;
	t = btf_type_by_id(btf, t->type);

	if (btf_type_is_type_tag(t)) {
		tag_value = __btf_name_by_offset(btf, t->name_off);
		if (strcmp(tag_value, "user") == 0)
			info->reg_type |= MEM_USER;
	}

	/* skip modifiers */
	while (btf_type_is_modifier(t)) {
		info->btf_id = t->type;

@@ -5064,12 +5085,12 @@ enum bpf_struct_walk_result {

static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
			   const struct btf_type *t, int off, int size,
			   u32 *next_btf_id)
			   u32 *next_btf_id, enum bpf_type_flag *flag)
{
	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
	const struct btf_type *mtype, *elem_type = NULL;
	const struct btf_member *member;
	const char *tname, *mname;
	const char *tname, *mname, *tag_value;
	u32 vlen, elem_id, mid;

again:

@@ -5253,7 +5274,8 @@ error:
	}

	if (btf_type_is_ptr(mtype)) {
		const struct btf_type *stype;
		const struct btf_type *stype, *t;
		enum bpf_type_flag tmp_flag = 0;
		u32 id;

		if (msize != size || off != moff) {

@@ -5262,9 +5284,19 @@ error:
				mname, moff, tname, off, size);
			return -EACCES;
		}

		/* check __user tag */
		t = btf_type_by_id(btf, mtype->type);
		if (btf_type_is_type_tag(t)) {
			tag_value = __btf_name_by_offset(btf, t->name_off);
			if (strcmp(tag_value, "user") == 0)
				tmp_flag = MEM_USER;
		}

		stype = btf_type_skip_modifiers(btf, mtype->type, &id);
		if (btf_type_is_struct(stype)) {
			*next_btf_id = id;
			*flag = tmp_flag;
			return WALK_PTR;
		}
	}

@@ -5291,13 +5323,14 @@ error:
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype __maybe_unused,
		      u32 *next_btf_id)
		      u32 *next_btf_id, enum bpf_type_flag *flag)
{
	enum bpf_type_flag tmp_flag = 0;
	int err;
	u32 id;

	do {
		err = btf_struct_walk(log, btf, t, off, size, &id);
		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);

		switch (err) {
		case WALK_PTR:

@@ -5305,6 +5338,7 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
			 * we're done.
			 */
			*next_btf_id = id;
			*flag = tmp_flag;
			return PTR_TO_BTF_ID;
		case WALK_SCALAR:
			return SCALAR_VALUE;

@@ -5349,6 +5383,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *need_btf, u32 need_type_id)
{
	const struct btf_type *type;
	enum bpf_type_flag flag;
	int err;

	/* Are we already done? */

@@ -5359,7 +5394,7 @@ again:
	type = btf_type_by_id(btf, id);
	if (!type)
		return false;
	err = btf_struct_walk(log, btf, type, off, 1, &id);
	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
	if (err != WALK_STRUCT)
		return false;

@@ -6740,8 +6775,19 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
	int ret;

	btf = btf_get_module_btf(kset->owner);
	if (IS_ERR_OR_NULL(btf))
		return btf ? PTR_ERR(btf) : -ENOENT;
	if (!btf) {
		if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
			pr_err("missing vmlinux BTF, cannot register kfuncs\n");
			return -ENOENT;
		}
		if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
			pr_err("missing module BTF, cannot register kfuncs\n");
			return -ENOENT;
		}
		return 0;
	}
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	hook = bpf_prog_type_to_kfunc_hook(prog_type);
	ret = btf_populate_kfunc_set(btf, hook, kset);
@@ -6752,10 +6798,113 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
}
EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);

#define MAX_TYPES_ARE_COMPAT_DEPTH 2

static
int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
				const struct btf *targ_btf, __u32 targ_id,
				int level)
{
	const struct btf_type *local_type, *targ_type;
	int depth = 32; /* max recursion depth */

	/* caller made sure that names match (ignoring flavor suffix) */
	local_type = btf_type_by_id(local_btf, local_id);
	targ_type = btf_type_by_id(targ_btf, targ_id);
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

recur:
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_type = btf_type_skip_modifiers(local_btf, local_id, &local_id);
	targ_type = btf_type_skip_modifiers(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_FWD:
		return 1;
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
	case BTF_KIND_PTR:
		local_id = local_type->type;
		targ_id = targ_type->type;
		goto recur;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_type);
		struct btf_param *targ_p = btf_params(targ_type);
		__u16 local_vlen = btf_vlen(local_type);
		__u16 targ_vlen = btf_vlen(targ_type);
		int i, err;

		if (local_vlen != targ_vlen)
			return 0;

		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			if (level <= 0)
				return -EINVAL;

			btf_type_skip_modifiers(local_btf, local_p->type, &local_id);
			btf_type_skip_modifiers(targ_btf, targ_p->type, &targ_id);
			err = __bpf_core_types_are_compat(local_btf, local_id,
							  targ_btf, targ_id,
							  level - 1);
			if (err <= 0)
				return err;
		}

		/* tail recurse for return type check */
		btf_type_skip_modifiers(local_btf, local_type->type, &local_id);
		btf_type_skip_modifiers(targ_btf, targ_type->type, &targ_id);
		goto recur;
	}
	default:
		return 0;
	}
}

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signature: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{
	return -EOPNOTSUPP;
	return __bpf_core_types_are_compat(local_btf, local_id,
					   targ_btf, targ_id,
					   MAX_TYPES_ARE_COMPAT_DEPTH);
}
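A worked example of the rules above (illustrative only):

	/* Under these rules,
	 *	int  (*)(struct s *, char *)
	 *	long (*)(struct s *, unsigned char *)
	 * are compatible FUNC_PROTOs: INT size/signedness is ignored and
	 * PTR targets recurse, while int (*)(struct s *) would match
	 * neither of them because the argument count differs.
	 */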

static bool bpf_core_is_flavor_sep(const char *s)

@@ -1384,20 +1384,6 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
}

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum cgroup_bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{

@@ -1456,19 +1442,11 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

@@ -1550,15 +1528,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT))
		return retval;

	ctx.optlen = max_optlen;

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

@@ -537,13 +537,10 @@ long bpf_jit_limit_max __read_mostly;
static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
	prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
}

static void
@@ -808,6 +805,137 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
	return slot;
}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define BPF_PROG_PACK_SIZE	HPAGE_PMD_SIZE
#else
#define BPF_PROG_PACK_SIZE	PAGE_SIZE
#endif
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
#define BPF_PROG_CHUNK_COUNT	(BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
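For scale, the defaults above work out as follows (x86 with transparent hugepages, for illustration):

	/* BPF_PROG_PACK_SIZE   = 2 MiB (HPAGE_PMD_SIZE)
	 * BPF_PROG_CHUNK_SIZE  = 64 bytes (1 << 6)
	 * BPF_PROG_CHUNK_COUNT = 2 MiB / 64 B = 32768 chunks per pack,
	 * so one 32768-bit (4 KiB) bitmap tracks occupancy of a whole pack.
	 */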

struct bpf_prog_pack {
	struct list_head list;
	void *ptr;
	unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
};

#define BPF_PROG_MAX_PACK_PROG_SIZE	BPF_PROG_PACK_SIZE
#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

static struct bpf_prog_pack *alloc_new_pack(void)
{
	struct bpf_prog_pack *pack;

	pack = kzalloc(sizeof(*pack), GFP_KERNEL);
	if (!pack)
		return NULL;
	pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
	if (!pack->ptr) {
		kfree(pack);
		return NULL;
	}
	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
	list_add_tail(&pack->list, &pack_list);

	set_vm_flush_reset_perms(pack->ptr);
	set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
	set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
	return pack;
}

static void *bpf_prog_pack_alloc(u32 size)
{
	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
	struct bpf_prog_pack *pack;
	unsigned long pos;
	void *ptr = NULL;

	if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
		size = round_up(size, PAGE_SIZE);
		ptr = module_alloc(size);
		if (ptr) {
			set_vm_flush_reset_perms(ptr);
			set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
			set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
		}
		return ptr;
	}
	mutex_lock(&pack_mutex);
	list_for_each_entry(pack, &pack_list, list) {
		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
						 nbits, 0);
		if (pos < BPF_PROG_CHUNK_COUNT)
			goto found_free_area;
	}

	pack = alloc_new_pack();
	if (!pack)
		goto out;

	pos = 0;

found_free_area:
	bitmap_set(pack->bitmap, pos, nbits);
	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
	mutex_unlock(&pack_mutex);
	return ptr;
}

static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
{
	struct bpf_prog_pack *pack = NULL, *tmp;
	unsigned int nbits;
	unsigned long pos;
	void *pack_ptr;

	if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
		module_memfree(hdr);
		return;
	}

	pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
	mutex_lock(&pack_mutex);

	list_for_each_entry(tmp, &pack_list, list) {
		if (tmp->ptr == pack_ptr) {
			pack = tmp;
			break;
		}
	}

	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
		goto out;

	nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
	pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;

	bitmap_clear(pack->bitmap, pos, nbits);
	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
		list_del(&pack->list);
		module_memfree(pack->ptr);
		kfree(pack);
	}
out:
	mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
@@ -833,12 +961,11 @@ static int __init bpf_jit_charge_init(void)
}
pure_initcall(bpf_jit_charge_init);

int bpf_jit_charge_modmem(u32 pages)
int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
	if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
		if (!bpf_capable()) {
			atomic_long_sub(pages, &bpf_jit_current);
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
		}
	}

@@ -846,9 +973,9 @@ int bpf_jit_charge_modmem(u32 pages)
	return 0;
}

void bpf_jit_uncharge_modmem(u32 pages)
void bpf_jit_uncharge_modmem(u32 size)
{
	atomic_long_sub(pages, &bpf_jit_current);
	atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)

@@ -867,7 +994,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

@@ -877,20 +1004,19 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
	if (bpf_jit_charge_modmem(size))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hdr->size = size;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -903,10 +1029,113 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;
	u32 size = hdr->size;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
	bpf_jit_uncharge_modmem(size);
}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, a RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_header,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *ro_header;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* add 16 bytes for a random section of illegal instructions */
	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	ro_header = bpf_prog_pack_alloc(size);
	if (!ro_header) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	*rw_header = kvmalloc(size, GFP_KERNEL);
	if (!*rw_header) {
		bpf_prog_pack_free(ro_header);
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(*rw_header, size);
	(*rw_header)->size = size;

	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
	start = (get_random_int() % hole) & ~(alignment - 1);

	*image_ptr = &ro_header->image[start];
	*rw_image = &(*rw_header)->image[start];

	return ro_header;
}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
				 struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header)
{
	void *ptr;

	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);

	kvfree(rw_header);

	if (IS_ERR(ptr)) {
		bpf_prog_pack_free(ro_header);
		return PTR_ERR(ptr);
	}
	prog->aux->use_bpf_prog_pack = true;
	return 0;
}
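Taken together, an arch JIT would drive this pair roughly as follows (a hedged sketch with hypothetical locals; error handling condensed):

	u8 *image, *rw_image;
	struct bpf_binary_header *ro_header, *rw_header;

	ro_header = bpf_jit_binary_pack_alloc(proglen, &image, alignment,
					      &rw_header, &rw_image,
					      jit_fill_hole);
	if (!ro_header)
		return NULL;
	/* emit instructions into rw_image, computing offsets against 'image' */
	if (bpf_jit_binary_pack_finalize(prog, ro_header, rw_header))
		return NULL;	/* both buffers were already freed on failure */
	prog->bpf_func = (void *)image;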

/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed after JITing;
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 * For case 2), we need to free both the RO memory and the RW buffer.
 * Also, ro_header->size in 2) is not properly set yet, so rw_header->size
 * is used for uncharge.
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header)
{
	u32 size = rw_header ? rw_header->size : ro_header->size;

	bpf_prog_pack_free(ro_header);
	kvfree(rw_header);
	bpf_jit_uncharge_modmem(size);
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	if (fp->aux->use_bpf_prog_pack)
		addr = real_start & BPF_PROG_CHUNK_MASK;
	else
		addr = real_start & PAGE_MASK;

	return (void *)addr;
}

/* This symbol is only overridden by archs that have different

@@ -918,7 +1147,10 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);
		if (fp->aux->use_bpf_prog_pack)
			bpf_jit_binary_pack_free(hdr, NULL /* rw_buffer */);
		else
			bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}
@@ -1968,18 +2200,10 @@ static struct bpf_prog_dummy {
	},
};

/* to avoid allocating empty bpf_prog_array for cgroups that
 * don't have bpf program attached use one global 'empty_prog_array'
 * It will not be modified the caller of bpf_prog_array_alloc()
 * (since caller requested prog_cnt == 0)
 * that pointer should be 'freed' by bpf_prog_array_free()
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
struct bpf_empty_prog_array bpf_empty_prog_array = {
	.null_prog = NULL,
};
EXPORT_SYMBOL(bpf_empty_prog_array);

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{

@@ -1989,12 +2213,12 @@ struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
				       (prog_cnt + 1),
				       flags);

	return &empty_prog_array.hdr;
	return &bpf_empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &empty_prog_array.hdr)
	if (!progs || progs == &bpf_empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

@@ -2453,6 +2677,11 @@ int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
	return -ENOTSUPP;
}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	return ERR_PTR(-ENOTSUPP);
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

@@ -16,6 +16,7 @@
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>
#include <linux/btf_ids.h>

#include "../../lib/kstrtox.h"

@@ -671,6 +672,39 @@ const struct bpf_func_proto bpf_copy_from_user_proto = {
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func		= bpf_copy_from_user_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_BTF_ID,
	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type	= ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)

@@ -1,40 +1,16 @@
# SPDX-License-Identifier: GPL-2.0

LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
LIBBPF_OUT = $(abspath $(obj))/libbpf
LIBBPF_A = $(LIBBPF_OUT)/libbpf.a
LIBBPF_DESTDIR = $(LIBBPF_OUT)
LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include

# Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test
# in tools/scripts/Makefile.include always succeeds when building the kernel
# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg".
$(LIBBPF_A): | $(LIBBPF_OUT)
	$(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ \
		DESTDIR=$(LIBBPF_DESTDIR) prefix= \
		$(LIBBPF_OUT)/libbpf.a install_headers

libbpf_hdrs: $(LIBBPF_A)

.PHONY: libbpf_hdrs

$(LIBBPF_OUT):
	$(call msg,MKDIR,$@)
	$(Q)mkdir -p $@
LIBBPF_INCLUDE = $(LIBBPF_SRCS)/..

userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
	-I $(LIBBPF_INCLUDE) -Wno-unused-result

userprogs := bpf_preload_umd

clean-files := libbpf/

$(obj)/iterators/iterators.o: | libbpf_hdrs

bpf_preload_umd-objs := iterators/iterators.o
bpf_preload_umd-userldlibs := $(LIBBPF_A) -lelf -lz

$(obj)/bpf_preload_umd: $(LIBBPF_A)
$(obj)/bpf_preload_umd:

$(obj)/bpf_preload_umd_blob.o: $(obj)/bpf_preload_umd

@@ -35,15 +35,15 @@ endif

.PHONY: all clean

all: iterators.skel.h
all: iterators.lskel.h

clean:
	$(call msg,CLEAN)
	$(Q)rm -rf $(OUTPUT) iterators

iterators.skel.h: $(OUTPUT)/iterators.bpf.o | $(BPFTOOL)
iterators.lskel.h: $(OUTPUT)/iterators.bpf.o | $(BPFTOOL)
	$(call msg,GEN-SKEL,$@)
	$(Q)$(BPFTOOL) gen skeleton $< > $@
	$(Q)$(BPFTOOL) gen skeleton -L $< > $@

$(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)

@@ -10,20 +10,36 @@
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <sys/mount.h>
#include "iterators.skel.h"
#include "iterators.lskel.h"
#include "bpf_preload_common.h"

int to_kernel = -1;
int from_kernel = 0;

static int send_link_to_kernel(struct bpf_link *link, const char *link_name)
static int __bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = (long) info;

	err = skel_sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;
	return err;
}

static int send_link_to_kernel(int link_fd, const char *link_name)
{
	struct bpf_preload_info obj = {};
	struct bpf_link_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	err = __bpf_obj_get_info_by_fd(link_fd, &info, &info_len);
	if (err)
		return err;
	obj.link_id = info.id;

@@ -37,7 +53,6 @@ static int send_link_to_kernel(struct bpf_link *link, const char *link_name)

int main(int argc, char **argv)
{
	struct rlimit rlim = { RLIM_INFINITY, RLIM_INFINITY };
	struct iterators_bpf *skel;
	int err, magic;
	int debug_fd;

@@ -55,7 +70,6 @@ int main(int argc, char **argv)
		printf("bad start magic %d\n", magic);
		return 1;
	}
	setrlimit(RLIMIT_MEMLOCK, &rlim);
	/* libbpf opens BPF object and loads it into the kernel */
	skel = iterators_bpf__open_and_load();
	if (!skel) {

@@ -72,10 +86,10 @@ int main(int argc, char **argv)
		goto cleanup;

	/* send two bpf_link IDs with names to the kernel */
	err = send_link_to_kernel(skel->links.dump_bpf_map, "maps.debug");
	err = send_link_to_kernel(skel->links.dump_bpf_map_fd, "maps.debug");
	if (err)
		goto cleanup;
	err = send_link_to_kernel(skel->links.dump_bpf_prog, "progs.debug");
	err = send_link_to_kernel(skel->links.dump_bpf_prog_fd, "progs.debug");
	if (err)
		goto cleanup;

@@ -0,0 +1,428 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* THIS FILE IS AUTOGENERATED! */
#ifndef __ITERATORS_BPF_SKEL_H__
#define __ITERATORS_BPF_SKEL_H__

#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/skel_internal.h>

struct iterators_bpf {
	struct bpf_loader_ctx ctx;
	struct {
		struct bpf_map_desc rodata;
	} maps;
	struct {
		struct bpf_prog_desc dump_bpf_map;
		struct bpf_prog_desc dump_bpf_prog;
	} progs;
	struct {
		int dump_bpf_map_fd;
		int dump_bpf_prog_fd;
	} links;
	struct iterators_bpf__rodata {
	} *rodata;
};

static inline int
iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel)
{
	int prog_fd = skel->progs.dump_bpf_map.prog_fd;
	int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);

	if (fd > 0)
		skel->links.dump_bpf_map_fd = fd;
	return fd;
}

static inline int
iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel)
{
	int prog_fd = skel->progs.dump_bpf_prog.prog_fd;
	int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);

	if (fd > 0)
		skel->links.dump_bpf_prog_fd = fd;
	return fd;
}

static inline int
iterators_bpf__attach(struct iterators_bpf *skel)
{
	int ret = 0;

	ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel);
	ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel);
	return ret < 0 ? ret : 0;
}

static inline void
iterators_bpf__detach(struct iterators_bpf *skel)
{
	skel_closenz(skel->links.dump_bpf_map_fd);
	skel_closenz(skel->links.dump_bpf_prog_fd);
}
static void
iterators_bpf__destroy(struct iterators_bpf *skel)
{
	if (!skel)
		return;
	iterators_bpf__detach(skel);
	skel_closenz(skel->progs.dump_bpf_map.prog_fd);
	skel_closenz(skel->progs.dump_bpf_prog.prog_fd);
	munmap(skel->rodata, 4096);
	skel_closenz(skel->maps.rodata.map_fd);
	free(skel);
}
static inline struct iterators_bpf *
iterators_bpf__open(void)
{
	struct iterators_bpf *skel;

	skel = calloc(sizeof(*skel), 1);
	if (!skel)
		goto cleanup;
	skel->ctx.sz = (void *)&skel->links - (void *)skel;
	skel->rodata =
		mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (skel->rodata == (void *) -1)
		goto cleanup;
	memcpy(skel->rodata, (void *)"\
\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x0a\0\x25\x34\x75\x20\
\x25\x2d\x31\x36\x73\x25\x36\x64\x0a\0\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\x74\x61\x63\x68\x65\
\x64\x0a\0\x25\x34\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\x25\x73\x0a\0", 98);
	skel->maps.rodata.initial_value = (__u64)(long)skel->rodata;
	return skel;
cleanup:
	iterators_bpf__destroy(skel);
	return NULL;
}

static inline int
iterators_bpf__load(struct iterators_bpf *skel)
{
	struct bpf_load_and_run_opts opts = {};
	int err;

	opts.ctx = (struct bpf_loader_ctx *)skel;
	opts.data_sz = 6056;
	opts.data = (void *)"\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x9f\xeb\x01\0\
\x18\0\0\0\0\0\0\0\x1c\x04\0\0\x1c\x04\0\0\xf9\x04\0\0\0\0\0\0\0\0\0\x02\x02\0\
\0\0\x01\0\0\0\x02\0\0\x04\x10\0\0\0\x13\0\0\0\x03\0\0\0\0\0\0\0\x18\0\0\0\x04\
\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\x02\x08\0\0\0\0\0\0\0\0\0\0\x02\x0d\0\0\0\0\0\0\
\0\x01\0\0\x0d\x06\0\0\0\x1c\0\0\0\x01\0\0\0\x20\0\0\0\0\0\0\x01\x04\0\0\0\x20\
\0\0\x01\x24\0\0\0\x01\0\0\x0c\x05\0\0\0\xa3\0\0\0\x03\0\0\x04\x18\0\0\0\xb1\0\
\0\0\x09\0\0\0\0\0\0\0\xb5\0\0\0\x0b\0\0\0\x40\0\0\0\xc0\0\0\0\x0b\0\0\0\x80\0\
\0\0\0\0\0\0\0\0\x02\x0a\0\0\0\xc8\0\0\0\0\0\0\x07\0\0\0\0\xd1\0\0\0\0\0\0\
\x08\x0c\0\0\0\xd7\0\0\0\0\0\0\x01\x08\0\0\0\x40\0\0\0\x94\x01\0\0\x03\0\0\x04\
\x18\0\0\0\x9c\x01\0\0\x0e\0\0\0\0\0\0\0\x9f\x01\0\0\x11\0\0\0\x20\0\0\0\xa4\
\x01\0\0\x0e\0\0\0\xa0\0\0\0\xb0\x01\0\0\0\0\0\x08\x0f\0\0\0\xb6\x01\0\0\0\0\0\
\x01\x04\0\0\0\x20\0\0\0\xc3\x01\0\0\0\0\0\x01\x01\0\0\0\x08\0\0\x01\0\0\0\0\0\
\0\0\x03\0\0\0\0\x10\0\0\0\x12\0\0\0\x10\0\0\0\xc8\x01\0\0\0\0\0\x01\x04\0\0\0\
\x20\0\0\0\0\0\0\0\0\0\0\x02\x14\0\0\0\x2c\x02\0\0\x02\0\0\x04\x10\0\0\0\x13\0\
\0\0\x03\0\0\0\0\0\0\0\x3f\x02\0\0\x15\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\x02\x18\0\
\0\0\0\0\0\0\x01\0\0\x0d\x06\0\0\0\x1c\0\0\0\x13\0\0\0\x44\x02\0\0\x01\0\0\x0c\
\x16\0\0\0\x90\x02\0\0\x01\0\0\x04\x08\0\0\0\x99\x02\0\0\x19\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\x02\x1a\0\0\0\xea\x02\0\0\x06\0\0\x04\x38\0\0\0\x9c\x01\0\0\x0e\0\0\
\0\0\0\0\0\x9f\x01\0\0\x11\0\0\0\x20\0\0\0\xf7\x02\0\0\x1b\0\0\0\xc0\0\0\0\x08\
\x03\0\0\x15\0\0\0\0\x01\0\0\x11\x03\0\0\x1d\0\0\0\x40\x01\0\0\x1b\x03\0\0\x1e\
\0\0\0\x80\x01\0\0\0\0\0\0\0\0\0\x02\x1c\0\0\0\0\0\0\0\0\0\0\x0a\x10\0\0\0\0\0\
\0\0\0\0\0\x02\x1f\0\0\0\0\0\0\0\0\0\0\x02\x20\0\0\0\x65\x03\0\0\x02\0\0\x04\
\x08\0\0\0\x73\x03\0\0\x0e\0\0\0\0\0\0\0\x7c\x03\0\0\x0e\0\0\0\x20\0\0\0\x1b\
\x03\0\0\x03\0\0\x04\x18\0\0\0\x86\x03\0\0\x1b\0\0\0\0\0\0\0\x8e\x03\0\0\x21\0\
\0\0\x40\0\0\0\x94\x03\0\0\x23\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\x02\x22\0\0\0\0\0\
\0\0\0\0\0\x02\x24\0\0\0\x98\x03\0\0\x01\0\0\x04\x04\0\0\0\xa3\x03\0\0\x0e\0\0\
\0\0\0\0\0\x0c\x04\0\0\x01\0\0\x04\x04\0\0\0\x15\x04\0\0\x0e\0\0\0\0\0\0\0\0\0\
\0\0\0\0\0\x03\0\0\0\0\x1c\0\0\0\x12\0\0\0\x23\0\0\0\x8b\x04\0\0\0\0\0\x0e\x25\
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x1c\0\0\0\x12\0\0\0\x0e\0\0\0\x9f\x04\
\0\0\0\0\0\x0e\x27\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x1c\0\0\0\x12\0\0\0\
\x20\0\0\0\xb5\x04\0\0\0\0\0\x0e\x29\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\
\x1c\0\0\0\x12\0\0\0\x11\0\0\0\xca\x04\0\0\0\0\0\x0e\x2b\0\0\0\0\0\0\0\0\0\0\0\
\0\0\0\x03\0\0\0\0\x10\0\0\0\x12\0\0\0\x04\0\0\0\xe1\x04\0\0\0\0\0\x0e\x2d\0\0\
\0\x01\0\0\0\xe9\x04\0\0\x04\0\0\x0f\x62\0\0\0\x26\0\0\0\0\0\0\0\x23\0\0\0\x28\
\0\0\0\x23\0\0\0\x0e\0\0\0\x2a\0\0\0\x31\0\0\0\x20\0\0\0\x2c\0\0\0\x51\0\0\0\
\x11\0\0\0\xf1\x04\0\0\x01\0\0\x0f\x04\0\0\0\x2e\0\0\0\0\0\0\0\x04\0\0\0\0\x62\
\x70\x66\x5f\x69\x74\x65\x72\x5f\x5f\x62\x70\x66\x5f\x6d\x61\x70\0\x6d\x65\x74\
\x61\0\x6d\x61\x70\0\x63\x74\x78\0\x69\x6e\x74\0\x64\x75\x6d\x70\x5f\x62\x70\
\x66\x5f\x6d\x61\x70\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x6d\x61\x70\0\x30\
\x3a\x30\0\x2f\x77\x2f\x6e\x65\x74\x2d\x6e\x65\x78\x74\x2f\x6b\x65\x72\x6e\x65\
\x6c\x2f\x62\x70\x66\x2f\x70\x72\x65\x6c\x6f\x61\x64\x2f\x69\x74\x65\x72\x61\
\x74\x6f\x72\x73\x2f\x69\x74\x65\x72\x61\x74\x6f\x72\x73\x2e\x62\x70\x66\x2e\
\x63\0\x09\x73\x74\x72\x75\x63\x74\x20\x73\x65\x71\x5f\x66\x69\x6c\x65\x20\x2a\
\x73\x65\x71\x20\x3d\x20\x63\x74\x78\x2d\x3e\x6d\x65\x74\x61\x2d\x3e\x73\x65\
\x71\x3b\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\x6d\x65\x74\x61\0\x73\x65\x71\0\
\x73\x65\x73\x73\x69\x6f\x6e\x5f\x69\x64\0\x73\x65\x71\x5f\x6e\x75\x6d\0\x73\
\x65\x71\x5f\x66\x69\x6c\x65\0\x5f\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\
\x65\x64\x20\x6c\x6f\x6e\x67\x20\x6c\x6f\x6e\x67\0\x30\x3a\x31\0\x09\x73\x74\
\x72\x75\x63\x74\x20\x62\x70\x66\x5f\x6d\x61\x70\x20\x2a\x6d\x61\x70\x20\x3d\
\x20\x63\x74\x78\x2d\x3e\x6d\x61\x70\x3b\0\x09\x69\x66\x20\x28\x21\x6d\x61\x70\
\x29\0\x09\x5f\x5f\x75\x36\x34\x20\x73\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x20\x63\
\x74\x78\x2d\x3e\x6d\x65\x74\x61\x2d\x3e\x73\x65\x71\x5f\x6e\x75\x6d\x3b\0\x30\
\x3a\x32\0\x09\x69\x66\x20\x28\x73\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x3d\x20\x30\
\x29\0\x09\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\
\x65\x71\x2c\x20\x22\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\
\x5c\x6e\x22\x29\x3b\0\x62\x70\x66\x5f\x6d\x61\x70\0\x69\x64\0\x6e\x61\x6d\x65\
\0\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\0\x5f\x5f\x75\x33\x32\0\x75\x6e\
\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x63\x68\x61\x72\0\x5f\x5f\x41\x52\
\x52\x41\x59\x5f\x53\x49\x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x09\x42\x50\x46\
\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\
\x34\x75\x20\x25\x2d\x31\x36\x73\x25\x36\x64\x5c\x6e\x22\x2c\x20\x6d\x61\x70\
\x2d\x3e\x69\x64\x2c\x20\x6d\x61\x70\x2d\x3e\x6e\x61\x6d\x65\x2c\x20\x6d\x61\
\x70\x2d\x3e\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x29\x3b\0\x7d\0\x62\
\x70\x66\x5f\x69\x74\x65\x72\x5f\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x70\x72\
\x6f\x67\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x69\x74\x65\
\x72\x2f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\
\x70\x66\x5f\x70\x72\x6f\x67\x20\x2a\x70\x72\x6f\x67\x20\x3d\x20\x63\x74\x78\
\x2d\x3e\x70\x72\x6f\x67\x3b\0\x09\x69\x66\x20\x28\x21\x70\x72\x6f\x67\x29\0\
\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x61\x75\x78\0\x09\x61\x75\x78\x20\x3d\x20\
\x70\x72\x6f\x67\x2d\x3e\x61\x75\x78\x3b\0\x09\x09\x42\x50\x46\x5f\x53\x45\x51\
\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x20\x20\x69\x64\x20\
\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\
\x74\x61\x63\x68\x65\x64\x5c\x6e\x22\x29\x3b\0\x62\x70\x66\x5f\x70\x72\x6f\x67\
\x5f\x61\x75\x78\0\x61\x74\x74\x61\x63\x68\x5f\x66\x75\x6e\x63\x5f\x6e\x61\x6d\
\x65\0\x64\x73\x74\x5f\x70\x72\x6f\x67\0\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\
\x62\x74\x66\0\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\
\x73\x65\x71\x2c\x20\x22\x25\x34\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\
\x25\x73\x5c\x6e\x22\x2c\x20\x61\x75\x78\x2d\x3e\x69\x64\x2c\0\x30\x3a\x34\0\
\x30\x3a\x35\0\x09\x69\x66\x20\x28\x21\x62\x74\x66\x29\0\x62\x70\x66\x5f\x66\
\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\x69\x6e\x73\x6e\x5f\x6f\x66\x66\0\x74\x79\
\x70\x65\x5f\x69\x64\0\x30\0\x73\x74\x72\x69\x6e\x67\x73\0\x74\x79\x70\x65\x73\
\0\x68\x64\x72\0\x62\x74\x66\x5f\x68\x65\x61\x64\x65\x72\0\x73\x74\x72\x5f\x6c\
\x65\x6e\0\x09\x74\x79\x70\x65\x73\x20\x3d\x20\x62\x74\x66\x2d\x3e\x74\x79\x70\
\x65\x73\x3b\0\x09\x62\x70\x66\x5f\x70\x72\x6f\x62\x65\x5f\x72\x65\x61\x64\x5f\
\x6b\x65\x72\x6e\x65\x6c\x28\x26\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x74\
\x29\x2c\x20\x74\x79\x70\x65\x73\x20\x2b\x20\x62\x74\x66\x5f\x69\x64\x29\x3b\0\
\x09\x73\x74\x72\x20\x3d\x20\x62\x74\x66\x2d\x3e\x73\x74\x72\x69\x6e\x67\x73\
\x3b\0\x62\x74\x66\x5f\x74\x79\x70\x65\0\x6e\x61\x6d\x65\x5f\x6f\x66\x66\0\x09\
\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x20\x3d\x20\x42\x50\x46\x5f\x43\x4f\x52\x45\
\x5f\x52\x45\x41\x44\x28\x74\x2c\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x29\x3b\0\
\x30\x3a\x32\x3a\x30\0\x09\x69\x66\x20\x28\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x20\
\x3e\x3d\x20\x62\x74\x66\x2d\x3e\x68\x64\x72\x2e\x73\x74\x72\x5f\x6c\x65\x6e\
\x29\0\x09\x72\x65\x74\x75\x72\x6e\x20\x73\x74\x72\x20\x2b\x20\x6e\x61\x6d\x65\
\x5f\x6f\x66\x66\x3b\0\x30\x3a\x33\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\
\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\
\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\x74\x2e\x31\0\x64\x75\x6d\x70\x5f\x62\x70\x66\
\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\x5f\x66\x6d\x74\0\x64\x75\x6d\x70\x5f\x62\x70
|
||||
\x66\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\x5f\x66\x6d\x74\x2e\x32\0\x4c\x49\x43\x45\
|
||||
\x4e\x53\x45\0\x2e\x72\x6f\x64\x61\x74\x61\0\x6c\x69\x63\x65\x6e\x73\x65\0\0\0\
|
||||
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x2d\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\0\0\
|
||||
\0\x04\0\0\0\x62\0\0\0\x01\0\0\0\x80\x04\0\0\0\0\0\0\0\0\0\0\x69\x74\x65\x72\
|
||||
\x61\x74\x6f\x72\x2e\x72\x6f\x64\x61\x74\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\x2f\0\0\
|
||||
\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\
|
||||
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\
|
||||
\x73\x0a\0\x25\x34\x75\x20\x25\x2d\x31\x36\x73\x25\x36\x64\x0a\0\x20\x20\x69\
|
||||
\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
|
||||
\x61\x74\x74\x61\x63\x68\x65\x64\x0a\0\x25\x34\x75\x20\x25\x2d\x31\x36\x73\x20\
|
||||
\x25\x73\x20\x25\x73\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
|
||||
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\
|
||||
\x79\x12\0\0\0\0\0\0\x79\x26\0\0\0\0\0\0\x79\x17\x08\0\0\0\0\0\x15\x07\x1b\0\0\
|
||||
\0\0\0\x79\x11\0\0\0\0\0\0\x79\x11\x10\0\0\0\0\0\x55\x01\x08\0\0\0\0\0\xbf\xa4\
|
||||
\0\0\0\0\0\0\x07\x04\0\0\xe8\xff\xff\xff\xbf\x61\0\0\0\0\0\0\x18\x62\0\0\0\0\0\
|
||||
\0\0\0\0\0\0\0\0\0\xb7\x03\0\0\x23\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x7e\0\0\
|
||||
\0\x61\x71\0\0\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\0\xb7\x01\0\0\x04\0\0\0\xbf\x72\0\
|
||||
\0\0\0\0\0\x0f\x12\0\0\0\0\0\0\x7b\x2a\xf0\xff\0\0\0\0\x61\x71\x14\0\0\0\0\0\
|
||||
\x7b\x1a\xf8\xff\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\xe8\xff\xff\xff\xbf\
|
||||
\x61\0\0\0\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\x23\0\0\0\xb7\x03\0\0\x0e\0\0\0\
|
||||
\xb7\x05\0\0\x18\0\0\0\x85\0\0\0\x7e\0\0\0\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\
|
||||
\0\0\0\0\x07\0\0\0\0\0\0\0\x42\0\0\0\x7b\0\0\0\x1e\x3c\x01\0\x01\0\0\0\x42\0\0\
|
||||
\0\x7b\0\0\0\x24\x3c\x01\0\x02\0\0\0\x42\0\0\0\xee\0\0\0\x1d\x44\x01\0\x03\0\0\
|
||||
\0\x42\0\0\0\x0f\x01\0\0\x06\x4c\x01\0\x04\0\0\0\x42\0\0\0\x1a\x01\0\0\x17\x40\
|
||||
\x01\0\x05\0\0\0\x42\0\0\0\x1a\x01\0\0\x1d\x40\x01\0\x06\0\0\0\x42\0\0\0\x43\
|
||||
\x01\0\0\x06\x58\x01\0\x08\0\0\0\x42\0\0\0\x56\x01\0\0\x03\x5c\x01\0\x0f\0\0\0\
|
||||
\x42\0\0\0\xdc\x01\0\0\x02\x64\x01\0\x1f\0\0\0\x42\0\0\0\x2a\x02\0\0\x01\x6c\
|
||||
\x01\0\0\0\0\0\x02\0\0\0\x3e\0\0\0\0\0\0\0\x08\0\0\0\x08\0\0\0\x3e\0\0\0\0\0\0\
|
||||
\0\x10\0\0\0\x02\0\0\0\xea\0\0\0\0\0\0\0\x20\0\0\0\x02\0\0\0\x3e\0\0\0\0\0\0\0\
|
||||
\x28\0\0\0\x08\0\0\0\x3f\x01\0\0\0\0\0\0\x78\0\0\0\x0d\0\0\0\x3e\0\0\0\0\0\0\0\
|
||||
\x88\0\0\0\x0d\0\0\0\xea\0\0\0\0\0\0\0\xa8\0\0\0\x0d\0\0\0\x3f\x01\0\0\0\0\0\0\
|
||||
\x1a\0\0\0\x21\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
|
||||
\0\0\0\0\0\0\0\0\0\0\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\0\0\0\0\
|
||||
\0\0\0\0\x1c\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\x10\0\0\0\0\0\0\
|
||||
\0\0\0\0\0\x0a\0\0\0\x01\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
|
||||
\0\x10\0\0\0\0\0\0\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\x62\x70\x66\x5f\x6d\
|
||||
\x61\x70\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\x79\x12\0\0\0\0\0\0\x79\x26\0\0\
|
||||
\0\0\0\0\x79\x12\x08\0\0\0\0\0\x15\x02\x3c\0\0\0\0\0\x79\x11\0\0\0\0\0\0\x79\
|
||||
\x27\0\0\0\0\0\0\x79\x11\x10\0\0\0\0\0\x55\x01\x08\0\0\0\0\0\xbf\xa4\0\0\0\0\0\
|
||||
\0\x07\x04\0\0\xd0\xff\xff\xff\xbf\x61\0\0\0\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\
|
||||
\x31\0\0\0\xb7\x03\0\0\x20\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x7e\0\0\0\x7b\
|
||||
\x6a\xc8\xff\0\0\0\0\x61\x71\0\0\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xb7\x03\0\0\
|
||||
\x04\0\0\0\xbf\x79\0\0\0\0\0\0\x0f\x39\0\0\0\0\0\0\x79\x71\x28\0\0\0\0\0\x79\
|
||||
\x78\x30\0\0\0\0\0\x15\x08\x18\0\0\0\0\0\xb7\x02\0\0\0\0\0\0\x0f\x21\0\0\0\0\0\
|
||||
\0\x61\x11\x04\0\0\0\0\0\x79\x83\x08\0\0\0\0\0\x67\x01\0\0\x03\0\0\0\x0f\x13\0\
|
||||
\0\0\0\0\0\x79\x86\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\xf8\xff\xff\xff\
|
||||
\xb7\x02\0\0\x08\0\0\0\x85\0\0\0\x71\0\0\0\xb7\x01\0\0\0\0\0\0\x79\xa3\xf8\xff\
|
||||
\0\0\0\0\x0f\x13\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\xf4\xff\xff\xff\
|
||||
\xb7\x02\0\0\x04\0\0\0\x85\0\0\0\x71\0\0\0\xb7\x03\0\0\x04\0\0\0\x61\xa1\xf4\
|
||||
\xff\0\0\0\0\x61\x82\x10\0\0\0\0\0\x3d\x21\x02\0\0\0\0\0\x0f\x16\0\0\0\0\0\0\
|
||||
\xbf\x69\0\0\0\0\0\0\x7b\x9a\xd8\xff\0\0\0\0\x79\x71\x18\0\0\0\0\0\x7b\x1a\xe0\
|
||||
\xff\0\0\0\0\x79\x71\x20\0\0\0\0\0\x79\x11\0\0\0\0\0\0\x0f\x31\0\0\0\0\0\0\x7b\
|
||||
\x1a\xe8\xff\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\xd0\xff\xff\xff\x79\xa1\
|
||||
\xc8\xff\0\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\x51\0\0\0\xb7\x03\0\0\x11\0\0\0\
|
||||
\xb7\x05\0\0\x20\0\0\0\x85\0\0\0\x7e\0\0\0\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\
|
||||
\0\0\0\0\x17\0\0\0\0\0\0\0\x42\0\0\0\x7b\0\0\0\x1e\x80\x01\0\x01\0\0\0\x42\0\0\
|
||||
\0\x7b\0\0\0\x24\x80\x01\0\x02\0\0\0\x42\0\0\0\x60\x02\0\0\x1f\x88\x01\0\x03\0\
|
||||
\0\0\x42\0\0\0\x84\x02\0\0\x06\x94\x01\0\x04\0\0\0\x42\0\0\0\x1a\x01\0\0\x17\
|
||||
\x84\x01\0\x05\0\0\0\x42\0\0\0\x9d\x02\0\0\x0e\xa0\x01\0\x06\0\0\0\x42\0\0\0\
|
||||
\x1a\x01\0\0\x1d\x84\x01\0\x07\0\0\0\x42\0\0\0\x43\x01\0\0\x06\xa4\x01\0\x09\0\
|
||||
\0\0\x42\0\0\0\xaf\x02\0\0\x03\xa8\x01\0\x11\0\0\0\x42\0\0\0\x1f\x03\0\0\x02\
|
||||
\xb0\x01\0\x18\0\0\0\x42\0\0\0\x5a\x03\0\0\x06\x04\x01\0\x1b\0\0\0\x42\0\0\0\0\
|
||||
\0\0\0\0\0\0\0\x1c\0\0\0\x42\0\0\0\xab\x03\0\0\x0f\x10\x01\0\x1d\0\0\0\x42\0\0\
|
||||
\0\xc0\x03\0\0\x2d\x14\x01\0\x1f\0\0\0\x42\0\0\0\xf7\x03\0\0\x0d\x0c\x01\0\x21\
|
||||
\0\0\0\x42\0\0\0\0\0\0\0\0\0\0\0\x22\0\0\0\x42\0\0\0\xc0\x03\0\0\x02\x14\x01\0\
|
||||
\x25\0\0\0\x42\0\0\0\x1e\x04\0\0\x0d\x18\x01\0\x28\0\0\0\x42\0\0\0\0\0\0\0\0\0\
|
||||
\0\0\x29\0\0\0\x42\0\0\0\x1e\x04\0\0\x0d\x18\x01\0\x2c\0\0\0\x42\0\0\0\x1e\x04\
|
||||
\0\0\x0d\x18\x01\0\x2d\0\0\0\x42\0\0\0\x4c\x04\0\0\x1b\x1c\x01\0\x2e\0\0\0\x42\
|
||||
\0\0\0\x4c\x04\0\0\x06\x1c\x01\0\x2f\0\0\0\x42\0\0\0\x6f\x04\0\0\x0d\x24\x01\0\
|
||||
\x31\0\0\0\x42\0\0\0\x1f\x03\0\0\x02\xb0\x01\0\x40\0\0\0\x42\0\0\0\x2a\x02\0\0\
|
||||
\x01\xc0\x01\0\0\0\0\0\x14\0\0\0\x3e\0\0\0\0\0\0\0\x08\0\0\0\x08\0\0\0\x3e\0\0\
|
||||
\0\0\0\0\0\x10\0\0\0\x14\0\0\0\xea\0\0\0\0\0\0\0\x20\0\0\0\x14\0\0\0\x3e\0\0\0\
|
||||
\0\0\0\0\x28\0\0\0\x18\0\0\0\x3e\0\0\0\0\0\0\0\x30\0\0\0\x08\0\0\0\x3f\x01\0\0\
|
||||
\0\0\0\0\x88\0\0\0\x1a\0\0\0\x3e\0\0\0\0\0\0\0\x98\0\0\0\x1a\0\0\0\xea\0\0\0\0\
|
||||
\0\0\0\xb0\0\0\0\x1a\0\0\0\x52\x03\0\0\0\0\0\0\xb8\0\0\0\x1a\0\0\0\x56\x03\0\0\
|
||||
\0\0\0\0\xc8\0\0\0\x1f\0\0\0\x84\x03\0\0\0\0\0\0\xe0\0\0\0\x20\0\0\0\xea\0\0\0\
|
||||
\0\0\0\0\xf8\0\0\0\x20\0\0\0\x3e\0\0\0\0\0\0\0\x20\x01\0\0\x24\0\0\0\x3e\0\0\0\
|
||||
\0\0\0\0\x58\x01\0\0\x1a\0\0\0\xea\0\0\0\0\0\0\0\x68\x01\0\0\x20\0\0\0\x46\x04\
|
||||
\0\0\0\0\0\0\x90\x01\0\0\x1a\0\0\0\x3f\x01\0\0\0\0\0\0\xa0\x01\0\0\x1a\0\0\0\
|
||||
\x87\x04\0\0\0\0\0\0\xa8\x01\0\0\x18\0\0\0\x3e\0\0\0\0\0\0\0\x1a\0\0\0\x42\0\0\
|
||||
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
|
||||
\0\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\0\0\0\0\0\0\x1c\0\0\
|
||||
\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\x10\0\0\0\0\0\0\0\0\0\0\0\x1a\0\
|
||||
\0\0\x01\0\0\0\0\0\0\0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\0\0\
|
||||
\0\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\0\0\
|
||||
\0\0\0\0";
|
||||
opts.insns_sz = 2184;
|
||||
opts.insns = (void *)"\
|
||||
\xbf\x16\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\x78\xff\xff\xff\xb7\x02\0\
|
||||
\0\x88\0\0\0\xb7\x03\0\0\0\0\0\0\x85\0\0\0\x71\0\0\0\x05\0\x14\0\0\0\0\0\x61\
|
||||
\xa1\x78\xff\0\0\0\0\xd5\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa1\x7c\xff\
|
||||
\0\0\0\0\xd5\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa1\x80\xff\0\0\0\0\xd5\
|
||||
\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa1\x84\xff\0\0\0\0\xd5\x01\x01\0\0\
|
||||
\0\0\0\x85\0\0\0\xa8\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\x01\0\0\0\0\
|
||||
\0\0\xd5\x01\x02\0\0\0\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\xbf\x70\0\0\
|
||||
\0\0\0\0\x95\0\0\0\0\0\0\0\x61\x60\x08\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\
|
||||
\x48\x0e\0\0\x63\x01\0\0\0\0\0\0\x61\x60\x0c\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\
|
||||
\0\0\x44\x0e\0\0\x63\x01\0\0\0\0\0\0\x79\x60\x10\0\0\0\0\0\x18\x61\0\0\0\0\0\0\
|
||||
\0\0\0\0\x38\x0e\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\x05\0\0\
|
||||
\x18\x61\0\0\0\0\0\0\0\0\0\0\x30\x0e\0\0\x7b\x01\0\0\0\0\0\0\xb7\x01\0\0\x12\0\
|
||||
\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\x30\x0e\0\0\xb7\x03\0\0\x1c\0\0\0\x85\0\0\0\
|
||||
\xa6\0\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\xd4\xff\0\0\0\0\x63\x7a\x78\xff\0\0\0\0\
|
||||
\x61\xa0\x78\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x80\x0e\0\0\x63\x01\0\0\0\
|
||||
\0\0\0\x61\x60\x20\0\0\0\0\0\x15\0\x03\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\
|
||||
\x5c\x0e\0\0\x63\x01\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\
|
||||
\0\x50\x0e\0\0\xb7\x03\0\0\x48\0\0\0\x85\0\0\0\xa6\0\0\0\xbf\x07\0\0\0\0\0\0\
|
||||
\xc5\x07\xc3\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x63\x71\0\0\0\0\0\
|
||||
\0\x79\x63\x18\0\0\0\0\0\x15\x03\x04\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x98\
|
||||
\x0e\0\0\xb7\x02\0\0\x62\0\0\0\x85\0\0\0\x94\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\
|
||||
\0\0\0\0\x61\x20\0\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x08\x0f\0\0\x63\x01\0\
|
||||
\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\x0f\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\
|
||||
\x10\x0f\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x98\x0e\0\0\x18\
|
||||
\x61\0\0\0\0\0\0\0\0\0\0\x18\x0f\0\0\x7b\x01\0\0\0\0\0\0\xb7\x01\0\0\x02\0\0\0\
|
||||
\x18\x62\0\0\0\0\0\0\0\0\0\0\x08\x0f\0\0\xb7\x03\0\0\x20\0\0\0\x85\0\0\0\xa6\0\
|
||||
\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\xa3\xff\0\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\0\
|
||||
\0\0\0\x61\x20\0\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x28\x0f\0\0\x63\x01\0\0\
|
||||
\0\0\0\0\xb7\x01\0\0\x16\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\x28\x0f\0\0\xb7\x03\
|
||||
\0\0\x04\0\0\0\x85\0\0\0\xa6\0\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\x96\xff\0\0\0\0\
|
||||
\x18\x60\0\0\0\0\0\0\0\0\0\0\x30\x0f\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x78\x11\0\
|
||||
\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x38\x0f\0\0\x18\x61\0\0\0\0\
|
||||
\0\0\0\0\0\0\x70\x11\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x40\
|
||||
\x10\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xb8\x11\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\
|
||||
\0\0\0\0\0\0\0\0\0\x48\x10\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xc8\x11\0\0\x7b\x01\
|
||||
\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\xe8\x10\0\0\x18\x61\0\0\0\0\0\0\0\0\0\
|
||||
\0\xe8\x11\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x18\x61\
|
||||
\0\0\0\0\0\0\0\0\0\0\xe0\x11\0\0\x7b\x01\0\0\0\0\0\0\x61\x60\x08\0\0\0\0\0\x18\
|
||||
\x61\0\0\0\0\0\0\0\0\0\0\x80\x11\0\0\x63\x01\0\0\0\0\0\0\x61\x60\x0c\0\0\0\0\0\
|
||||
\x18\x61\0\0\0\0\0\0\0\0\0\0\x84\x11\0\0\x63\x01\0\0\0\0\0\0\x79\x60\x10\0\0\0\
|
||||
\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x88\x11\0\0\x7b\x01\0\0\0\0\0\0\x61\xa0\x78\
|
||||
\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xb0\x11\0\0\x63\x01\0\0\0\0\0\0\x18\
|
||||
\x61\0\0\0\0\0\0\0\0\0\0\xf8\x11\0\0\xb7\x02\0\0\x11\0\0\0\xb7\x03\0\0\x0c\0\0\
|
||||
\0\xb7\x04\0\0\0\0\0\0\x85\0\0\0\xa7\0\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\x60\xff\
|
||||
\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x68\x11\0\0\x63\x70\x6c\0\0\0\0\0\x77\x07\
|
||||
\0\0\x20\0\0\0\x63\x70\x70\0\0\0\0\0\xb7\x01\0\0\x05\0\0\0\x18\x62\0\0\0\0\0\0\
|
||||
\0\0\0\0\x68\x11\0\0\xb7\x03\0\0\x8c\0\0\0\x85\0\0\0\xa6\0\0\0\xbf\x07\0\0\0\0\
|
||||
\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\xd8\x11\0\0\x61\x01\0\0\0\0\0\0\xd5\x01\x02\0\
|
||||
\0\0\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\xc5\x07\x4e\xff\0\0\0\0\x63\
|
||||
\x7a\x80\xff\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x10\x12\0\0\x18\x61\0\0\0\0\0\
|
||||
\0\0\0\0\0\x10\x17\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x18\x12\
|
||||
\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x08\x17\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\
|
||||
\0\0\0\0\0\0\0\x28\x14\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x50\x17\0\0\x7b\x01\0\0\
|
||||
\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x30\x14\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\
|
||||
\x60\x17\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\xd0\x15\0\0\x18\
|
||||
\x61\0\0\0\0\0\0\0\0\0\0\x80\x17\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\
|
||||
\0\0\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x78\x17\0\0\x7b\x01\0\0\0\0\0\0\x61\
|
||||
\x60\x08\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x18\x17\0\0\x63\x01\0\0\0\0\0\0\
|
||||
\x61\x60\x0c\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x1c\x17\0\0\x63\x01\0\0\0\0\
|
||||
\0\0\x79\x60\x10\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x20\x17\0\0\x7b\x01\0\0\
|
||||
\0\0\0\0\x61\xa0\x78\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x48\x17\0\0\x63\
|
||||
\x01\0\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x90\x17\0\0\xb7\x02\0\0\x12\0\0\0\
|
||||
\xb7\x03\0\0\x0c\0\0\0\xb7\x04\0\0\0\0\0\0\x85\0\0\0\xa7\0\0\0\xbf\x07\0\0\0\0\
|
||||
\0\0\xc5\x07\x17\xff\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\x17\0\0\x63\x70\x6c\
|
||||
\0\0\0\0\0\x77\x07\0\0\x20\0\0\0\x63\x70\x70\0\0\0\0\0\xb7\x01\0\0\x05\0\0\0\
|
||||
\x18\x62\0\0\0\0\0\0\0\0\0\0\0\x17\0\0\xb7\x03\0\0\x8c\0\0\0\x85\0\0\0\xa6\0\0\
|
||||
\0\xbf\x07\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x70\x17\0\0\x61\x01\0\0\0\0\
|
||||
\0\0\xd5\x01\x02\0\0\0\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\xc5\x07\x05\
|
||||
\xff\0\0\0\0\x63\x7a\x84\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\xd5\x01\x02\0\0\0\
|
||||
\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa0\x80\xff\0\0\0\0\x63\x06\
|
||||
\x28\0\0\0\0\0\x61\xa0\x84\xff\0\0\0\0\x63\x06\x2c\0\0\0\0\0\x18\x61\0\0\0\0\0\
|
||||
\0\0\0\0\0\0\0\0\0\x61\x10\0\0\0\0\0\0\x63\x06\x18\0\0\0\0\0\xb7\0\0\0\0\0\0\0\
|
||||
\x95\0\0\0\0\0\0\0";
|
||||
	err = bpf_load_and_run(&opts);
	if (err < 0)
		return err;
	skel->rodata =
		mmap(skel->rodata, 4096, PROT_READ, MAP_SHARED | MAP_FIXED,
		     skel->maps.rodata.map_fd, 0);
	return 0;
}

static inline struct iterators_bpf *
iterators_bpf__open_and_load(void)
{
	struct iterators_bpf *skel;

	skel = iterators_bpf__open();
	if (!skel)
		return NULL;
	if (iterators_bpf__load(skel)) {
		iterators_bpf__destroy(skel);
		return NULL;
	}
	return skel;
}

#endif /* __ITERATORS_BPF_SKEL_H__ */
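For orientation, a light skeleton like the one above is consumed the same way as a classic skeleton. A minimal sketch; the header name and the destroy helper are assumed from the usual bpftool naming conventions rather than shown in this hunk:

#include "iterators.lskel.h"	/* assumed name of the generated header */

static int run_iterators(void)
{
	struct iterators_bpf *skel;

	skel = iterators_bpf__open_and_load();	/* runs the embedded loader program */
	if (!skel)
		return -1;
	/* ... use skel->progs / skel->maps.rodata here ... */
	iterators_bpf__destroy(skel);		/* assumed helper, per skeleton convention */
	return 0;
}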
@@ -1,412 +0,0 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/* THIS FILE IS AUTOGENERATED! */
#ifndef __ITERATORS_BPF_SKEL_H__
#define __ITERATORS_BPF_SKEL_H__

#include <stdlib.h>
#include <bpf/libbpf.h>

struct iterators_bpf {
	struct bpf_object_skeleton *skeleton;
	struct bpf_object *obj;
	struct {
		struct bpf_map *rodata;
	} maps;
	struct {
		struct bpf_program *dump_bpf_map;
		struct bpf_program *dump_bpf_prog;
	} progs;
	struct {
		struct bpf_link *dump_bpf_map;
		struct bpf_link *dump_bpf_prog;
	} links;
	struct iterators_bpf__rodata {
		char dump_bpf_map____fmt[35];
		char dump_bpf_map____fmt_1[14];
		char dump_bpf_prog____fmt[32];
		char dump_bpf_prog____fmt_2[17];
	} *rodata;
};

static void
iterators_bpf__destroy(struct iterators_bpf *obj)
{
	if (!obj)
		return;
	if (obj->skeleton)
		bpf_object__destroy_skeleton(obj->skeleton);
	free(obj);
}

static inline int
iterators_bpf__create_skeleton(struct iterators_bpf *obj);

static inline struct iterators_bpf *
iterators_bpf__open_opts(const struct bpf_object_open_opts *opts)
{
	struct iterators_bpf *obj;

	obj = (struct iterators_bpf *)calloc(1, sizeof(*obj));
	if (!obj)
		return NULL;
	if (iterators_bpf__create_skeleton(obj))
		goto err;
	if (bpf_object__open_skeleton(obj->skeleton, opts))
		goto err;

	return obj;
err:
	iterators_bpf__destroy(obj);
	return NULL;
}

static inline struct iterators_bpf *
iterators_bpf__open(void)
{
	return iterators_bpf__open_opts(NULL);
}

static inline int
iterators_bpf__load(struct iterators_bpf *obj)
{
	return bpf_object__load_skeleton(obj->skeleton);
}

static inline struct iterators_bpf *
iterators_bpf__open_and_load(void)
{
	struct iterators_bpf *obj;

	obj = iterators_bpf__open();
	if (!obj)
		return NULL;
	if (iterators_bpf__load(obj)) {
		iterators_bpf__destroy(obj);
		return NULL;
	}
	return obj;
}

static inline int
iterators_bpf__attach(struct iterators_bpf *obj)
{
	return bpf_object__attach_skeleton(obj->skeleton);
}

static inline void
iterators_bpf__detach(struct iterators_bpf *obj)
{
	return bpf_object__detach_skeleton(obj->skeleton);
}

static inline int
iterators_bpf__create_skeleton(struct iterators_bpf *obj)
{
	struct bpf_object_skeleton *s;

	s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));
	if (!s)
		return -1;
	obj->skeleton = s;

	s->sz = sizeof(*s);
	s->name = "iterators_bpf";
	s->obj = &obj->obj;

	/* maps */
	s->map_cnt = 1;
	s->map_skel_sz = sizeof(*s->maps);
	s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);
	if (!s->maps)
		goto err;

	s->maps[0].name = "iterator.rodata";
	s->maps[0].map = &obj->maps.rodata;
	s->maps[0].mmaped = (void **)&obj->rodata;

	/* programs */
	s->prog_cnt = 2;
	s->prog_skel_sz = sizeof(*s->progs);
	s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);
	if (!s->progs)
		goto err;

	s->progs[0].name = "dump_bpf_map";
	s->progs[0].prog = &obj->progs.dump_bpf_map;
	s->progs[0].link = &obj->links.dump_bpf_map;

	s->progs[1].name = "dump_bpf_prog";
	s->progs[1].prog = &obj->progs.dump_bpf_prog;
	s->progs[1].link = &obj->links.dump_bpf_prog;

	s->data_sz = 7176;
	s->data = (void *)"\
\x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\
[ ... roughly 260 further lines of escaped bytes (the complete 7176-byte iterators.bpf.o ELF image embedded as s->data) elided ... ]
\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return 0;
err:
	bpf_object__destroy_skeleton(s);
	return -1;
}

#endif /* __ITERATORS_BPF_SKEL_H__ */
@@ -213,7 +213,7 @@ static void __bpf_tramp_image_put_deferred(struct work_struct *work)
 	im = container_of(work, struct bpf_tramp_image, work);
 	bpf_image_ksym_del(&im->ksym);
 	bpf_jit_free_exec(im->image);
-	bpf_jit_uncharge_modmem(1);
+	bpf_jit_uncharge_modmem(PAGE_SIZE);
 	percpu_ref_exit(&im->pcref);
 	kfree_rcu(im, rcu);
 }
@@ -310,7 +310,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
 	if (!im)
 		goto out;
 
-	err = bpf_jit_charge_modmem(1);
+	err = bpf_jit_charge_modmem(PAGE_SIZE);
 	if (err)
 		goto out_free_im;
 
@@ -332,7 +332,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
 out_free_image:
 	bpf_jit_free_exec(im->image);
 out_uncharge:
-	bpf_jit_uncharge_modmem(1);
+	bpf_jit_uncharge_modmem(PAGE_SIZE);
 out_free_im:
 	kfree(im);
 out:
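The two hunks above switch module-memory accounting from a page count to bytes. A hedged sketch of the pairing a caller must now keep (PAGE_SIZE for a one-page trampoline image; error paths abbreviated):

	void *image;

	if (bpf_jit_charge_modmem(PAGE_SIZE))	/* charge in bytes, not pages */
		return NULL;
	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image) {
		bpf_jit_uncharge_modmem(PAGE_SIZE);	/* uncharge the same byte count */
		return NULL;
	}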
@@ -536,7 +536,7 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn)
 static const char *reg_type_str(struct bpf_verifier_env *env,
 				enum bpf_reg_type type)
 {
-	char postfix[16] = {0}, prefix[16] = {0};
+	char postfix[16] = {0}, prefix[32] = {0};
 	static const char * const str[] = {
 		[NOT_INIT]		= "?",
 		[SCALAR_VALUE]		= "inv",
@@ -570,9 +570,11 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
 	}
 
 	if (type & MEM_RDONLY)
-		strncpy(prefix, "rdonly_", 16);
+		strncpy(prefix, "rdonly_", 32);
 	if (type & MEM_ALLOC)
-		strncpy(prefix, "alloc_", 16);
+		strncpy(prefix, "alloc_", 32);
+	if (type & MEM_USER)
+		strncpy(prefix, "user_", 32);
 
 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
 		 prefix, str[base_type(type)], postfix);
@@ -1547,14 +1549,15 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
 			    struct bpf_reg_state *regs, u32 regno,
 			    enum bpf_reg_type reg_type,
-			    struct btf *btf, u32 btf_id)
+			    struct btf *btf, u32 btf_id,
+			    enum bpf_type_flag flag)
 {
 	if (reg_type == SCALAR_VALUE) {
 		mark_reg_unknown(env, regs, regno);
 		return;
 	}
 	mark_reg_known_zero(env, regs, regno);
-	regs[regno].type = PTR_TO_BTF_ID;
+	regs[regno].type = PTR_TO_BTF_ID | flag;
 	regs[regno].btf = btf;
 	regs[regno].btf_id = btf_id;
 }
@@ -4152,6 +4155,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	struct bpf_reg_state *reg = regs + regno;
 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
+	enum bpf_type_flag flag = 0;
 	u32 btf_id;
 	int ret;
 
@@ -4171,9 +4175,16 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
+	if (reg->type & MEM_USER) {
+		verbose(env,
+			"R%d is ptr_%s access user memory: off=%d\n",
+			regno, tname, off);
+		return -EACCES;
+	}
+
 	if (env->ops->btf_struct_access) {
 		ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
-						  off, size, atype, &btf_id);
+						  off, size, atype, &btf_id, &flag);
 	} else {
 		if (atype != BPF_READ) {
 			verbose(env, "only read is supported\n");
@@ -4181,14 +4192,14 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 		}
 
 		ret = btf_struct_access(&env->log, reg->btf, t, off, size,
-					atype, &btf_id);
+					atype, &btf_id, &flag);
 	}
 
 	if (ret < 0)
 		return ret;
 
 	if (atype == BPF_READ && value_regno >= 0)
-		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id);
+		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
 
 	return 0;
 }
@@ -4201,6 +4212,7 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
 {
 	struct bpf_reg_state *reg = regs + regno;
 	struct bpf_map *map = reg->map_ptr;
+	enum bpf_type_flag flag = 0;
 	const struct btf_type *t;
 	const char *tname;
 	u32 btf_id;
@@ -4238,12 +4250,12 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
-	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id);
+	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
 	if (ret < 0)
 		return ret;
 
 	if (value_regno >= 0)
-		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id);
+		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
 
 	return 0;
 }
@@ -4444,7 +4456,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		if (err < 0)
 			return err;
 
-		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, &btf_id);
+		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
+				       &btf_id);
 		if (err)
 			verbose_linfo(env, insn_idx, "; ");
 		if (!err && t == BPF_READ && value_regno >= 0) {
@@ -13054,6 +13067,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 
 	prog->jited = 1;
 	prog->bpf_func = func[0]->bpf_func;
+	prog->jited_len = func[0]->jited_len;
 	prog->aux->func = func;
 	prog->aux->func_cnt = env->subprog_cnt;
 	bpf_prog_jit_attempt_done(prog);
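The MEM_USER flag threaded through above makes the verifier refuse direct dereferences of pointers whose vmlinux BTF carries the "user" type tag. An illustrative sketch; the struct, field and attach point are hypothetical:

#include "vmlinux.h"		/* assumed to provide __user-tagged types */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct foo {
	int __user *uptr;	/* BTF marks this pointer as user memory */
};

SEC("fentry/some_kernel_func")	/* hypothetical attach point */
int BPF_PROG(read_foo, struct foo *f)
{
	int v;

	/* v = *f->uptr; would now fail to load with
	 * "R1 is ptr_foo access user memory: off=0"
	 */
	bpf_probe_read_user(&v, sizeof(v), f->uptr);	/* the sanctioned way */
	return 0;
}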
@@ -1235,6 +1235,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_task_stack_proto;
 	case BPF_FUNC_copy_from_user:
 		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
+	case BPF_FUNC_copy_from_user_task:
+		return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL;
 	case BPF_FUNC_snprintf_btf:
 		return &bpf_snprintf_btf_proto;
 	case BPF_FUNC_per_cpu_ptr:
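bpf_copy_from_user_task() takes an explicit target task, so a sleepable program can read another process's memory. A sketch under stated assumptions: user_ptr is a hypothetical user-space address supplied at load time.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

const volatile unsigned long user_ptr;	/* hypothetical user VA, set by the loader */

SEC("iter.s/task")	/* sleepable iterator, as the helper requires */
int dump_task_mem(struct bpf_iter__task *ctx)
{
	struct task_struct *task = ctx->task;
	long val;

	if (!task)
		return 0;
	if (!bpf_copy_from_user_task(&val, sizeof(val), (void *)user_ptr, task, 0))
		bpf_printk("pid %d: %lx", task->pid, val);
	return 0;
}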
@@ -296,7 +296,7 @@ config DEBUG_INFO_DWARF4
 config DEBUG_INFO_DWARF5
 	bool "Generate DWARF Version 5 debuginfo"
 	depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
-	depends on !DEBUG_INFO_BTF
+	depends on !DEBUG_INFO_BTF || PAHOLE_VERSION >= 121
 	help
 	  Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
 	  5.0+ accepts the -gdwarf-5 flag but only had partial support for some
@@ -323,7 +323,15 @@ config DEBUG_INFO_BTF
 	  DWARF type info into equivalent deduplicated BTF type info.
 
 config PAHOLE_HAS_SPLIT_BTF
-	def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119")
+	def_bool PAHOLE_VERSION >= 119
+
+config PAHOLE_HAS_BTF_TAG
+	def_bool PAHOLE_VERSION >= 123
+	depends on CC_IS_CLANG
+	help
+	  Decide whether pahole emits btf_tag attributes (btf_type_tag and
+	  btf_decl_tag) or not. Currently only clang compiler implements
+	  these attributes, so make the config depend on CC_IS_CLANG.
 
 config DEBUG_INFO_BTF_MODULES
 	def_bool y
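What PAHOLE_HAS_BTF_TAG gates, roughly: with clang, address-space qualifiers can be emitted into BTF as type tags. A sketch of the wiring; the real guards live in include/linux/compiler_types.h and may differ in detail:

#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG)
# define BTF_TYPE_TAG(value)	__attribute__((btf_type_tag(#value)))
# define __user			BTF_TYPE_TAG(user)	/* tag survives into vmlinux BTF */
#else
# define __user			/* sparse-only checking, no BTF tag */
#endif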
@@ -145,7 +145,8 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
 					   const struct btf *btf,
 					   const struct btf_type *t, int off,
 					   int size, enum bpf_access_type atype,
-					   u32 *next_btf_id)
+					   u32 *next_btf_id,
+					   enum bpf_type_flag *flag)
 {
 	const struct btf_type *state;
 	s32 type_id;
@@ -162,7 +163,8 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
 		return -EACCES;
 	}
 
-	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
+	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
+				flag);
 	if (err < 0)
 		return err;
 
@@ -154,7 +154,8 @@ static int bpf_test_finish(const union bpf_attr *kattr,
 		goto out;
 
 	if (sinfo) {
-		int i, offset = len, data_len;
+		int i, offset = len;
+		u32 data_len;
 
 		for (i = 0; i < sinfo->nr_frags; i++) {
 			skb_frag_t *frag = &sinfo->frags[i];
@@ -164,7 +165,7 @@ static int bpf_test_finish(const union bpf_attr *kattr,
 				break;
 			}
 
-			data_len = min_t(int, copy_size - offset,
+			data_len = min_t(u32, copy_size - offset,
 					 skb_frag_size(frag));
 
 			if (copy_to_user(data_out + offset,
@@ -960,7 +961,12 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	while (size < kattr->test.data_size_in) {
 		struct page *page;
 		skb_frag_t *frag;
-		int data_len;
+		u32 data_len;
+
+		if (sinfo->nr_frags == MAX_SKB_FRAGS) {
+			ret = -ENOMEM;
+			goto out;
+		}
 
 		page = alloc_page(GFP_KERNEL);
 		if (!page) {
@@ -971,7 +977,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 		frag = &sinfo->frags[sinfo->nr_frags++];
 		__skb_frag_set_page(frag, page);
 
-		data_len = min_t(int, kattr->test.data_size_in - size,
+		data_len = min_t(u32, kattr->test.data_size_in - size,
 				 PAGE_SIZE);
 		skb_frag_size_set(frag, data_len);
 
@@ -1141,7 +1147,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
 		goto out;
 
-	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
+	if (user_ctx->local_port > U16_MAX) {
 		ret = -ERANGE;
 		goto out;
 	}
@@ -1149,7 +1155,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 	ctx.family = (u16)user_ctx->family;
 	ctx.protocol = (u16)user_ctx->protocol;
 	ctx.dport = (u16)user_ctx->local_port;
-	ctx.sport = (__force __be16)user_ctx->remote_port;
+	ctx.sport = user_ctx->remote_port;
 
 	switch (ctx.family) {
 	case AF_INET:
@@ -8275,6 +8275,7 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
 			      struct bpf_insn_access_aux *info)
 {
 	const int size_default = sizeof(__u32);
+	int field_size;
 
 	if (off < 0 || off >= sizeof(struct bpf_sock))
 		return false;
@@ -8286,7 +8287,6 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
 	case offsetof(struct bpf_sock, family):
 	case offsetof(struct bpf_sock, type):
 	case offsetof(struct bpf_sock, protocol):
-	case offsetof(struct bpf_sock, dst_port):
 	case offsetof(struct bpf_sock, src_port):
 	case offsetof(struct bpf_sock, rx_queue_mapping):
 	case bpf_ctx_range(struct bpf_sock, src_ip4):
@@ -8295,6 +8295,14 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
 	case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
 		bpf_ctx_record_field_size(info, size_default);
 		return bpf_ctx_narrow_access_ok(off, size, size_default);
+	case bpf_ctx_range(struct bpf_sock, dst_port):
+		field_size = size == size_default ?
+			size_default : sizeof_field(struct bpf_sock, dst_port);
+		bpf_ctx_record_field_size(info, field_size);
+		return bpf_ctx_narrow_access_ok(off, size, field_size);
+	case offsetofend(struct bpf_sock, dst_port) ...
+	     offsetof(struct bpf_sock, dst_ip4) - 1:
+		return false;
 	}
 
 	return size == size_default;
@@ -10845,7 +10853,8 @@ static bool sk_lookup_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct bpf_sk_lookup, local_ip4):
 	case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]):
 	case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
-	case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
+	case offsetof(struct bpf_sk_lookup, remote_port) ...
+	     offsetof(struct bpf_sk_lookup, local_ip4) - 1:
 	case bpf_ctx_range(struct bpf_sk_lookup, local_port):
 	case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex):
 		bpf_ctx_record_field_size(info, sizeof(__u32));

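With remote_port and dst_port shrunk to 16 bits, the hunks above teach the verifier to accept narrow 2-byte loads on these fields while still permitting the legacy 4-byte load of the field plus its zero padding. A minimal sketch of a program using the new layout; the program logic and port number are illustrative, not taken from this series:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("sk_lookup")
    int drop_port_8080(struct bpf_sk_lookup *ctx)
    {
    	/* remote_port is now __be16; a plain 2-byte load is valid */
    	if (ctx->remote_port == bpf_htons(8080))
    		return SK_DROP;
    	return SK_PASS;
    }

    char _license[] SEC("license") = "GPL";
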
@@ -96,12 +96,14 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
 					const struct btf *btf,
 					const struct btf_type *t, int off,
 					int size, enum bpf_access_type atype,
-					u32 *next_btf_id)
+					u32 *next_btf_id,
+					enum bpf_type_flag *flag)
 {
 	size_t end;
 
 	if (atype == BPF_READ)
-		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
+		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
+					 flag);
 
 	if (t != tcp_sock_type) {
 		bpf_log(log, "only read is supported\n");

@@ -343,9 +343,9 @@ out:
 }
 EXPORT_SYMBOL(xsk_tx_peek_desc);
 
-static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
-					u32 max_entries)
+static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
 {
+	struct xdp_desc *descs = pool->tx_descs;
 	u32 nb_pkts = 0;
 
 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
@@ -355,8 +355,7 @@ static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
 	return nb_pkts;
 }
 
-u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
-				   u32 max_entries)
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
 {
 	struct xdp_sock *xs;
 	u32 nb_pkts;
@@ -365,7 +364,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
 	if (!list_is_singular(&pool->xsk_tx_list)) {
 		/* Fallback to the non-batched version */
 		rcu_read_unlock();
-		return xsk_tx_peek_release_fallback(pool, descs, max_entries);
+		return xsk_tx_peek_release_fallback(pool, max_entries);
 	}
 
 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
@@ -374,7 +373,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
 		goto out;
 	}
 
-	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
+	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries);
 	if (!nb_pkts) {
 		xs->tx->queue_empty_descs++;
 		goto out;
@@ -386,7 +385,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
 	 * packets. This avoids having to implement any buffering in
 	 * the Tx path.
 	 */
-	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
+	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
 	if (!nb_pkts)
 		goto out;

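The hunks above move the Tx descriptor array from a caller-supplied argument into the buffer pool itself (pool->tx_descs, allocated in the next section). A rough sketch of how a zero-copy driver's Tx path consumes the batch API after this change; the function and budget parameter are placeholders, not taken from a real driver:

    static void xsk_tx_batch_sketch(struct xsk_buff_pool *pool, u32 budget)
    {
    	struct xdp_desc *descs = pool->tx_descs;
    	u32 i, nb_pkts;

    	/* descriptors land in pool->tx_descs; no descs argument anymore */
    	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
    	for (i = 0; i < nb_pkts; i++) {
    		/* post descs[i].addr and descs[i].len to the HW Tx ring */
    	}
    }
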
@@ -37,6 +37,7 @@ void xp_destroy(struct xsk_buff_pool *pool)
 	if (!pool)
 		return;
 
+	kvfree(pool->tx_descs);
 	kvfree(pool->heads);
 	kvfree(pool);
 }
@@ -58,6 +59,12 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	if (!pool->heads)
 		goto out;
 
+	if (xs->tx) {
+		pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
+		if (!pool->tx_descs)
+			goto out;
+	}
+
 	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
 	pool->addrs_cnt = umem->size;
 	pool->heads_cnt = umem->chunks;

@@ -205,11 +205,11 @@ static inline bool xskq_cons_read_desc(struct xsk_queue *q,
 	return false;
 }
 
-static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
-					    struct xdp_desc *descs,
-					    struct xsk_buff_pool *pool, u32 max)
+static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
+					    u32 max)
 {
 	u32 cached_cons = q->cached_cons, nb_entries = 0;
+	struct xdp_desc *descs = pool->tx_descs;
 
 	while (cached_cons != q->cached_prod && nb_entries < max) {
 		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
@@ -282,12 +282,12 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
 	return xskq_cons_read_desc(q, desc, pool);
 }
 
-static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
-					    struct xsk_buff_pool *pool, u32 max)
+static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
+					    u32 max)
 {
 	u32 entries = xskq_cons_nb_entries(q, max);
 
-	return xskq_cons_read_desc_batch(q, descs, pool, entries);
+	return xskq_cons_read_desc_batch(q, pool, entries);
 }
 
 /* To improve performance in the xskq_cons_release functions, only update local state here.
@@ -304,13 +304,6 @@ static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
 	q->cached_cons += cnt;
 }
 
-static inline bool xskq_cons_is_full(struct xsk_queue *q)
-{
-	/* No barriers needed since data is not accessed */
-	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
-	       q->nentries;
-}
-
 static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
 {
 	/* No barriers needed since data is not accessed */

@@ -413,7 +413,7 @@ static void fixup_map(struct bpf_object *obj)
 	for (i = 0; i < NR_TESTS; i++) {
 		if (!strcmp(test_map_names[i], name) &&
 		    (check_test_flags(i))) {
-			bpf_map__resize(map, num_map_entries);
+			bpf_map__set_max_entries(map, num_map_entries);
 			continue;
 		}
 	}

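The same one-line migration recurs throughout this series: bpf_map__resize() is deprecated in favor of the consistently named setter. The before/after, mirroring the hunk above:

    /* deprecated since libbpf 0.8 */
    bpf_map__resize(map, num_map_entries);

    /* preferred replacement, same semantics */
    bpf_map__set_max_entries(map, num_map_entries);
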
@@ -79,13 +79,11 @@ static void usage(const char *prog)
 
 int main(int argc, char **argv)
 {
-	struct bpf_prog_load_attr prog_load_attr = {
-		.prog_type	= BPF_PROG_TYPE_XDP,
-	};
 	struct bpf_prog_info info = {};
 	__u32 info_len = sizeof(info);
 	const char *optstr = "FSN";
 	int prog_fd, map_fd, opt;
+	struct bpf_program *prog;
 	struct bpf_object *obj;
 	struct bpf_map *map;
 	char filename[256];
@@ -123,11 +121,19 @@ int main(int argc, char **argv)
 	}
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-	prog_load_attr.file = filename;
 
-	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj))
 		return 1;
 
+	prog = bpf_object__next_program(obj, NULL);
+	bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+	err = bpf_object__load(obj);
+	if (err)
+		return 1;
+
+	prog_fd = bpf_program__fd(prog);
+
 	map = bpf_object__next_map(obj, NULL);
 	if (!map) {
 		printf("finding a map in obj file failed\n");

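Several of the following samples receive the same mechanical conversion away from the deprecated bpf_prog_load_xattr(). The replacement flow, distilled from the hunk above (error handling trimmed to the essentials; the samples carry a single program per object):

    struct bpf_program *prog;
    struct bpf_object *obj;
    int prog_fd, err;

    obj = bpf_object__open_file(filename, NULL);
    if (libbpf_get_error(obj))
    	return 1;

    prog = bpf_object__next_program(obj, NULL);
    bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);

    err = bpf_object__load(obj);
    if (err)
    	return 1;

    prog_fd = bpf_program__fd(prog);
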
@@ -82,15 +82,13 @@ static void usage(const char *cmd)
 
 int main(int argc, char **argv)
 {
-	struct bpf_prog_load_attr prog_load_attr = {
-		.prog_type	= BPF_PROG_TYPE_XDP,
-	};
 	unsigned char opt_flags[256] = {};
 	const char *optstr = "i:T:P:SNFh";
 	struct bpf_prog_info info = {};
 	__u32 info_len = sizeof(info);
 	unsigned int kill_after_s = 0;
 	int i, prog_fd, map_fd, opt;
+	struct bpf_program *prog;
 	struct bpf_object *obj;
 	__u32 max_pckt_size = 0;
 	__u32 key = 0;
@@ -148,11 +146,20 @@ int main(int argc, char **argv)
 	}
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-	prog_load_attr.file = filename;
 
-	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj))
 		return 1;
 
+	prog = bpf_object__next_program(obj, NULL);
+	bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+	err = bpf_object__load(obj);
+	if (err)
+		return 1;
+
+	prog_fd = bpf_program__fd(prog);
+
 	/* static global var 'max_pcktsz' is accessible from .data section */
 	if (max_pckt_size) {
 		map_fd = bpf_object__find_map_fd_by_name(obj, "xdp_adju.data");

@@ -75,14 +75,11 @@ static void usage(const char *prog)
 
 int main(int argc, char **argv)
 {
-	struct bpf_prog_load_attr prog_load_attr = {
-		.prog_type	= BPF_PROG_TYPE_XDP,
-	};
 	const char *prog_name = "xdp_fwd";
 	struct bpf_program *prog = NULL;
 	struct bpf_program *pos;
 	const char *sec_name;
-	int prog_fd, map_fd = -1;
+	int prog_fd = -1, map_fd = -1;
 	char filename[PATH_MAX];
 	struct bpf_object *obj;
 	int opt, i, idx, err;
@@ -119,7 +116,6 @@ int main(int argc, char **argv)
 
 	if (attach) {
 		snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-		prog_load_attr.file = filename;
 
 		if (access(filename, O_RDONLY) < 0) {
 			printf("error accessing file %s: %s\n",
@@ -127,7 +123,14 @@ int main(int argc, char **argv)
 			return 1;
 		}
 
-		err = bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd);
+		obj = bpf_object__open_file(filename, NULL);
+		if (libbpf_get_error(obj))
+			return 1;
+
+		prog = bpf_object__next_program(obj, NULL);
+		bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+		err = bpf_object__load(obj);
 		if (err) {
 			printf("Does kernel support devmap lookup?\n");
 			/* If not, the error message will be:

@@ -491,7 +491,7 @@ int xdp_prognum5_lb_hash_ip_pairs(struct xdp_md *ctx)
 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
 }
 
-SEC("xdp_cpumap/redirect")
+SEC("xdp/cpumap")
 int xdp_redirect_cpu_devmap(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;
@@ -507,19 +507,19 @@ int xdp_redirect_cpu_devmap(struct xdp_md *ctx)
 	return bpf_redirect_map(&tx_port, 0, 0);
 }
 
-SEC("xdp_cpumap/pass")
+SEC("xdp/cpumap")
 int xdp_redirect_cpu_pass(struct xdp_md *ctx)
 {
 	return XDP_PASS;
 }
 
-SEC("xdp_cpumap/drop")
+SEC("xdp/cpumap")
 int xdp_redirect_cpu_drop(struct xdp_md *ctx)
 {
 	return XDP_DROP;
 }
 
-SEC("xdp_devmap/egress")
+SEC("xdp/devmap")
 int xdp_redirect_egress_prog(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;

@@ -70,7 +70,7 @@ static void print_avail_progs(struct bpf_object *obj)
 
 	printf(" Programs to be used for -p/--progname:\n");
 	bpf_object__for_each_program(pos, obj) {
-		if (bpf_program__is_xdp(pos)) {
+		if (bpf_program__type(pos) == BPF_PROG_TYPE_XDP) {
 			if (!strncmp(bpf_program__name(pos), "xdp_prognum",
 				     sizeof("xdp_prognum") - 1))
 				printf(" %s\n", bpf_program__name(pos));

@@ -68,7 +68,7 @@ int xdp_redirect_map_native(struct xdp_md *ctx)
 	return xdp_redirect_map(ctx, &tx_port_native);
 }
 
-SEC("xdp_devmap/egress")
+SEC("xdp/devmap")
 int xdp_redirect_map_egress(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;

@@ -53,7 +53,7 @@ int xdp_redirect_map_native(struct xdp_md *ctx)
 	return xdp_redirect_map(ctx, &forward_map_native);
 }
 
-SEC("xdp_devmap/egress")
+SEC("xdp/devmap")
 int xdp_devmap_prog(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;

@@ -640,12 +640,10 @@ static void usage(const char *prog)
 
 int main(int ac, char **argv)
 {
-	struct bpf_prog_load_attr prog_load_attr = {
-		.prog_type	= BPF_PROG_TYPE_XDP,
-	};
 	struct bpf_prog_info info = {};
 	__u32 info_len = sizeof(info);
 	const char *optstr = "SF";
+	struct bpf_program *prog;
 	struct bpf_object *obj;
 	char filename[256];
 	char **ifname_list;
@@ -653,7 +651,6 @@ int main(int ac, char **argv)
 	int err, i = 1;
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-	prog_load_attr.file = filename;
 
 	total_ifindex = ac - 1;
 	ifname_list = (argv + 1);
@@ -684,14 +681,20 @@ int main(int ac, char **argv)
 		return 1;
 	}
 
-	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj))
 		return 1;
 
+	prog = bpf_object__next_program(obj, NULL);
+	bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
 	printf("\n******************loading bpf file*********************\n");
-	if (!prog_fd) {
-		printf("bpf_prog_load_xattr: %s\n", strerror(errno));
+	err = bpf_object__load(obj);
+	if (err) {
+		printf("bpf_object__load(): %s\n", strerror(errno));
 		return 1;
 	}
+	prog_fd = bpf_program__fd(prog);
 
 	lpm_map_fd = bpf_object__find_map_fd_by_name(obj, "lpm_map");
 	rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");

@@ -450,14 +450,12 @@ static void stats_poll(int interval, int action, __u32 cfg_opt)
 int main(int argc, char **argv)
 {
 	__u32 cfg_options= NO_TOUCH ; /* Default: Don't touch packet memory */
-	struct bpf_prog_load_attr prog_load_attr = {
-		.prog_type	= BPF_PROG_TYPE_XDP,
-	};
 	struct bpf_prog_info info = {};
 	__u32 info_len = sizeof(info);
 	int prog_fd, map_fd, opt, err;
 	bool use_separators = true;
 	struct config cfg = { 0 };
+	struct bpf_program *prog;
 	struct bpf_object *obj;
 	struct bpf_map *map;
 	char filename[256];
@@ -471,11 +469,19 @@ int main(int argc, char **argv)
 	char *action_str = NULL;
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-	prog_load_attr.file = filename;
 
-	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj))
 		return EXIT_FAIL;
 
+	prog = bpf_object__next_program(obj, NULL);
+	bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+	err = bpf_object__load(obj);
+	if (err)
+		return EXIT_FAIL;
+	prog_fd = bpf_program__fd(prog);
+
 	map = bpf_object__find_map_by_name(obj, "config_map");
 	stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map");
 	rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");

@@ -1218,7 +1218,7 @@ int sample_setup_maps(struct bpf_map **maps)
 		default:
 			return -EINVAL;
 		}
-		if (bpf_map__resize(sample_map[i], sample_map_count[i]) < 0)
+		if (bpf_map__set_max_entries(sample_map[i], sample_map_count[i]) < 0)
 			return -errno;
 	}
 	sample_map[MAP_DEVMAP_XMIT_MULTI] = maps[MAP_DEVMAP_XMIT_MULTI];

@@ -61,7 +61,7 @@ static inline char *safe_strncpy(char *dst, const char *src, size_t size)
 
 #define __attach_tp(name)                                                      \
 	({                                                                     \
-		if (!bpf_program__is_tracing(skel->progs.name))                \
+		if (bpf_program__type(skel->progs.name) != BPF_PROG_TYPE_TRACING)\
 			return -EINVAL;                                        \
 		skel->links.name = bpf_program__attach(skel->progs.name);      \
 		if (!skel->links.name)                                         \

@@ -152,9 +152,6 @@ static int parse_ports(const char *port_str, int *min_port, int *max_port)
 
 int main(int argc, char **argv)
 {
-	struct bpf_prog_load_attr prog_load_attr = {
-		.prog_type	= BPF_PROG_TYPE_XDP,
-	};
 	int min_port = 0, max_port = 0, vip2tnl_map_fd;
 	const char *optstr = "i:a:p:s:d:m:T:P:FSNh";
 	unsigned char opt_flags[256] = {};
@@ -162,6 +159,7 @@ int main(int argc, char **argv)
 	__u32 info_len = sizeof(info);
 	unsigned int kill_after_s = 0;
 	struct iptnl_info tnl = {};
+	struct bpf_program *prog;
 	struct bpf_object *obj;
 	struct vip vip = {};
 	char filename[256];
@@ -259,15 +257,20 @@ int main(int argc, char **argv)
 	}
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-	prog_load_attr.file = filename;
 
-	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj))
 		return 1;
 
-	if (!prog_fd) {
-		printf("bpf_prog_load_xattr: %s\n", strerror(errno));
+	prog = bpf_object__next_program(obj, NULL);
+	bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+	err = bpf_object__load(obj);
+	if (err) {
+		printf("bpf_object__load(): %s\n", strerror(errno));
 		return 1;
 	}
+	prog_fd = bpf_program__fd(prog);
 
 	rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
 	vip2tnl_map_fd = bpf_object__find_map_fd_by_name(obj, "vip2tnl");

@@ -7,7 +7,7 @@ if ! [ -x "$(command -v ${PAHOLE})" ]; then
 	exit 0
 fi
 
-pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/')
+pahole_ver=$($(dirname $0)/pahole-version.sh ${PAHOLE})
 
 if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then
 	# pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars

@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Usage: $ ./pahole-version.sh pahole
+#
+# Prints pahole's version in a 3-digit form, such as 119 for v1.19.
+
+if [ ! -x "$(command -v "$@")" ]; then
+	echo 0
+	exit 1
+fi
+
+"$@" --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'

@@ -310,7 +310,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
 {
 	const char *prog_name = prog_info->name;
 	const struct btf_type *func_type;
-	const struct bpf_func_info finfo;
+	const struct bpf_func_info finfo = {};
 	struct bpf_prog_info info = {};
 	__u32 info_len = sizeof(info);
 	struct btf *prog_btf = NULL;

@@ -487,17 +487,12 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
 	size_t maxlen;
 	bool res;
 
-	if (ifindex)
-		/* Only test offload-able program types */
-		switch (prog_type) {
-		case BPF_PROG_TYPE_SCHED_CLS:
-		case BPF_PROG_TYPE_XDP:
-			break;
-		default:
-			return;
-		}
+	if (ifindex) {
+		p_info("BPF offload feature probing is not supported");
+		return;
+	}
 
-	res = bpf_probe_prog_type(prog_type, ifindex);
+	res = libbpf_probe_bpf_prog_type(prog_type, NULL);
 #ifdef USE_LIBCAP
 	/* Probe may succeed even if program load fails, for unprivileged users
 	 * check that we did not fail because of insufficient permissions
@@ -535,7 +530,12 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
 	size_t maxlen;
 	bool res;
 
-	res = bpf_probe_map_type(map_type, ifindex);
+	if (ifindex) {
+		p_info("BPF offload feature probing is not supported");
+		return;
+	}
+
+	res = libbpf_probe_bpf_map_type(map_type, NULL);
 
 	/* Probe result depends on the success of map creation, no additional
 	 * check required for unprivileged users
@@ -567,7 +567,12 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
 	bool res = false;
 
 	if (supported_type) {
-		res = bpf_probe_helper(id, prog_type, ifindex);
+		if (ifindex) {
+			p_info("BPF offload feature probing is not supported");
+			return;
+		}
+
+		res = libbpf_probe_bpf_helper(prog_type, id, NULL);
 #ifdef USE_LIBCAP
 		/* Probe may succeed even if program load fails, for
 		 * unprivileged users check that we did not fail because of

@@ -378,13 +378,16 @@ static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
 			int prog_fd = skel->progs.%2$s.prog_fd;		    \n\
 		", obj_name, bpf_program__name(prog));
 
-	switch (bpf_program__get_type(prog)) {
+	switch (bpf_program__type(prog)) {
 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
 		tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
-		printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
+		printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
 		break;
 	case BPF_PROG_TYPE_TRACING:
-		printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n");
+		if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
+			printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
+		else
+			printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
 		break;
 	default:
 		printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");

@@ -478,14 +478,11 @@ int main(int argc, char **argv)
 	}
 
 	if (!legacy_libbpf) {
-		enum libbpf_strict_mode mode;
-
 		/* Allow legacy map definitions for skeleton generation.
 		 * It will still be rejected if users use LIBBPF_STRICT_ALL
 		 * mode for loading generated skeleton.
 		 */
-		mode = (__LIBBPF_STRICT_LAST - 1) & ~LIBBPF_STRICT_MAP_DEFINITIONS;
-		ret = libbpf_set_strict_mode(mode);
+		ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
 		if (ret)
 			p_err("failed to enable libbpf strict mode: %d", ret);
 	}

@@ -1272,12 +1272,12 @@ static int do_run(int argc, char **argv)
 {
 	char *data_fname_in = NULL, *data_fname_out = NULL;
 	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
-	struct bpf_prog_test_run_attr test_attr = {0};
 	const unsigned int default_size = SZ_32K;
 	void *data_in = NULL, *data_out = NULL;
 	void *ctx_in = NULL, *ctx_out = NULL;
 	unsigned int repeat = 1;
 	int fd, err;
+	LIBBPF_OPTS(bpf_test_run_opts, test_attr);
 
 	if (!REQ_ARGS(4))
 		return -1;
@@ -1395,14 +1395,13 @@ static int do_run(int argc, char **argv)
 		goto free_ctx_in;
 	}
 
-	test_attr.prog_fd	= fd;
 	test_attr.repeat	= repeat;
 	test_attr.data_in	= data_in;
 	test_attr.data_out	= data_out;
 	test_attr.ctx_in	= ctx_in;
 	test_attr.ctx_out	= ctx_out;
 
-	err = bpf_prog_test_run_xattr(&test_attr);
+	err = bpf_prog_test_run_opts(fd, &test_attr);
 	if (err) {
 		p_err("failed to run program: %s", strerror(errno));
 		goto free_ctx_out;
@@ -2283,10 +2282,10 @@ static int do_profile(int argc, char **argv)
 	profile_obj->rodata->num_metric = num_metric;
 
 	/* adjust map sizes */
-	bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
-	bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
-	bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
-	bpf_map__resize(profile_obj->maps.counts, 1);
+	bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
+	bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
+	bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
+	bpf_map__set_max_entries(profile_obj->maps.counts, 1);
 
 	/* change target name */
 	profile_tgt_name = profile_target_name(profile_tgt_fd);

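bpftool's `prog run` now goes through the OPTS-based test-run API. A sketch of the same call pattern in a standalone tool, assuming prog_fd holds a loaded program; the buffer names and sizes are illustrative:

    char data_in[128] = {}, data_out[1500];
    LIBBPF_OPTS(bpf_test_run_opts, topts,
    	.data_in = data_in,
    	.data_size_in = sizeof(data_in),
    	.data_out = data_out,
    	.data_size_out = sizeof(data_out),
    	.repeat = 1,
    );
    int err = bpf_prog_test_run_opts(prog_fd, &topts);
    if (!err)
    	printf("retval %u, duration %u ns\n", topts.retval, topts.duration);
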
@@ -5076,6 +5076,16 @@ union bpf_attr {
  *		associated to *xdp_md*, at *offset*.
  *	Return
  *		0 on success, or a negative error in case of failure.
+ *
+ * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
+ *	Description
+ *		Read *size* bytes from user space address *user_ptr* in *tsk*'s
+ *		address space, and stores the data in *dst*. *flags* is not
+ *		used yet and is provided for future extensibility. This helper
+ *		can only be used by sleepable programs.
+ *	Return
+ *		0 on success, or a negative error in case of failure. On error
+ *		*dst* buffer is zeroed out.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5269,6 +5279,7 @@ union bpf_attr {
 	FN(xdp_get_buff_len),		\
 	FN(xdp_load_bytes),		\
 	FN(xdp_store_bytes),		\
+	FN(copy_from_user_task),	\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5563,7 +5574,8 @@ struct bpf_sock {
 	__u32 src_ip4;
 	__u32 src_ip6[4];
 	__u32 src_port;		/* host byte order */
-	__u32 dst_port;		/* network byte order */
+	__be16 dst_port;	/* network byte order */
+	__u16 :16;		/* zero padding */
 	__u32 dst_ip4;
 	__u32 dst_ip6[4];
 	__u32 state;
@@ -6441,7 +6453,8 @@ struct bpf_sk_lookup {
 	__u32 protocol;		/* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
 	__u32 remote_ip4;	/* Network byte order */
 	__u32 remote_ip6[4];	/* Network byte order */
-	__u32 remote_port;	/* Network byte order */
+	__be16 remote_port;	/* Network byte order */
+	__u16 :16;		/* Zero padding */
 	__u32 local_ip4;	/* Network byte order */
 	__u32 local_ip6[4];	/* Network byte order */
 	__u32 local_port;	/* Host byte order */

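The new helper is only callable from sleepable programs, which this series extends to BPF iterators (SEC("iter.s/...")). A sketch of reading another task's user memory from a sleepable task iterator; the user_ptr global and output format are illustrative, and vmlinux.h plus CO-RE are assumed:

    const volatile __u64 user_ptr;	/* address supplied by the loader; illustrative */

    SEC("iter.s/task")
    int dump_task_mem(struct bpf_iter__task *ctx)
    {
    	struct task_struct *task = ctx->task;
    	char buf[16] = {};

    	if (!task || !user_ptr)
    		return 0;
    	/* buf is zeroed on failure, per the documented error behavior */
    	if (bpf_copy_from_user_task(buf, sizeof(buf), (void *)user_ptr, task, 0))
    		return 0;
    	bpf_printk("pid %d: %s", task->pid, buf);
    	return 0;
    }
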
@@ -131,7 +131,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
 			   sort -u | wc -l)
 VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
 			      sed 's/\[.*\]//' | \
-			      awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
+			      awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \
 			      grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
 
 CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
@@ -194,7 +194,7 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
 		    sort -u > $(OUTPUT)libbpf_global_syms.tmp;		 \
 		readelf --dyn-syms --wide $(OUTPUT)libbpf.so |		 \
 		    sed 's/\[.*\]//' |					 \
-		    awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'|	 \
+		    awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}'|	 \
 		    grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 |		 \
 		    sort -u > $(OUTPUT)libbpf_versioned_syms.tmp;	 \
 		diff -u $(OUTPUT)libbpf_global_syms.tmp			 \

@@ -453,12 +453,14 @@ struct bpf_prog_test_run_attr {
 			     * out: length of cxt_out */
 };
 
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
 LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
 
 /*
  * bpf_prog_test_run does not check that data_out is large enough. Consider
- * using bpf_prog_test_run_xattr instead.
+ * using bpf_prog_test_run_opts instead.
  */
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
 LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
 				 __u32 size, void *data_out, __u32 *size_out,
 				 __u32 *retval, __u32 *duration);

@@ -76,6 +76,9 @@
 #define __PT_RC_REG ax
 #define __PT_SP_REG sp
 #define __PT_IP_REG ip
+/* syscall uses r10 for PARM4 */
+#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
+#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
 
 #else
 
@@ -105,6 +108,9 @@
 #define __PT_RC_REG rax
 #define __PT_SP_REG rsp
 #define __PT_IP_REG rip
+/* syscall uses r10 for PARM4 */
+#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
+#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
 
 #endif /* __i386__ */
 
@@ -112,6 +118,10 @@
 
 #elif defined(bpf_target_s390)
 
+struct pt_regs___s390 {
+	unsigned long orig_gpr2;
+};
+
 /* s390 provides user_pt_regs instead of struct pt_regs to userspace */
 #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
 #define __PT_PARM1_REG gprs[2]
@@ -124,6 +134,8 @@
 #define __PT_RC_REG gprs[2]
 #define __PT_SP_REG gprs[15]
 #define __PT_IP_REG psw.addr
+#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
+#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
 
 #elif defined(bpf_target_arm)
 
@@ -140,6 +152,10 @@
 
 #elif defined(bpf_target_arm64)
 
+struct pt_regs___arm64 {
+	unsigned long orig_x0;
+};
+
 /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
 #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
 #define __PT_PARM1_REG regs[0]
@@ -152,6 +168,8 @@
 #define __PT_RC_REG regs[0]
 #define __PT_SP_REG sp
 #define __PT_IP_REG pc
+#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
+#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
 
 #elif defined(bpf_target_mips)
 
@@ -178,6 +196,8 @@
 #define __PT_RC_REG gpr[3]
 #define __PT_SP_REG sp
 #define __PT_IP_REG nip
+/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
 
 #elif defined(bpf_target_sparc)
 
@@ -206,10 +226,12 @@
 #define __PT_PARM4_REG a3
 #define __PT_PARM5_REG a4
 #define __PT_RET_REG ra
-#define __PT_FP_REG fp
+#define __PT_FP_REG s0
 #define __PT_RC_REG a5
 #define __PT_SP_REG sp
-#define __PT_IP_REG epc
+#define __PT_IP_REG pc
+/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
 
 #endif
 
@@ -263,6 +285,26 @@ struct pt_regs;
 
 #endif
 
+#ifndef PT_REGS_PARM1_SYSCALL
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x)
+#endif
+#define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x)
+#define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x)
+#ifndef PT_REGS_PARM4_SYSCALL
+#define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x)
+#endif
+#define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x)
+
+#ifndef PT_REGS_PARM1_CORE_SYSCALL
+#define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x)
+#endif
+#define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x)
+#define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x)
+#ifndef PT_REGS_PARM4_CORE_SYSCALL
+#define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x)
+#endif
+#define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x)
+
 #else /* defined(bpf_target_defined) */
 
 #define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
@@ -290,8 +332,30 @@ struct pt_regs;
 #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
 #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
 
+#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
 #endif /* defined(bpf_target_defined) */
 
+/*
+ * When invoked from a syscall handler kprobe, returns a pointer to a
+ * struct pt_regs containing syscall arguments and suitable for passing to
+ * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
+ */
+#ifndef PT_REGS_SYSCALL_REGS
+/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
+#endif
+
 #ifndef ___bpf_concat
 #define ___bpf_concat(a, b) a ## b
 #endif
@@ -406,4 +470,39 @@ typeof(name(0)) name(struct pt_regs *ctx) \
 }									    \
 static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
 
+#define ___bpf_syscall_args0()           ctx
+#define ___bpf_syscall_args1(x)          ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
+#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
+#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
+#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
+#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
+#define ___bpf_syscall_args(args...)     ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KPROBE_SYSCALL is a variant of BPF_KPROBE, which is intended for
+ * tracing syscall functions, like __x64_sys_close. It hides the underlying
+ * platform-specific low-level way of getting syscall input arguments from
+ * struct pt_regs, and provides a familiar typed and named function arguments
+ * syntax and semantics of accessing syscall input parameters.
+ *
+ * Original struct pt_regs* context is preserved as 'ctx' argument. This might
+ * be necessary when using BPF helpers like bpf_perf_event_output().
+ *
+ * This macro relies on BPF CO-RE support.
+ */
+#define BPF_KPROBE_SYSCALL(name, args...)				    \
+name(struct pt_regs *ctx);						    \
+static __attribute__((always_inline)) typeof(name(0))			    \
+____##name(struct pt_regs *ctx, ##args);				    \
+typeof(name(0)) name(struct pt_regs *ctx)				    \
+{									    \
+	struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx);		    \
+	_Pragma("GCC diagnostic push")					    \
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
+	return ____##name(___bpf_syscall_args(args));			    \
+	_Pragma("GCC diagnostic pop")					    \
+}									    \
+static __attribute__((always_inline)) typeof(name(0))			    \
+____##name(struct pt_regs *ctx, ##args)
+
 #endif

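Taken together, PT_REGS_SYSCALL_REGS() and the ___bpf_syscall_args chain let BPF_KPROBE_SYSCALL expand to typed, named syscall arguments on every supported architecture. Usage mirrors the selftest added in this series; the x86-64 syscall symbol is shown, other arches use their own prefix:

    SEC("kprobe/__x64_sys_close")
    int BPF_KPROBE_SYSCALL(handle_close, int fd)
    {
    	/* 'fd' is the first syscall argument; 'ctx' is still in scope */
    	bpf_printk("close(%d)", fd);
    	return 0;
    }
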
@@ -147,11 +147,10 @@ LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
 LIBBPF_API int btf__fd(const struct btf *btf);
 LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__raw_data() instead")
-LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
 LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
 LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
 LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
+LIBBPF_DEPRECATED_SINCE(0, 7, "this API is not necessary when BTF-defined maps are used")
 LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
 				    __u32 expected_key_size,
 				    __u32 expected_value_size,
@@ -159,8 +158,7 @@ LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
 
 LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
 LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
-LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
-					     __u32 *size);
+LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
 LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
 int btf_ext__reloc_func_info(const struct btf *btf,
 			     const struct btf_ext *btf_ext,
@@ -171,8 +169,10 @@ int btf_ext__reloc_line_info(const struct btf *btf,
 			     const struct btf_ext *btf_ext,
 			     const char *sec_name, __u32 insns_cnt,
 			     void **line_info, __u32 *cnt);
-LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
-LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info is deprecated; write custom func_info parsing to fetch rec_size")
+__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info is deprecated; write custom line_info parsing to fetch rec_size")
+__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
 
 LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
 LIBBPF_API int btf__add_str(struct btf *btf, const char *s);

@@ -1861,14 +1861,16 @@ static int btf_dump_array_data(struct btf_dump *d,
 {
 	const struct btf_array *array = btf_array(t);
 	const struct btf_type *elem_type;
-	__u32 i, elem_size = 0, elem_type_id;
+	__u32 i, elem_type_id;
+	__s64 elem_size;
 	bool is_array_member;
 
 	elem_type_id = array->type;
 	elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
 	elem_size = btf__resolve_size(d->btf, elem_type_id);
 	if (elem_size <= 0) {
-		pr_warn("unexpected elem size %d for array type [%u]\n", elem_size, id);
+		pr_warn("unexpected elem size %zd for array type [%u]\n",
+			(ssize_t)elem_size, id);
 		return -EINVAL;
 	}

@@ -156,14 +156,6 @@ enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;
 
 int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
 {
-	/* __LIBBPF_STRICT_LAST is the last power-of-2 value used + 1, so to
-	 * get all possible values we compensate last +1, and then (2*x - 1)
-	 * to get the bit mask
-	 */
-	if (mode != LIBBPF_STRICT_ALL
-	    && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1)))
-		return errno = EINVAL, -EINVAL;
-
 	libbpf_mode = mode;
 	return 0;
 }
@@ -237,6 +229,8 @@ enum sec_def_flags {
 	SEC_SLOPPY_PFX = 16,
 	/* BPF program support non-linear XDP buffer */
 	SEC_XDP_FRAGS = 32,
+	/* deprecated sec definitions not supposed to be used */
+	SEC_DEPRECATED = 64,
 };
 
 struct bpf_sec_def {
@@ -4200,9 +4194,12 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
 
 	if (!bpf_map__is_internal(map)) {
 		pr_warn("Use of BPF_ANNOTATE_KV_PAIR is deprecated, use BTF-defined maps in .maps section instead\n");
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
 		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
 					   def->value_size, &key_type_id,
 					   &value_type_id);
+#pragma GCC diagnostic pop
 	} else {
 		/*
 		 * LLVM annotates global data differently in BTF, that is,
@@ -6575,6 +6572,10 @@ static int libbpf_preload_prog(struct bpf_program *prog,
 	if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
 		opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
 
+	if (def & SEC_DEPRECATED)
+		pr_warn("SEC(\"%s\") is deprecated, please see https://github.com/libbpf/libbpf/wiki/Libbpf-1.0-migration-guide#bpf-program-sec-annotation-deprecations for details\n",
+			prog->sec_name);
+
 	if ((prog->type == BPF_PROG_TYPE_TRACING ||
 	     prog->type == BPF_PROG_TYPE_LSM ||
 	     prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
@@ -7896,10 +7897,8 @@ int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
 	return 0;
 }
 
-const char *bpf_map__get_pin_path(const struct bpf_map *map)
-{
-	return map->pin_path;
-}
+__alias(bpf_map__pin_path)
+const char *bpf_map__get_pin_path(const struct bpf_map *map);
 
 const char *bpf_map__pin_path(const struct bpf_map *map)
 {
@@ -8464,7 +8463,10 @@ static int bpf_program_nth_fd(const struct bpf_program *prog, int n)
 	return fd;
 }
 
-enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
+__alias(bpf_program__type)
+enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
+
+enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
 {
 	return prog->type;
 }
@@ -8508,8 +8510,10 @@ BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
 BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
 
-enum bpf_attach_type
-bpf_program__get_expected_attach_type(const struct bpf_program *prog)
+__alias(bpf_program__expected_attach_type)
+enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
+
+enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
 {
 	return prog->expected_attach_type;
 }
@@ -8593,7 +8597,7 @@ static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
 	SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
 	SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
-	SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
 	SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
 	SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp),
 	SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp),
@@ -8612,11 +8616,14 @@ static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
 	SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
 	SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
 	SEC_DEF("iter.s/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
 	SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
 	SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
-	SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
+	SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
+	SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
 	SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
-	SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
+	SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
+	SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
 	SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
 	SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
 	SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
@@ -9459,7 +9466,7 @@ static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
 	open_attr.file = attr->file;
 	open_attr.prog_type = attr->prog_type;
 
-	obj = bpf_object__open_xattr(&open_attr);
+	obj = __bpf_object__open_xattr(&open_attr, 0);
 	err = libbpf_get_error(obj);
 	if (err)
 		return libbpf_err(-ENOENT);
@@ -9476,7 +9483,7 @@ static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
 			bpf_program__set_expected_attach_type(prog,
 							      attach_type);
 		}
-		if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
+		if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) {
 			/*
 			 * we haven't guessed from section name and user
 			 * didn't provide a fallback type, too bad...
@@ -9493,7 +9500,7 @@ static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
 	}
 
 	bpf_object__for_each_map(map, obj) {
-		if (!bpf_map__is_offload_neutral(map))
+		if (map->def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
 			map->map_ifindex = attr->ifindex;
 	}
 
@@ -10527,7 +10534,7 @@ bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id,
 		return libbpf_err_ptr(-ENOMEM);
 	link->detach = &bpf_link__detach_fd;
 
-	attach_type = bpf_program__get_expected_attach_type(prog);
+	attach_type = bpf_program__expected_attach_type(prog);
 	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
 	if (link_fd < 0) {
 		link_fd = -errno;

@@ -180,9 +180,11 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
 		     const struct bpf_object_open_opts *opts);
 
 /* deprecated bpf_object__open variants */
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open_mem() instead")
 LIBBPF_API struct bpf_object *
 bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
 			const char *name);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open_file() instead")
 LIBBPF_API struct bpf_object *
 bpf_object__open_xattr(struct bpf_object_open_attr *attr);
 
@@ -244,8 +246,10 @@ struct bpf_object *bpf_object__next(struct bpf_object *prev);
 	     (pos) = (tmp), (tmp) = bpf_object__next(tmp))
 
 typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
+LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
 LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
 				    bpf_object_clear_priv_t clear_priv);
+LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
 LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);
 
 LIBBPF_API int
@@ -277,9 +281,10 @@ bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog)
 
 typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);
 
+LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
 LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
 				     bpf_program_clear_priv_t clear_priv);
-
+LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
 LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
 LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
 					 __u32 ifindex);
@@ -591,26 +596,39 @@ LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);
 /*
  * Adjust type of BPF program. Default is kprobe.
  */
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
 LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);
 
-LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
+LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);
 LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
 				      enum bpf_prog_type type);
 
 LIBBPF_API enum bpf_attach_type
-bpf_program__get_expected_attach_type(const struct bpf_program *prog);
+bpf_program__expected_attach_type(const struct bpf_program *prog);
 LIBBPF_API void
 bpf_program__set_expected_attach_type(struct bpf_program *prog,
 				      enum bpf_attach_type type);
@@ -631,18 +649,31 @@ LIBBPF_API int
 bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
 			       const char *attach_func_name);
 
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
 LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog);
 
 /*
@@ -716,6 +747,7 @@ LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
 /* get/set map size (max_entries) */
 LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__set_max_entries() instead")
 LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
 /* get/set map flags */
 LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
@@ -740,8 +772,10 @@ LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
 
 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
+LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
 LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
 				 bpf_map_clear_priv_t clear_priv);
+LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
 LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
 					  const void *data, size_t size);
@@ -758,7 +792,6 @@ LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
  */
 LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
-LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
 LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);
 LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
 LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);

|
|||
LIBBPF_0.7.0 {
|
||||
global:
|
||||
bpf_btf_load;
|
||||
bpf_program__expected_attach_type;
|
||||
bpf_program__log_buf;
|
||||
bpf_program__log_level;
|
||||
bpf_program__set_log_buf;
|
||||
bpf_program__set_log_level;
|
||||
bpf_program__type;
|
||||
bpf_xdp_attach;
|
||||
bpf_xdp_detach;
|
||||
bpf_xdp_query;
|
||||
|
|
|
@@ -92,6 +92,9 @@
 # define offsetofend(TYPE, FIELD) \
 	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
 #endif
+#ifndef __alias
+#define __alias(symbol) __attribute__((alias(#symbol)))
+#endif
 
 /* Check whether a string `str` has prefix `pfx`, regardless if `pfx` is
  * a string literal known at compilation time or char * pointer known only at

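The new __alias() wrapper is what lets libbpf.c keep exporting the old *__get_* names as ELF aliases of the renamed functions (see the libbpf.c hunks above) with no duplicated code. A minimal illustration of the pattern:

    /* canonical implementation */
    const char *bpf_map__pin_path(const struct bpf_map *map)
    {
    	return map->pin_path;
    }

    /* old name resolves to the same symbol; no wrapper body needed */
    __alias(bpf_map__pin_path)
    const char *bpf_map__get_pin_path(const struct bpf_map *map);
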
@@ -86,6 +86,23 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
 
 #define DECLARE_LIBBPF_OPTS LIBBPF_OPTS
 
+/* "Discouraged" APIs which don't follow consistent libbpf naming patterns.
+ * They are normally a trivial aliases or wrappers for proper APIs and are
+ * left to minimize unnecessary disruption for users of libbpf. But they
+ * shouldn't be used going forward.
+ */
+
+struct bpf_program;
+struct bpf_map;
+struct btf;
+struct btf_ext;
+
+LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
+LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
+LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
+LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif

@@ -70,19 +70,85 @@ static inline int skel_closenz(int fd)
 	return -EINVAL;
 }

+#ifndef offsetofend
+#define offsetofend(TYPE, MEMBER) \
+	(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
+#endif
+
+static inline int skel_map_create(enum bpf_map_type map_type,
+				  const char *map_name,
+				  __u32 key_size,
+				  __u32 value_size,
+				  __u32 max_entries)
+{
+	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+	union bpf_attr attr;
+
+	memset(&attr, 0, attr_sz);
+
+	attr.map_type = map_type;
+	strncpy(attr.map_name, map_name, sizeof(attr.map_name));
+	attr.key_size = key_size;
+	attr.value_size = value_size;
+	attr.max_entries = max_entries;
+
+	return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz);
+}
+
+static inline int skel_map_update_elem(int fd, const void *key,
+				       const void *value, __u64 flags)
+{
+	const size_t attr_sz = offsetofend(union bpf_attr, flags);
+	union bpf_attr attr;
+
+	memset(&attr, 0, attr_sz);
+	attr.map_fd = fd;
+	attr.key = (long) key;
+	attr.value = (long) value;
+	attr.flags = flags;
+
+	return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
+}
+
+static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
+{
+	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
+	union bpf_attr attr;
+
+	memset(&attr, 0, attr_sz);
+	attr.raw_tracepoint.name = (long) name;
+	attr.raw_tracepoint.prog_fd = prog_fd;
+
+	return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
+}
+
+static inline int skel_link_create(int prog_fd, int target_fd,
+				   enum bpf_attach_type attach_type)
+{
+	const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);
+	union bpf_attr attr;
+
+	memset(&attr, 0, attr_sz);
+	attr.link_create.prog_fd = prog_fd;
+	attr.link_create.target_fd = target_fd;
+	attr.link_create.attach_type = attach_type;
+
+	return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
+}
+
 static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
 {
 	int map_fd = -1, prog_fd = -1, key = 0, err;
 	union bpf_attr attr;

-	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1, NULL);
+	map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
 	if (map_fd < 0) {
 		opts->errstr = "failed to create loader map";
 		err = -errno;
 		goto out;
 	}

-	err = bpf_map_update_elem(map_fd, &key, opts->data, 0);
+	err = skel_map_update_elem(map_fd, &key, opts->data, 0);
 	if (err < 0) {
 		opts->errstr = "failed to update loader map";
 		err = -errno;

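A detail worth calling out in the helpers above: sizing the attribute with offsetofend() means the skeleton only claims to pass the prefix of union bpf_attr that it actually initializes, so binaries built against older UAPI headers never tell the kernel they filled fields they don't know about. A standalone sketch of the idea; struct demo_attr is a hypothetical stand-in for union bpf_attr:

    #include <stdio.h>
    #include <stddef.h>

    /* same definition the skeleton header guards with #ifndef */
    #define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))

    struct demo_attr {            /* hypothetical stand-in for union bpf_attr */
        unsigned int map_type;
        unsigned int key_size;
        unsigned int value_size;
        unsigned int max_entries;
        unsigned long long map_extra;
        /* fields added by newer kernels would follow here */
    };

    int main(void)
    {
        /* size of the prefix up to and including map_extra: this is what
         * skel_map_create() hands to the bpf() syscall */
        printf("attr_sz = %zu\n", offsetofend(struct demo_attr, map_extra));
        return 0;
    }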
@@ -13,7 +13,7 @@ static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
 {
 	struct bpf_object *obj;

-	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, NULL);
+	obj = bpf_object__open_mem(obj_buf, obj_buf_sz, NULL);
 	if (libbpf_get_error(obj))
 		return TEST_FAIL;
 	bpf_object__close(obj);

@@ -54,6 +54,7 @@ static bool libbpf_initialized;
 struct bpf_object *
 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
 {
+	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
 	struct bpf_object *obj;

 	if (!libbpf_initialized) {

@@ -61,7 +62,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
 		libbpf_initialized = true;
 	}

-	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
+	obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
 	if (IS_ERR_OR_NULL(obj)) {
 		pr_debug("bpf: failed to load buffer\n");
 		return ERR_PTR(-EINVAL);

@@ -72,6 +73,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)

 struct bpf_object *bpf__prepare_load(const char *filename, bool source)
 {
+	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
 	struct bpf_object *obj;

 	if (!libbpf_initialized) {

@@ -94,7 +96,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
 			return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
 		} else
 			pr_debug("bpf: successful builtin compilation\n");
-		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
+		obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);

 		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
 			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

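The pattern in these perf hunks is the replacement of bpf_object__open_buffer() with bpf_object__open_mem(), which takes the object name via bpf_object_open_opts instead of a positional argument. A sketch of the new call shape (open_from_buffer() and the object name are illustrative):

    #include <bpf/libbpf.h>

    /* open a BPF object from an in-memory ELF buffer; buf/buf_sz are
     * assumed to come from the caller, e.g. an embedded blob */
    static struct bpf_object *open_from_buffer(const void *buf, size_t buf_sz)
    {
        LIBBPF_OPTS(bpf_object_open_opts, opts,
            .object_name = "my_obj",   /* name now travels via opts */
        );
        struct bpf_object *obj;

        obj = bpf_object__open_mem(buf, buf_sz, &opts);
        if (libbpf_get_error(obj))
            return NULL;
        return obj;
    }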
@@ -654,11 +656,11 @@ int bpf__probe(struct bpf_object *obj)
 		}

 		if (priv->is_tp) {
-			bpf_program__set_tracepoint(prog);
+			bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
 			continue;
 		}

-		bpf_program__set_kprobe(prog);
+		bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
 		pev = &priv->pev;

 		err = convert_perf_probe_events(pev, 1);

@@ -330,7 +330,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \

 LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
 	test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
-	map_ptr_kern.c core_kern.c
+	map_ptr_kern.c core_kern.c core_kern_overflow.c
 # Generate both light skeleton and libbpf skeleton for these
 LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c
 SKEL_BLACKLIST += $$(LSKELS)

@@ -206,6 +206,8 @@ btf_tag test and Clang version

 The btf_tag selftest requires LLVM support to recognize the btf_decl_tag and
 btf_type_tag attributes. They are introduced in `Clang 14` [0_, 1_].
+The subtests ``btf_type_tag_user_{mod1, mod2, vmlinux}`` also requires
+pahole version ``1.23``.

 Without them, the btf_tag selftest will be skipped and you will observe:

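For reference, the two attributes look roughly like this in C source compiled with Clang 14 or newer; a sketch, not code from the selftests:

    /* btf_decl_tag annotates a declaration; btf_type_tag annotates a type.
     * Both are recorded in BTF when built with Clang 14+ and -g. */
    #define __tag(x)  __attribute__((btf_decl_tag(x)))
    #define __user_t  __attribute__((btf_type_tag("user")))

    struct pkt_ctx {
        int len __tag("len_field");          /* BTF_KIND_DECL_TAG on the member */
    };

    int process(struct pkt_ctx __user_t *ctx); /* BTF_KIND_TYPE_TAG on the pointee */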
@@ -151,7 +151,7 @@ static struct ringbuf_bench *ringbuf_setup_skeleton(void)
 	/* record data + header take 16 bytes */
 	skel->rodata->wakeup_data_size = args.sample_rate * 16;

-	bpf_map__resize(skel->maps.ringbuf, args.ringbuf_sz);
+	bpf_map__set_max_entries(skel->maps.ringbuf, args.ringbuf_sz);

 	if (ringbuf_bench__load(skel)) {
 		fprintf(stderr, "failed to load skeleton\n");

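bpf_map__resize() is deprecated in favor of bpf_map__set_max_entries(), which does the same thing under a name matching the map attribute it sets. A sketch of the open/resize/load ordering (setup() is illustrative; the resize must happen between open and load, while map definitions are still mutable):

    static struct ringbuf_bench *setup(__u32 ringbuf_sz)
    {
        struct ringbuf_bench *skel = ringbuf_bench__open();

        if (!skel)
            return NULL;
        /* replaces the deprecated bpf_map__resize() */
        bpf_map__set_max_entries(skel->maps.ringbuf, ringbuf_sz);
        if (ringbuf_bench__load(skel)) {
            ringbuf_bench__destroy(skel);
            return NULL;
        }
        return skel;
    }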
@@ -154,7 +154,6 @@ static void *uprobe_producer_without_nop(void *input)
 static void usetup(bool use_retprobe, bool use_nop)
 {
 	size_t uprobe_offset;
-	ssize_t base_addr;
 	struct bpf_link *link;

 	setup_libbpf();

@@ -165,11 +164,10 @@ static void usetup(bool use_retprobe, bool use_nop)
 		exit(1);
 	}

-	base_addr = get_base_addr();
 	if (use_nop)
-		uprobe_offset = get_uprobe_offset(&uprobe_target_with_nop, base_addr);
+		uprobe_offset = get_uprobe_offset(&uprobe_target_with_nop);
 	else
-		uprobe_offset = get_uprobe_offset(&uprobe_target_without_nop, base_addr);
+		uprobe_offset = get_uprobe_offset(&uprobe_target_without_nop);

 	link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
 					  use_retprobe,

@@ -13,6 +13,10 @@
 #define CREATE_TRACE_POINTS
 #include "bpf_testmod-events.h"

+typedef int (*func_proto_typedef)(long);
+typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
+typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
+
 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;

 noinline void

@@ -21,6 +25,27 @@ bpf_testmod_test_mod_kfunc(int i)
 	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
 }

+struct bpf_testmod_btf_type_tag_1 {
+	int a;
+};
+
+struct bpf_testmod_btf_type_tag_2 {
+	struct bpf_testmod_btf_type_tag_1 __user *p;
+};
+
+noinline int
+bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
+	BTF_TYPE_EMIT(func_proto_typedef);
+	BTF_TYPE_EMIT(func_proto_typedef_nested1);
+	BTF_TYPE_EMIT(func_proto_typedef_nested2);
+	return arg->a;
+}
+
+noinline int
+bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
+	return arg->p->a;
+}
+
 noinline int bpf_testmod_loop_test(int n)
 {
 	int i, sum = 0;

@@ -7,18 +7,18 @@
 static void test_add(struct atomics_lskel *skel)
 {
 	int err, prog_fd;
-	__u32 duration = 0, retval;
 	int link_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);

 	link_fd = atomics_lskel__add__attach(skel);
 	if (!ASSERT_GT(link_fd, 0, "attach(add)"))
 		return;

 	prog_fd = skel->progs.add.prog_fd;
-	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
-				NULL, NULL, &retval, &duration);
-	if (CHECK(err || retval, "test_run add",
-		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		goto cleanup;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
 		goto cleanup;

 	ASSERT_EQ(skel->data->add64_value, 3, "add64_value");

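The pattern repeated through this file (and several tests below) is the move from the variadic bpf_prog_test_run() to bpf_prog_test_run_opts(), where inputs and outputs live in one opts struct. A condensed sketch of the new shape (run_once() and its parameters are illustrative):

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    /* run a program once with an input packet; pkt and prog_fd are
     * assumed to be set up by the caller */
    static int run_once(int prog_fd, void *pkt, size_t pkt_sz)
    {
        LIBBPF_OPTS(bpf_test_run_opts, topts,
            .data_in = pkt,
            .data_size_in = pkt_sz,
            .repeat = 1,
        );
        int err = bpf_prog_test_run_opts(prog_fd, &topts);

        if (err)
            return err;          /* syscall-level failure */
        return topts.retval;     /* the program's return code */
    }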
@@ -39,19 +39,18 @@ cleanup:
 static void test_sub(struct atomics_lskel *skel)
 {
 	int err, prog_fd;
-	__u32 duration = 0, retval;
 	int link_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);

 	link_fd = atomics_lskel__sub__attach(skel);
 	if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
 		return;

 	prog_fd = skel->progs.sub.prog_fd;
-	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
-				NULL, NULL, &retval, &duration);
-	if (CHECK(err || retval, "test_run sub",
-		  "err %d errno %d retval %d duration %d\n",
-		  err, errno, retval, duration))
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		goto cleanup;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
 		goto cleanup;

 	ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");

@@ -72,18 +71,18 @@ cleanup:
 static void test_and(struct atomics_lskel *skel)
 {
 	int err, prog_fd;
-	__u32 duration = 0, retval;
 	int link_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);

 	link_fd = atomics_lskel__and__attach(skel);
 	if (!ASSERT_GT(link_fd, 0, "attach(and)"))
 		return;

 	prog_fd = skel->progs.and.prog_fd;
-	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
-				NULL, NULL, &retval, &duration);
-	if (CHECK(err || retval, "test_run and",
-		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		goto cleanup;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
 		goto cleanup;

 	ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");

@@ -100,19 +99,18 @@ cleanup:
 static void test_or(struct atomics_lskel *skel)
 {
 	int err, prog_fd;
-	__u32 duration = 0, retval;
 	int link_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);

 	link_fd = atomics_lskel__or__attach(skel);
 	if (!ASSERT_GT(link_fd, 0, "attach(or)"))
 		return;

 	prog_fd = skel->progs.or.prog_fd;
-	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
-				NULL, NULL, &retval, &duration);
-	if (CHECK(err || retval, "test_run or",
-		  "err %d errno %d retval %d duration %d\n",
-		  err, errno, retval, duration))
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		goto cleanup;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
 		goto cleanup;

 	ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");

@@ -129,18 +127,18 @@ cleanup:
 static void test_xor(struct atomics_lskel *skel)
 {
 	int err, prog_fd;
-	__u32 duration = 0, retval;
 	int link_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);

 	link_fd = atomics_lskel__xor__attach(skel);
 	if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
 		return;

 	prog_fd = skel->progs.xor.prog_fd;
-	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
-				NULL, NULL, &retval, &duration);
-	if (CHECK(err || retval, "test_run xor",
-		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		goto cleanup;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
 		goto cleanup;

 	ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");

@@ -157,18 +155,18 @@ cleanup:
 static void test_cmpxchg(struct atomics_lskel *skel)
 {
 	int err, prog_fd;
-	__u32 duration = 0, retval;
 	int link_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);

 	link_fd = atomics_lskel__cmpxchg__attach(skel);
 	if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
 		return;

 	prog_fd = skel->progs.cmpxchg.prog_fd;
-	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
-				NULL, NULL, &retval, &duration);
-	if (CHECK(err || retval, "test_run cmpxchg",
-		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		goto cleanup;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
 		goto cleanup;

 	ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");

@@ -186,18 +184,18 @@ cleanup:
 static void test_xchg(struct atomics_lskel *skel)
 {
 	int err, prog_fd;
-	__u32 duration = 0, retval;
 	int link_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);

 	link_fd = atomics_lskel__xchg__attach(skel);
 	if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
 		return;

 	prog_fd = skel->progs.xchg.prog_fd;
-	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
-				NULL, NULL, &retval, &duration);
-	if (CHECK(err || retval, "test_run xchg",
-		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		goto cleanup;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
 		goto cleanup;

 	ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");

@@ -5,9 +5,10 @@
 /* this is how USDT semaphore is actually defined, except volatile modifier */
 volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));

-/* attach point */
-static void method(void) {
-	return ;
+/* uprobe attach point */
+static void trigger_func(void)
+{
+	asm volatile ("");
 }

 void test_attach_probe(void)

@@ -17,8 +18,7 @@ void test_attach_probe(void)
 	struct bpf_link *kprobe_link, *kretprobe_link;
 	struct bpf_link *uprobe_link, *uretprobe_link;
 	struct test_attach_probe* skel;
-	size_t uprobe_offset;
-	ssize_t base_addr, ref_ctr_offset;
+	ssize_t uprobe_offset, ref_ctr_offset;
 	bool legacy;

 	/* Check if new-style kprobe/uprobe API is supported.

@@ -34,11 +34,9 @@ void test_attach_probe(void)
 	 */
 	legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0;

-	base_addr = get_base_addr();
-	if (CHECK(base_addr < 0, "get_base_addr",
-		  "failed to find base addr: %zd", base_addr))
+	uprobe_offset = get_uprobe_offset(&trigger_func);
+	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
 		return;
-	uprobe_offset = get_uprobe_offset(&method, base_addr);

 	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
 	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))

@@ -103,7 +101,7 @@ void test_attach_probe(void)
 		goto cleanup;

 	/* trigger & validate uprobe & uretprobe */
-	method();
+	trigger_func();

 	if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",
 		  "wrong uprobe res: %d\n", skel->bss->uprobe_res))

@@ -8,6 +8,12 @@
 #include <test_progs.h>
 #include "test_bpf_cookie.skel.h"

+/* uprobe attach point */
+static void trigger_func(void)
+{
+	asm volatile ("");
+}
+
 static void kprobe_subtest(struct test_bpf_cookie *skel)
 {
 	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);

@@ -62,11 +68,11 @@ static void uprobe_subtest(struct test_bpf_cookie *skel)
 	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
 	struct bpf_link *link1 = NULL, *link2 = NULL;
 	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
-	size_t uprobe_offset;
-	ssize_t base_addr;
+	ssize_t uprobe_offset;

-	base_addr = get_base_addr();
-	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr);
+	uprobe_offset = get_uprobe_offset(&trigger_func);
+	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
+		goto cleanup;

 	/* attach two uprobes */
 	opts.bpf_cookie = 0x100;

@@ -99,7 +105,7 @@ static void uprobe_subtest(struct test_bpf_cookie *skel)
 		goto cleanup;

 	/* trigger uprobe && uretprobe */
-	get_base_addr();
+	trigger_func();

 	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
 	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");

@@ -138,6 +138,24 @@ static void test_task(void)
 	bpf_iter_task__destroy(skel);
 }

+static void test_task_sleepable(void)
+{
+	struct bpf_iter_task *skel;
+
+	skel = bpf_iter_task__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
+		return;
+
+	do_dummy_read(skel->progs.dump_task_sleepable);
+
+	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
+		  "num_expected_failure_copy_from_user_task");
+	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
+		  "num_success_copy_from_user_task");
+
+	bpf_iter_task__destroy(skel);
+}
+
 static void test_task_stack(void)
 {
 	struct bpf_iter_task_stack *skel;

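On the BPF side, a sleepable task iterator is what makes the new bpf_copy_from_user_task() helper usable, since the copy may fault and sleep. A hedged sketch of such a program, assuming vmlinux.h and a libbpf new enough to declare the helper; dump_task_arg0 is illustrative, not the selftest's program:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    char _license[] SEC("license") = "GPL";

    /* "iter.s/task" (note the .s) loads the iterator as sleepable */
    SEC("iter.s/task")
    int dump_task_arg0(struct bpf_iter__task *ctx)
    {
        struct task_struct *task = ctx->task;
        unsigned long arg_start;
        char c;

        if (!task || !task->mm)
            return 0;

        arg_start = task->mm->arg_start;
        /* read one byte of the target task's argv from its user memory */
        bpf_copy_from_user_task(&c, sizeof(c), (void *)arg_start, task, 0);
        return 0;
    }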
@@ -1252,6 +1270,8 @@ void test_bpf_iter(void)
 		test_bpf_map();
 	if (test__start_subtest("task"))
 		test_task();
+	if (test__start_subtest("task_sleepable"))
+		test_task_sleepable();
 	if (test__start_subtest("task_stack"))
 		test_task_stack();
 	if (test__start_subtest("task_file"))

@@ -11,7 +11,12 @@ enum {
 void test_bpf_nf_ct(int mode)
 {
 	struct test_bpf_nf *skel;
-	int prog_fd, err, retval;
+	int prog_fd, err;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);

 	skel = test_bpf_nf__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load"))

@@ -22,8 +27,7 @@ void test_bpf_nf_ct(int mode)
 	else
 		prog_fd = bpf_program__fd(skel->progs.nf_skb_ct_test);

-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), NULL, NULL,
-				(__u32 *)&retval, NULL);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
 	if (!ASSERT_OK(err, "bpf_prog_test_run"))
 		goto end;

@@ -3938,6 +3938,25 @@ static struct btf_raw_test raw_tests[] = {
 	.btf_load_err = true,
 	.err_str = "Invalid component_idx",
 },
+{
+	.descr = "decl_tag test #15, func, invalid func proto",
+	.raw_types = {
+		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
+		BTF_DECL_TAG_ENC(NAME_TBD, 3, 0),		/* [2] */
+		BTF_FUNC_ENC(NAME_TBD, 8),			/* [3] */
+		BTF_END_RAW,
+	},
+	BTF_STR_SEC("\0tag\0func"),
+	.map_type = BPF_MAP_TYPE_ARRAY,
+	.map_name = "tag_type_check_btf",
+	.key_size = sizeof(int),
+	.value_size = 4,
+	.key_type_id = 1,
+	.value_type_id = 1,
+	.max_entries = 1,
+	.btf_load_err = true,
+	.err_str = "Invalid type_id",
+},
 {
 	.descr = "type_tag test #1",
 	.raw_types = {

@@ -4561,7 +4580,7 @@ static void do_test_file(unsigned int test_num)
 	btf_ext__free(btf_ext);

 	/* temporary disable LIBBPF_STRICT_MAP_DEFINITIONS to test legacy maps */
-	libbpf_set_strict_mode((__LIBBPF_STRICT_LAST - 1) & ~LIBBPF_STRICT_MAP_DEFINITIONS);
+	libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
 	obj = bpf_object__open(test->file);
 	err = libbpf_get_error(obj);
 	if (CHECK(err, "obj: %d", err))

@@ -1,19 +1,21 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2021 Facebook */
 #include <test_progs.h>
-#include "btf_decl_tag.skel.h"
+#include <bpf/btf.h>
+#include "test_btf_decl_tag.skel.h"

 /* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */
 struct btf_type_tag_test {
 	int **p;
 };
 #include "btf_type_tag.skel.h"
+#include "btf_type_tag_user.skel.h"

 static void test_btf_decl_tag(void)
 {
-	struct btf_decl_tag *skel;
+	struct test_btf_decl_tag *skel;

-	skel = btf_decl_tag__open_and_load();
+	skel = test_btf_decl_tag__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "btf_decl_tag"))
 		return;

@@ -22,7 +24,7 @@ static void test_btf_decl_tag(void)
 		test__skip();
 	}

-	btf_decl_tag__destroy(skel);
+	test_btf_decl_tag__destroy(skel);
 }

 static void test_btf_type_tag(void)

@@ -41,10 +43,101 @@ static void test_btf_type_tag(void)
 	btf_type_tag__destroy(skel);
 }

+static void test_btf_type_tag_mod_user(bool load_test_user1)
+{
+	const char *module_name = "bpf_testmod";
+	struct btf *vmlinux_btf, *module_btf;
+	struct btf_type_tag_user *skel;
+	__s32 type_id;
+	int err;
+
+	if (!env.has_testmod) {
+		test__skip();
+		return;
+	}
+
+	/* skip the test if the module does not have __user tags */
+	vmlinux_btf = btf__load_vmlinux_btf();
+	if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
+		return;
+
+	module_btf = btf__load_module_btf(module_name, vmlinux_btf);
+	if (!ASSERT_OK_PTR(module_btf, "could not load module BTF"))
+		goto free_vmlinux_btf;
+
+	type_id = btf__find_by_name_kind(module_btf, "user", BTF_KIND_TYPE_TAG);
+	if (type_id <= 0) {
+		printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name);
+		test__skip();
+		goto free_module_btf;
+	}
+
+	skel = btf_type_tag_user__open();
+	if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
+		goto free_module_btf;
+
+	bpf_program__set_autoload(skel->progs.test_sys_getsockname, false);
+	if (load_test_user1)
+		bpf_program__set_autoload(skel->progs.test_user2, false);
+	else
+		bpf_program__set_autoload(skel->progs.test_user1, false);
+
+	err = btf_type_tag_user__load(skel);
+	ASSERT_ERR(err, "btf_type_tag_user");
+
+	btf_type_tag_user__destroy(skel);
+
+free_module_btf:
+	btf__free(module_btf);
+free_vmlinux_btf:
+	btf__free(vmlinux_btf);
+}
+
+static void test_btf_type_tag_vmlinux_user(void)
+{
+	struct btf_type_tag_user *skel;
+	struct btf *vmlinux_btf;
+	__s32 type_id;
+	int err;
+
+	/* skip the test if the vmlinux does not have __user tags */
+	vmlinux_btf = btf__load_vmlinux_btf();
+	if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
+		return;
+
+	type_id = btf__find_by_name_kind(vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
+	if (type_id <= 0) {
+		printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
+		test__skip();
+		goto free_vmlinux_btf;
+	}
+
+	skel = btf_type_tag_user__open();
+	if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
+		goto free_vmlinux_btf;
+
+	bpf_program__set_autoload(skel->progs.test_user2, false);
+	bpf_program__set_autoload(skel->progs.test_user1, false);
+
+	err = btf_type_tag_user__load(skel);
+	ASSERT_ERR(err, "btf_type_tag_user");
+
+	btf_type_tag_user__destroy(skel);
+
+free_vmlinux_btf:
+	btf__free(vmlinux_btf);
+}
+
 void test_btf_tag(void)
 {
 	if (test__start_subtest("btf_decl_tag"))
 		test_btf_decl_tag();
 	if (test__start_subtest("btf_type_tag"))
 		test_btf_type_tag();
+	if (test__start_subtest("btf_type_tag_user_mod1"))
+		test_btf_type_tag_mod_user(true);
+	if (test__start_subtest("btf_type_tag_user_mod2"))
+		test_btf_type_tag_mod_user(false);
+	if (test__start_subtest("btf_type_tag_sys_user_vmlinux"))
+		test_btf_type_tag_vmlinux_user();
 }

@@ -79,28 +79,21 @@ static void test_check_mtu_run_xdp(struct test_check_mtu *skel,
 				   struct bpf_program *prog,
 				   __u32 mtu_expect)
 {
-	const char *prog_name = bpf_program__name(prog);
 	int retval_expect = XDP_PASS;
 	__u32 mtu_result = 0;
 	char buf[256] = {};
-	int err;
-	struct bpf_prog_test_run_attr tattr = {
+	int err, prog_fd = bpf_program__fd(prog);
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
 		.repeat = 1,
 		.data_in = &pkt_v4,
 		.data_size_in = sizeof(pkt_v4),
 		.data_out = buf,
 		.data_size_out = sizeof(buf),
-		.prog_fd = bpf_program__fd(prog),
-	};
+	);

-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err != 0, "bpf_prog_test_run",
-		   "prog_name:%s (err %d errno %d retval %d)\n",
-		   prog_name, err, errno, tattr.retval);
-
-	CHECK(tattr.retval != retval_expect, "retval",
-	      "progname:%s unexpected retval=%d expected=%d\n",
-	      prog_name, tattr.retval, retval_expect);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, retval_expect, "retval");

 	/* Extract MTU that BPF-prog got */
 	mtu_result = skel->bss->global_bpf_mtu_xdp;

@@ -139,28 +132,21 @@ static void test_check_mtu_run_tc(struct test_check_mtu *skel,
 				  struct bpf_program *prog,
 				  __u32 mtu_expect)
 {
-	const char *prog_name = bpf_program__name(prog);
 	int retval_expect = BPF_OK;
 	__u32 mtu_result = 0;
 	char buf[256] = {};
-	int err;
-	struct bpf_prog_test_run_attr tattr = {
-		.repeat = 1,
+	int err, prog_fd = bpf_program__fd(prog);
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
 		.data_in = &pkt_v4,
 		.data_size_in = sizeof(pkt_v4),
 		.data_out = buf,
 		.data_size_out = sizeof(buf),
-		.prog_fd = bpf_program__fd(prog),
-	};
+		.repeat = 1,
+	);

-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err != 0, "bpf_prog_test_run",
-		   "prog_name:%s (err %d errno %d retval %d)\n",
-		   prog_name, err, errno, tattr.retval);
-
-	CHECK(tattr.retval != retval_expect, "retval",
-	      "progname:%s unexpected retval=%d expected=%d\n",
-	      prog_name, tattr.retval, retval_expect);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, retval_expect, "retval");

 	/* Extract MTU that BPF-prog got */
 	mtu_result = skel->bss->global_bpf_mtu_tc;

@@ -161,7 +161,7 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)
 	}
 }

-static bool was_decapsulated(struct bpf_prog_test_run_attr *tattr)
+static bool was_decapsulated(struct bpf_test_run_opts *tattr)
 {
 	return tattr->data_size_out < tattr->data_size_in;
 }

@@ -367,12 +367,12 @@ static void close_fds(int *fds, int n)

 static void test_cls_redirect_common(struct bpf_program *prog)
 {
-	struct bpf_prog_test_run_attr tattr = {};
+	LIBBPF_OPTS(bpf_test_run_opts, tattr);
 	int families[] = { AF_INET, AF_INET6 };
 	struct sockaddr_storage ss;
 	struct sockaddr *addr;
 	socklen_t slen;
-	int i, j, err;
+	int i, j, err, prog_fd;
 	int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
 	int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
 	struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];

@@ -394,7 +394,7 @@ static void test_cls_redirect_common(struct bpf_program *prog)
 		goto cleanup;
 	}

-	tattr.prog_fd = bpf_program__fd(prog);
+	prog_fd = bpf_program__fd(prog);
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct test_cfg *test = &tests[i];

@@ -415,7 +415,7 @@ static void test_cls_redirect_common(struct bpf_program *prog)
 		if (CHECK_FAIL(!tattr.data_size_in))
 			continue;

-		err = bpf_prog_test_run_xattr(&tattr);
+		err = bpf_prog_test_run_opts(prog_fd, &tattr);
 		if (CHECK_FAIL(err))
 			continue;

@@ -7,8 +7,22 @@
 void test_core_kern_lskel(void)
 {
 	struct core_kern_lskel *skel;
+	int link_fd;

 	skel = core_kern_lskel__open_and_load();
-	ASSERT_OK_PTR(skel, "open_and_load");
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		return;
+
+	link_fd = core_kern_lskel__core_relo_proto__attach(skel);
+	if (!ASSERT_GT(link_fd, 0, "attach(core_relo_proto)"))
+		goto cleanup;
+
+	/* trigger tracepoints */
+	usleep(1);
+	ASSERT_TRUE(skel->bss->proto_out[0], "bpf_core_type_exists");
+	ASSERT_FALSE(skel->bss->proto_out[1], "!bpf_core_type_exists");
+	ASSERT_TRUE(skel->bss->proto_out[2], "bpf_core_type_exists. nested");
+
+cleanup:
 	core_kern_lskel__destroy(skel);
 }