; RUN: llc -march=bpfel -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK,CHECK-ALU64 %s
; RUN: llc -march=bpfeb -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK,CHECK-ALU64 %s
; RUN: llc -march=bpfel -mattr=+alu32 -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK,CHECK-ALU32 %s
; RUN: llc -march=bpfeb -mattr=+alu32 -filetype=asm -o - %s | FileCheck -check-prefixes=CHECK,CHECK-ALU32 %s
[BPF] do compile-once run-everywhere relocation for bitfields
A bpf-specific clang intrinsic is introduced:
u32 __builtin_preserve_field_info(member_access, info_kind)
Depending on info_kind, different information will
be returned to the program. A relocation is also
recorded for this builtin so that the bpf loader can
patch the instruction on the target host.
This clang intrinsic is used to get certain information
to facilitate struct/union member relocations.
The offset relocation is extended by 4 bytes to
include the relocation kind.
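Concretely, each relocation record then carries four 32-bit fields. A
sketch of the layout (this matches libbpf's struct bpf_field_reloc as I
understand it; the struct and member names come from libbpf, not from
this patch):

  struct bpf_field_reloc {
    __u32 insn_off;       /* byte offset of the instruction to patch */
    __u32 type_id;        /* BTF type id of the containing struct/union */
    __u32 access_str_off; /* offset of the access string, e.g. "0:1:0" */
    __u32 kind;           /* one of the FIELD_* kinds below */
  };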
Currently supported relocation kinds are
enum {
  FIELD_BYTE_OFFSET = 0,
  FIELD_BYTE_SIZE,
  FIELD_EXISTENCE,
  FIELD_SIGNEDNESS,
  FIELD_LSHIFT_U64,
  FIELD_RSHIFT_U64,
};
for __builtin_preserve_field_info. The old
access offset relocation is covered by
FIELD_BYTE_OFFSET = 0.
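As a quick illustration, FIELD_EXISTENCE lets a program test at load
time whether a member exists on the target kernel at all. A minimal
sketch (not from this patch; the struct and field names are
hypothetical):

  struct task_info {
    int pid;
    int new_field;  /* may be absent on older kernels */
  };

  int has_new_field(struct task_info *t) {
    /* Hypothetical example: patched to 1 by the bpf loader if the
       target kernel's task_info really has new_field, 0 otherwise. */
    return __builtin_preserve_field_info(t->new_field, FIELD_EXISTENCE);
  }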
A fuller example:
struct s {
  int a;
  int b1:9;
  int b2:4;
};
enum {
  FIELD_BYTE_OFFSET = 0,
  FIELD_BYTE_SIZE,
  FIELD_EXISTENCE,
  FIELD_SIGNEDNESS,
  FIELD_LSHIFT_U64,
  FIELD_RSHIFT_U64,
};
void bpf_probe_read(void *, unsigned, const void *);
int field_read(struct s *arg) {
  unsigned long long ull = 0;
  unsigned offset = __builtin_preserve_field_info(arg->b2, FIELD_BYTE_OFFSET);
  unsigned size = __builtin_preserve_field_info(arg->b2, FIELD_BYTE_SIZE);
#ifdef USE_PROBE_READ
  bpf_probe_read(&ull, size, (const void *)arg + offset);
  unsigned lshift = __builtin_preserve_field_info(arg->b2, FIELD_LSHIFT_U64);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  lshift = lshift + (size << 3) - 64;
#endif
#else
  switch (size) {
  case 1:
    ull = *(unsigned char *)((void *)arg + offset); break;
  case 2:
    ull = *(unsigned short *)((void *)arg + offset); break;
  case 4:
    ull = *(unsigned int *)((void *)arg + offset); break;
  case 8:
    ull = *(unsigned long long *)((void *)arg + offset); break;
  }
  unsigned lshift = __builtin_preserve_field_info(arg->b2, FIELD_LSHIFT_U64);
#endif
  ull <<= lshift;
  if (__builtin_preserve_field_info(arg->b2, FIELD_SIGNEDNESS))
    return (long long)ull >> __builtin_preserve_field_info(arg->b2, FIELD_RSHIFT_U64);
  return ull >> __builtin_preserve_field_info(arg->b2, FIELD_RSHIFT_U64);
}
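For the layout above, the constants in the listings below can be checked
by hand (my arithmetic, not part of the patch): b2 starts 9 bits into
the 4-byte word at byte offset 4 (right after b1:9), so with that word
in the low half of a 64-bit register on little endian,
FIELD_LSHIFT_U64 = 64 - 9 - 4 = 51 and FIELD_RSHIFT_U64 = 64 - 4 = 60.
Thus (ull << 51) >> 60 isolates exactly the 4 bits of b2, and the
signed variant of the right shift additionally sign-extends the result.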
There is a minor overhead for bpf_probe_read() on big endian.
The code and relocations generated for field_read, when bpf_probe_read()
is used to access the argument data, in little endian mode:
r3 = r1
r1 = 0
r1 = 4 <=== relocation (FIELD_BYTE_OFFSET)
r3 += r1
r1 = r10
r1 += -8
r2 = 4 <=== relocation (FIELD_BYTE_SIZE)
call bpf_probe_read
r2 = 51 <=== relocation (FIELD_LSHIFT_U64)
r1 = *(u64 *)(r10 - 8)
r1 <<= r2
r2 = 60 <=== relocation (FIELD_RSHIFT_U64)
r0 = r1
r0 >>= r2
r3 = 1 <=== relocation (FIELD_SIGNEDNESS)
if r3 == 0 goto LBB0_2
r1 s>>= r2
r0 = r1
LBB0_2:
exit
Compared to the above code between the FIELD_LSHIFT_U64 and
FIELD_RSHIFT_U64 relocations, the big endian version has four more
instructions:
r1 = 41 <=== relocation (FIELD_LSHIFT_U64)
r6 += r1
r6 += -64
r6 <<= 32
r6 >>= 32
r1 = *(u64 *)(r10 - 8)
r1 <<= r6
r2 = 60 <=== relocation (FIELD_RSHIFT_U64)
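The four extra instructions implement the
lshift = lshift + (size << 3) - 64 adjustment from the source above:
41 + (4 << 3) - 64 = 9 here. The adjustment is needed because on big
endian bpf_probe_read() deposits the 4 bytes it reads at the most
significant end of the u64 buffer; the <<= 32 / >>= 32 pair simply
re-zero-extends the 32-bit unsigned intermediate.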
The code and relocations generated when using direct loads:
r2 = 0
r3 = 4
r4 = 4
if r4 s> 3 goto LBB0_3
if r4 == 1 goto LBB0_5
if r4 == 2 goto LBB0_6
goto LBB0_9
LBB0_6: # %sw.bb1
r1 += r3
r2 = *(u16 *)(r1 + 0)
goto LBB0_9
LBB0_3: # %entry
if r4 == 4 goto LBB0_7
if r4 == 8 goto LBB0_8
goto LBB0_9
LBB0_8: # %sw.bb9
r1 += r3
r2 = *(u64 *)(r1 + 0)
goto LBB0_9
LBB0_5: # %sw.bb
r1 += r3
r2 = *(u8 *)(r1 + 0)
goto LBB0_9
LBB0_7: # %sw.bb5
r1 += r3
r2 = *(u32 *)(r1 + 0)
LBB0_9: # %sw.epilog
r1 = 51
r2 <<= r1
r1 = 60
r0 = r2
r0 >>= r1
r3 = 1
if r3 == 0 goto LBB0_11
r2 s>>= r1
r0 = r2
LBB0_11: # %sw.epilog
exit
Considering that the verifier is able to do limited constant
propagation following branches, the following is the code actually
traversed:
r2 = 0
r3 = 4 <=== relocation
r4 = 4 <=== relocation
if r4 s> 3 goto LBB0_3
LBB0_3: # %entry
if r4 == 4 goto LBB0_7
LBB0_7: # %sw.bb5
r1 += r3
r2 = *(u32 *)(r1 + 0)
LBB0_9: # %sw.epilog
r1 = 51 <=== relocation
r2 <<= r1
r1 = 60 <=== relocation
r0 = r2
r0 >>= r1
r3 = 1
if r3 == 0 goto LBB0_11
r2 s>>= r1
r0 = r2
LBB0_11: # %sw.epilog
exit
For the native load case, the load size is calculated to match the
load width LLVM would otherwise use to load the value from which the
bitfield is extracted.
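For b2 above, for example, LLVM would read the containing int with a
4-byte load, so FIELD_BYTE_SIZE relocates to 4 and the u32 arm of the
switch is the one the verifier ends up traversing, as in the listing
above.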
Differential Revision: https://reviews.llvm.org/D67980
llvm-svn: 374099
; Source code:
; typedef struct s1 { int a1; char a2; } __s1;
; union u1 { int b1; __s1 b2; };
; enum { FIELD_RSHIFT_U64 = 5, };
; int test(union u1 *arg) {
; unsigned r1 = __builtin_preserve_field_info(arg->b2.a1, FIELD_RSHIFT_U64);
; unsigned r2 = __builtin_preserve_field_info(arg->b2.a2, FIELD_RSHIFT_U64);
; /* r1: 32, r2: 56 */
; return r1 + r2;
; }
; Compilation flag:
; clang -target bpf -O2 -g -S -emit-llvm test.c
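; Expected values: FIELD_RSHIFT_U64 (kind 5) evaluates to 64 minus the
; field's bit size, so r1 = 64 - 32 = 32 for the int a1 and
; r2 = 64 - 8 = 56 for the char a2, matching the comment in the source.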
%union.u1 = type { %struct.s1 }
%struct.s1 = type { i32, i8 }
; Function Attrs: nounwind readnone
define dso_local i32 @test(%union.u1* %arg) local_unnamed_addr #0 !dbg !11 {
entry:
  call void @llvm.dbg.value(metadata %union.u1* %arg, metadata !27, metadata !DIExpression()), !dbg !30
  %0 = tail call %union.u1* @llvm.preserve.union.access.index.p0s_union.u1s.p0s_union.u1s(%union.u1* %arg, i32 1), !dbg !31, !llvm.preserve.access.index !16
  %b2 = getelementptr inbounds %union.u1, %union.u1* %0, i64 0, i32 0, !dbg !31
  %1 = tail call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.s1s(%struct.s1* %b2, i32 0, i32 0), !dbg !32, !llvm.preserve.access.index !21
  %2 = tail call i32 @llvm.bpf.preserve.field.info.p0i32(i32* %1, i64 5), !dbg !33
  call void @llvm.dbg.value(metadata i32 %2, metadata !28, metadata !DIExpression()), !dbg !30
  %3 = tail call i8* @llvm.preserve.struct.access.index.p0i8.p0s_struct.s1s(%struct.s1* %b2, i32 1, i32 1), !dbg !34, !llvm.preserve.access.index !21
  %4 = tail call i32 @llvm.bpf.preserve.field.info.p0i8(i8* %3, i64 5), !dbg !35
  call void @llvm.dbg.value(metadata i32 %4, metadata !29, metadata !DIExpression()), !dbg !30
  %add = add i32 %4, %2, !dbg !36
  ret i32 %add, !dbg !37
}
; CHECK: r1 = 32
; CHECK: r0 = 56
; CHECK-ALU64: r0 += r1
; CHECK-ALU32: w0 += w1
; CHECK: exit
; CHECK: .long 1 # BTF_KIND_UNION(id = 2)
; CHECK: .ascii "u1" # string offset=1
; CHECK: .ascii ".text" # string offset=42
; CHECK: .ascii "0:1:0" # string offset=48
; CHECK: .ascii "0:1:1" # string offset=91
; CHECK: .long 16 # FieldReloc
; CHECK-NEXT: .long 42 # Field reloc section string offset=42
; CHECK-NEXT: .long 2
; CHECK-NEXT: .long .Ltmp{{[0-9]+}}
; CHECK-NEXT: .long 2
; CHECK-NEXT: .long 48
; CHECK-NEXT: .long 5
; CHECK-NEXT: .long .Ltmp{{[0-9]+}}
; CHECK-NEXT: .long 2
; CHECK-NEXT: .long 91
; CHECK-NEXT: .long 5
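; Decoding the FieldReloc records above (as I read the encoding; record
; size 16 bytes): each record is <insn offset (.Ltmp label), BTF type id
; (2 = union u1), access string offset (48 = "0:1:0" for b2.a1,
; 91 = "0:1:1" for b2.a2), relocation kind (5 = FIELD_RSHIFT_U64)>.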
; Function Attrs: nounwind readnone
declare %union.u1* @llvm.preserve.union.access.index.p0s_union.u1s.p0s_union.u1s(%union.u1*, i32) #1
; Function Attrs: nounwind readnone
declare i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.s1s(%struct.s1*, i32, i32) #1
; Function Attrs: nounwind readnone
declare i32 @llvm.bpf.preserve.field.info.p0i32(i32*, i64) #1
; Function Attrs: nounwind readnone
declare i8* @llvm.preserve.struct.access.index.p0i8.p0s_struct.s1s(%struct.s1*, i32, i32) #1
; Function Attrs: nounwind readnone
declare i32 @llvm.bpf.preserve.field.info.p0i8(i8*, i64) #1
; Function Attrs: nounwind readnone speculatable willreturn
declare void @llvm.dbg.value(metadata, metadata, metadata) #2
attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind readnone speculatable willreturn }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8, !9}
!llvm.ident = !{!10}
!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 10.0.0 (https://github.com/llvm/llvm-project.git 4a60741b74384f14b21fdc0131ede326438840ab)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
!1 = !DIFile(filename: "test.c", directory: "/tmp/home/yhs/work/tests/core")
!2 = !{!3}
!3 = !DICompositeType(tag: DW_TAG_enumeration_type, file: !1, line: 3, baseType: !4, size: 32, elements: !5)
!4 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
!5 = !{!6}
!6 = !DIEnumerator(name: "FIELD_RSHIFT_U64", value: 5, isUnsigned: true)
!7 = !{i32 2, !"Dwarf Version", i32 4}
!8 = !{i32 2, !"Debug Info Version", i32 3}
!9 = !{i32 1, !"wchar_size", i32 4}
!10 = !{!"clang version 10.0.0 (https://github.com/llvm/llvm-project.git 4a60741b74384f14b21fdc0131ede326438840ab)"}
!11 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 4, type: !12, scopeLine: 4, flags: DIFlagPrototyped, isDefinition: true, isOptimized: true, unit: !0, retainedNodes: !26)
!12 = !DISubroutineType(types: !13)
!13 = !{!14, !15}
!14 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!15 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !16, size: 64)
!16 = distinct !DICompositeType(tag: DW_TAG_union_type, name: "u1", file: !1, line: 2, size: 64, elements: !17)
!17 = !{!18, !19}
!18 = !DIDerivedType(tag: DW_TAG_member, name: "b1", scope: !16, file: !1, line: 2, baseType: !14, size: 32)
!19 = !DIDerivedType(tag: DW_TAG_member, name: "b2", scope: !16, file: !1, line: 2, baseType: !20, size: 64)
!20 = !DIDerivedType(tag: DW_TAG_typedef, name: "__s1", file: !1, line: 1, baseType: !21)
!21 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s1", file: !1, line: 1, size: 64, elements: !22)
!22 = !{!23, !24}
!23 = !DIDerivedType(tag: DW_TAG_member, name: "a1", scope: !21, file: !1, line: 1, baseType: !14, size: 32)
!24 = !DIDerivedType(tag: DW_TAG_member, name: "a2", scope: !21, file: !1, line: 1, baseType: !25, size: 8, offset: 32)
!25 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
!26 = !{!27, !28, !29}
!27 = !DILocalVariable(name: "arg", arg: 1, scope: !11, file: !1, line: 4, type: !15)
!28 = !DILocalVariable(name: "r1", scope: !11, file: !1, line: 5, type: !4)
!29 = !DILocalVariable(name: "r2", scope: !11, file: !1, line: 6, type: !4)
!30 = !DILocation(line: 0, scope: !11)
!31 = !DILocation(line: 5, column: 52, scope: !11)
!32 = !DILocation(line: 5, column: 55, scope: !11)
!33 = !DILocation(line: 5, column: 17, scope: !11)
!34 = !DILocation(line: 6, column: 55, scope: !11)
!35 = !DILocation(line: 6, column: 17, scope: !11)
!36 = !DILocation(line: 8, column: 13, scope: !11)
!37 = !DILocation(line: 8, column: 3, scope: !11)