llvm-project/llvm/test/CodeGen/BPF/sanity.ll

BPF backend

Summary:
V8->V9:
- cleanup tests

V7->V8:
- addressed feedback from David:
  - switched to range-based 'for' loops
  - fixed formatting of tests

V6->V7:
- rebased and adjusted AsmPrinter args
- CamelCased .td, fixed formatting, cleaned up names, removed unused patterns
- diffstat: 3 files changed, 203 insertions(+), 227 deletions(-)

V5->V6:
- addressed feedback from Chandler:
  - reinstated full verbose standard banner in all files
  - fixed variables that were not in CamelCase
  - fixed names of #ifdef in header files
  - removed redundant braces in if/else chains with single statements
  - fixed comments
  - removed trailing empty line
  - dropped debug annotations from tests
- diffstat of these changes: 46 files changed, 456 insertions(+), 469 deletions(-)

V4->V5:
- fix setLoadExtAction() interface
- clang-formatted all where it made sense

V3->V4:
- added CODE_OWNERS entry for BPF backend

V2->V3:
- fix metadata in tests

V1->V2:
- addressed feedback from Tom and Matt
- removed top level change to configure (now everything via 'experimental-backend')
- reworked error reporting via DiagnosticInfo (similar to R600)
- added a few more tests
- added cmake build
- added Triple::bpf
- tested on linux and darwin

V1 cover letter:
---------------------
Linux recently gained a "universal in-kernel virtual machine" called eBPF or
extended BPF. The name comes from "Berkeley Packet Filter", since the new
instruction set is based on it. This patch adds a new backend that emits the
extended BPF instruction set.

The concept and development are covered by the following articles:
http://lwn.net/Articles/599755/
http://lwn.net/Articles/575531/
http://lwn.net/Articles/603983/
http://lwn.net/Articles/606089/
http://lwn.net/Articles/612878/

One of the use cases: a dtrace/systemtap alternative.

bpf syscall manpage:
https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=b4fc1a460f3017e958e6a8ea560ea0afd91bf6fe

Instruction set description and differences vs classic BPF:
http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/networking/filter.txt

Short summary of the instruction set:
- 64-bit registers
  R0      - return value from in-kernel function, and exit value for BPF program
  R1 - R5 - arguments from BPF program to in-kernel function
  R6 - R9 - callee saved registers that in-kernel function will preserve
  R10     - read-only frame pointer to access stack
- two-operand instructions like +, -, *, mov, load/store
- implicit prologue/epilogue (invisible stack pointer)
- no floating point, no simd

Short history of extended BPF in the kernel: interpreter in 3.15, x64 JIT in
3.16, arm64 JIT, verifier, bpf syscall in 3.18, more to come in the future.

It's a very small and simple backend. There is no support for global
variables, arbitrary function calls, floating point, varargs, exceptions,
indirect jumps, arbitrary pointer arithmetic, alloca, etc. From a C front-end
point of view it's very restricted. That is on purpose, since the kernel
rejects all programs that it cannot prove safe: it rejects programs with loops
and with memory accesses via arbitrary pointers. When the kernel accepts a
program, it is guaranteed that the program will terminate and will not crash
the kernel.

This patch implements all the 'must have' bits. There are several things on
the TODO list, so this is not the end of development. Most of the code is
boilerplate, copy-pasted from other backends. The only odd things are the lack
of < and <= instructions, the specialized load_byte intrinsics, and 'compare
and goto' as a single instruction. The current instruction set is fixed, but
more instructions can be added in the future.

Signed-off-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>

Subscribers: majnemer, chandlerc, echristo, joerg, pete, rengolin,
kristof.beyls, arsenm, t.p.northover, tstellarAMD, aemerson, llvm-commits

Differential Revision: http://reviews.llvm.org/D6494

llvm-svn: 227008
; RUN: llc < %s -march=bpf | FileCheck %s
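; This sanity test covers basic arithmetic, sign extension, calls, select,
; simple control flow, and constant materialization on the BPF target. The
; CHECK lines rely on the eBPF calling convention described above: r1-r5
; carry incoming arguments, r0 carries the return value, and every register
; is 64 bits wide.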
@foo_printf.fmt = private unnamed_addr constant [9 x i8] c"hello  \0A\00", align 1
; Function Attrs: nounwind readnone uwtable
define i32 @foo_int(i32 %a, i32 %b) #0 {
%1 = add nsw i32 %b, %a
ret i32 %1
; CHECK-LABEL: foo_int:
; CHECK: add r2, r1
}
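; With only 64-bit registers available, the i8 result below is sign-extended
; in place: shift left by 56, then arithmetic shift right by 56 (the checked
; slli/srai pair).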
; Function Attrs: nounwind readnone uwtable
define signext i8 @foo_char(i8 signext %a, i8 signext %b) #0 {
%1 = add i8 %b, %a
ret i8 %1
; CHECK-LABEL: foo_char:
; CHECK: add r2, r1
; CHECK: slli r2, 56
; CHECK: srai r2, 56
}
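; foo_ll computes its result in r2 and then moves it into r0, the
; return-value register (CHECK: mov r0, r2).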
; Function Attrs: nounwind readnone uwtable
define i64 @foo_ll(i64 %a, i64 %b, i64 %c) #0 {
%1 = add nsw i64 %b, %a
%2 = sub i64 %1, %c
ret i64 %2
; CHECK-LABEL: foo_ll:
; CHECK: add r2, r1
; CHECK: sub r2, r3
; CHECK: mov r0, r2
}
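; The caller truncates %b to i8 and sign-extends it (slli/srai by 56) before
; placing it in r1, the first argument register of @foo_2arg.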
; Function Attrs: nounwind uwtable
define void @foo_call2(i32 %a, i32 %b) #1 {
%1 = trunc i32 %b to i8
tail call void @foo_2arg(i8 signext %1, i32 %a) #3
ret void
; CHECK-LABEL: foo_call2:
; CHECK: slli r2, 56
; CHECK: srai r2, 56
; CHECK: mov r1, r2
}
declare void @foo_2arg(i8 signext, i32) #2
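; @bar takes the same parameter list as @foo_call5 itself, so the incoming
; argument registers can presumably be forwarded to the call unchanged; only
; the emission of the call is checked.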
; Function Attrs: nounwind uwtable
define i32 @foo_call5(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #1 {
%1 = tail call i32 @bar(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #3
ret i32 0
; CHECK-LABEL: foo_call5:
; CHECK: call bar
}
declare i32 @bar(i8 signext, i16 signext, i32, i64) #2
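; The instruction set lacks signed less-than branches (see the note above on
; the missing < and <= instructions), so the slt comparison is emitted as
; jsgt with the operands swapped.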
; Function Attrs: nounwind readnone uwtable
define signext i8 @foo_cmp(i8 signext %a, i8 signext %b) #0 {
%1 = icmp slt i8 %a, %b
%a.b = select i1 %1, i8 %a, i8 %b
ret i8 %a.b
; CHECK-LABEL: foo_cmp:
; CHECK: jsgt r2, r1
}
; Function Attrs: nounwind readnone uwtable
define i32 @foo_muldiv(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #0 {
%1 = icmp eq i8 %a, 0
br i1 %1, label %5, label %2
; <label>:2 ; preds = %0
%3 = sext i16 %b to i32
%4 = mul nsw i32 %3, %c
br label %8
; <label>:5 ; preds = %0
%6 = trunc i64 %d to i32
%7 = udiv i32 %6, %c
br label %8
; <label>:8 ; preds = %5, %2
%.0 = phi i32 [ %4, %2 ], [ %7, %5 ]
ret i32 %.0
; CHECK-LABEL: foo_muldiv:
; CHECK: mul r2, r3
}
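; r1-r5 are the only argument registers, so a five-argument call fills all of
; them: the constants 1 through 5 are moved into r1-r5 before calling
; @manyarg.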
; Function Attrs: nounwind uwtable
define i32 @foo_optimized() #1 {
%1 = tail call i32 @manyarg(i32 1, i32 2, i32 3, i32 4, i32 5) #3
ret i32 %1
; CHECK-LABEL: foo_optimized:
; CHECK: mov r1, 1
; CHECK: mov r2, 2
; CHECK: mov r3, 3
; CHECK: mov r4, 4
; CHECK: mov r5, 5
}
declare i32 @manyarg(i32, i32, i32, i32, i32) #2
; Function Attrs: nounwind uwtable
define void @foo_printf() #1 {
%fmt = alloca [9 x i8], align 1
%1 = getelementptr inbounds [9 x i8]* %fmt, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([9 x i8]* @foo_printf.fmt, i64 0, i64 0), i64 9, i32 1, i1 false)
; CHECK-LABEL: foo_printf:
; CHECK: ld_64 r1, 729618802566522216
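; 729618802566522216 is 0x0A20206F6C6C6568: the first eight bytes of the
; "hello  \n" format string read as a little-endian 64-bit value, i.e. the
; memcpy of the constant string is lowered to materializing the string bytes
; as an immediate rather than a library call.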
%2 = call i32 (i8*, ...)* @printf(i8* %1) #3
ret void
}
; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #3
; Function Attrs: nounwind
declare i32 @printf(i8* nocapture, ...) #4