; RUN: llc < %s -march=sparcv9 -disable-sparc-delay-filler -disable-sparc-leaf-proc | FileCheck %s --check-prefix=CHECK --check-prefix=HARD
; RUN: llc < %s -march=sparcv9 -disable-sparc-delay-filler -disable-sparc-leaf-proc -mattr=soft-float | FileCheck %s --check-prefix=CHECK --check-prefix=SOFT

; CHECK-LABEL: intarg:
; The save/restore frame is not strictly necessary here, but we would need to
; refer to %o registers instead.
; CHECK: save %sp, -128, %sp
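; Note on the offsets checked below: SPARC v9 frames use a 2047-byte stack
; bias, so [%fp+2231] is BIAS+184 (the %a7 slot) and [%fp+2227] is BIAS+180,
; the right-aligned low word of the 8-byte %a6 slot at BIAS+176.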
; CHECK: ldx [%fp+2231], [[R2:%[gilo][0-7]]]
; CHECK: ld [%fp+2227], [[R1:%[gilo][0-7]]]
; CHECK: stb %i0, [%i4]
; CHECK: stb %i1, [%i4]
; CHECK: sth %i2, [%i4]
; CHECK: st %i3, [%i4]
; CHECK: stx %i4, [%i4]
; CHECK: st %i5, [%i4]
; CHECK: st [[R1]], [%i4]
; CHECK: stx [[R2]], [%i4]
; CHECK: restore
define void @intarg(i8 %a0,          ; %i0
                    i8 %a1,          ; %i1
                    i16 %a2,         ; %i2
                    i32 %a3,         ; %i3
                    i8* %a4,         ; %i4
                    i32 %a5,         ; %i5
                    i32 signext %a6, ; [%fp+BIAS+176]
                    i8* %a7) {       ; [%fp+BIAS+184]
  store volatile i8 %a0, i8* %a4
  store volatile i8 %a1, i8* %a4
  %p16 = bitcast i8* %a4 to i16*
  store volatile i16 %a2, i16* %p16
  %p32 = bitcast i8* %a4 to i32*
  store volatile i32 %a3, i32* %p32
  %pp = bitcast i8* %a4 to i8**
  store volatile i8* %a4, i8** %pp
  store volatile i32 %a5, i32* %p32
  store volatile i32 %a6, i32* %p32
  store volatile i8* %a7, i8** %pp
  ret void
}

; CHECK-LABEL: call_intarg:
; 16 saved + 8 args.
; CHECK: save %sp, -192, %sp
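; Frame-size sketch: (16 register-save slots + 8 argument slots) * 8 bytes
; gives the 192 bytes reserved above; the sign-extended %i0 argument (intarg's
; %a6) is stored at %sp+BIAS+176 = %sp+2223, matching the stx check below.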
; Sign-extend and store the full 64 bits.
; CHECK: sra %i0, 0, [[R:%[gilo][0-7]]]
; Use %o0-%o5 for outgoing arguments
; CHECK: mov 5, %o5
; CHECK: stx [[R]], [%sp+2223]
; CHECK: call intarg
; CHECK-NOT: add %sp
; CHECK: restore
define void @call_intarg(i32 %i0, i8* %i1) {
  call void @intarg(i8 0, i8 1, i16 2, i32 3, i8* undef, i32 5, i32 %i0, i8* %i1)
  ret void
}

; CHECK-LABEL: floatarg:
; HARD: save %sp, -128, %sp
; HARD: ld [%fp+2307], [[F:%f[0-9]+]]
; HARD: fstod %f1,
; HARD: faddd %f2,
; HARD: faddd %f4,
; HARD: faddd %f6,
; HARD: fadds %f31, [[F]]
; SOFT: save %sp, -176, %sp
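; With -mattr=soft-float the FP arguments arrive in integer registers and the
; FP operations are lowered to libcalls (__extendsfdf2 and friends), which is
; what the remaining SOFT lines for this function expect.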
; SOFT: ld [%fp+2299], %i4
; SOFT: ld [%fp+2307], %i5
; SOFT: srl %i0, 0, %o0
; SOFT-NEXT: call __extendsfdf2
; SOFT: mov %o0, %o1
; SOFT: mov %i1, %o0
; SOFT: mov %i2, %o0
; SOFT: mov %i3, %o0
define double @floatarg(float %a0,    ; %f1
                        double %a1,   ; %d2
                        double %a2,   ; %d4
                        double %a3,   ; %d6
                        float %a4,    ; %f9
                        float %a5,    ; %f11
                        float %a6,    ; %f13
                        float %a7,    ; %f15
                        float %a8,    ; %f17
                        float %a9,    ; %f19
                        float %a10,   ; %f21
                        float %a11,   ; %f23
                        float %a12,   ; %f25
                        float %a13,   ; %f27
                        float %a14,   ; %f29
                        float %a15,   ; %f31
                        float %a16,   ; [%fp+BIAS+256] (using 8 bytes)
                        double %a17) { ; [%fp+BIAS+264] (using 8 bytes)
  %d0 = fpext float %a0 to double
  %s1 = fadd double %a1, %d0
  %s2 = fadd double %a2, %s1
  %s3 = fadd double %a3, %s2
  %s16 = fadd float %a15, %a16
  %d16 = fpext float %s16 to double
  %s17 = fadd double %d16, %s3
  ret double %s17
}

; CHECK-LABEL: call_floatarg:
; CHECK: save %sp, -272, %sp
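; Frame-size sketch: @floatarg takes 18 arguments, so the caller reserves
; (16 register-save slots + 18 argument slots) * 8 bytes = 272.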
; Store 8 bytes in full slot.
; HARD: std %f2, [%sp+2311]
; Store 4 bytes, right-aligned in slot.
; HARD: st %f1, [%sp+2307]
; HARD: fmovd %f2, %f4
; SOFT: stx %i1, [%sp+2311]
; SOFT: stx %i0, [%sp+2303]
; SOFT: stx %i2, [%sp+2295]
; SOFT: stx %i2, [%sp+2287]
; SOFT: stx %i2, [%sp+2279]
; SOFT: stx %i2, [%sp+2271]
; SOFT: stx %i2, [%sp+2263]
; SOFT: stx %i2, [%sp+2255]
; SOFT: stx %i2, [%sp+2247]
; SOFT: stx %i2, [%sp+2239]
; SOFT: stx %i2, [%sp+2231]
; SOFT: stx %i2, [%sp+2223]
; SOFT: mov %i2, %o0
; SOFT: mov %i1, %o1
; SOFT: mov %i1, %o2
; SOFT: mov %i1, %o3
; SOFT: mov %i2, %o4
; SOFT: mov %i2, %o5
; CHECK: call floatarg
; CHECK-NOT: add %sp
; CHECK: restore
define void @call_floatarg(float %f1, double %d2, float %f5, double *%p) {
  %r = call double @floatarg(float %f5, double %d2, double %d2, double %d2,
                             float %f5, float %f5, float %f5, float %f5,
                             float %f5, float %f5, float %f5, float %f5,
                             float %f5, float %f5, float %f5, float %f5,
                             float %f1, double %d2)
  store double %r, double* %p
  ret void
}

; CHECK-LABEL: mixedarg:
; CHECK: ldx [%fp+2247]
; CHECK: ldx [%fp+2231]
; SOFT: ldx [%fp+2239], %i0
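; Offset sketch (BIAS = 2047): 2247 is BIAS+200 (%a9) and 2231 is BIAS+184
; (%a7); with soft-float the double %a8 also lands on the stack, at BIAS+192,
; hence the extra ldx from 2239.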
; HARD: fstod %f3
; HARD: faddd %f6
; HARD: faddd %f16
; SOFT: mov %o0, %o1
; SOFT-NEXT: mov %i3, %o0
; SOFT-NEXT: call __adddf3
; SOFT: mov %o0, %o1
; SOFT-NEXT: mov %i0, %o0
; SOFT-NEXT: call __adddf3
; HARD: std %f0, [%i1]
; SOFT: stx %o0, [%i5]
define void @mixedarg(i8 %a0,      ; %i0
                      float %a1,   ; %f3
                      i16 %a2,     ; %i2
                      double %a3,  ; %d6
                      i13 %a4,     ; %i4
                      float %a5,   ; %f11
                      i64 %a6,     ; [%fp+BIAS+176]
                      double *%a7, ; [%fp+BIAS+184]
                      double %a8,  ; %d16
                      i16* %a9) {  ; [%fp+BIAS+200]
  %d1 = fpext float %a1 to double
  %s3 = fadd double %a3, %d1
  %s8 = fadd double %a8, %s3
  store double %s8, double* %a7
  store i16 %a2, i16* %a9
  ret void
}

; CHECK-LABEL: call_mixedarg:
; CHECK: stx %i2, [%sp+2247]
; SOFT: stx %i1, [%sp+2239]
; CHECK: stx %i0, [%sp+2223]
; HARD: fmovd %f2, %f6
; HARD: fmovd %f2, %f16
; SOFT: mov %i1, %o3
; CHECK: call mixedarg
; CHECK-NOT: add %sp
; CHECK: restore
define void @call_mixedarg(i64 %i0, double %f2, i16* %i2) {
  call void @mixedarg(i8 undef,
                      float undef,
                      i16 undef,
                      double %f2,
                      i13 undef,
                      float undef,
                      i64 %i0,
                      double* undef,
                      double %f2,
                      i16* %i2)
  ret void
}

; The inreg attribute is used to indicate 32-bit sized struct elements that
; share an 8-byte slot.
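; Roughly the C-level shape a frontend would lower this way (illustration
; only; the struct and field names are made up):
;   struct ifpair { int a; float b; };
;   int inreg_fi(struct ifpair p) { return p.a - (int)p.b; }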
; CHECK-LABEL: inreg_fi:
; SOFT: srlx %i0, 32, [[R:%[gilo][0-7]]]
; HARD: fstoi %f1
; SOFT: call __fixsfsi
; HARD: srlx %i0, 32, [[R:%[gilo][0-7]]]
; CHECK: sub [[R]],
define i32 @inreg_fi(i32 inreg %a0,     ; high bits of %i0
                     float inreg %a1) { ; %f1
  %b1 = fptosi float %a1 to i32
  %rv = sub i32 %a0, %b1
  ret i32 %rv
}

; CHECK-LABEL: call_inreg_fi:
; Allocate space for 6 arguments, even when only 2 are used.
; CHECK: save %sp, -176, %sp
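; (16 register-save slots + 6 minimum argument slots) * 8 bytes = 176.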
; HARD-DAG: sllx %i1, 32, %o0
; HARD-DAG: fmovs %f5, %f1
; SOFT: srl %i2, 0, %i0
; SOFT: sllx %i1, 32, %i1
; SOFT: or %i1, %i0, %o0
; CHECK: call inreg_fi
define void @call_inreg_fi(i32* %p, i32 %i1, float %f5) {
  %x = call i32 @inreg_fi(i32 %i1, float %f5)
  ret void
}

; CHECK-LABEL: inreg_ff:
; HARD: fsubs %f0, %f1, %f0
; SOFT: srlx %i0, 32, %o0
; SOFT: srl %i0, 0, %o1
; SOFT: call __subsf3
define float @inreg_ff(float inreg %a0,   ; %f0
                       float inreg %a1) { ; %f1
  %rv = fsub float %a0, %a1
  ret float %rv
}

; CHECK-LABEL: call_inreg_ff:
; HARD-DAG: fmovs %f3, %f0
; HARD-DAG: fmovs %f5, %f1
; SOFT: srl %i2, 0, %i0
; SOFT: sllx %i1, 32, %i1
; SOFT: or %i1, %i0, %o0
; CHECK: call inreg_ff
define void @call_inreg_ff(i32* %p, float %f3, float %f5) {
  %x = call float @inreg_ff(float %f3, float %f5)
  ret void
}

; CHECK-LABEL: inreg_if:
; HARD: fstoi %f0
; SOFT: srlx %i0, 32, %o0
; SOFT: call __fixsfsi
; CHECK: sub %i0
define i32 @inreg_if(float inreg %a0, ; %f0
                     i32 inreg %a1) { ; low bits of %i0
  %b0 = fptosi float %a0 to i32
  %rv = sub i32 %a1, %b0
  ret i32 %rv
}

; CHECK-LABEL: call_inreg_if:
; HARD: fmovs %f3, %f0
; HARD: mov %i2, %o0
; SOFT: srl %i2, 0, %i0
; SOFT: sllx %i1, 32, %i1
; SOFT: or %i1, %i0, %o0
; CHECK: call inreg_if
define void @call_inreg_if(i32* %p, float %f3, i32 %i2) {
  %x = call i32 @inreg_if(float %f3, i32 %i2)
  ret void
}

; The frontend shouldn't do this. Just pass i64 instead.
; CHECK-LABEL: inreg_ii:
; CHECK: srlx %i0, 32, [[R:%[gilo][0-7]]]
; CHECK: sub %i0, [[R]], %i0
define i32 @inreg_ii(i32 inreg %a0,   ; high bits of %i0
                     i32 inreg %a1) { ; low bits of %i0
  %rv = sub i32 %a1, %a0
  ret i32 %rv
}

; CHECK-LABEL: call_inreg_ii:
; CHECK: srl %i2, 0, [[R2:%[gilo][0-7]]]
; CHECK: sllx %i1, 32, [[R1:%[gilo][0-7]]]
; CHECK: or [[R1]], [[R2]], %o0
; CHECK: call inreg_ii
define void @call_inreg_ii(i32* %p, i32 %i1, i32 %i2) {
  %x = call i32 @inreg_ii(i32 %i1, i32 %i2)
  ret void
}

; Structs up to 32 bytes in size can be returned in registers.
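; Rough C equivalent of the aggregate below (illustration only; the struct
; name is made up):
;   struct pair { long a, b; };
; A 16-byte struct like this comes back in %i0/%i1, as the ldx checks verify.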
; CHECK-LABEL: ret_i64_pair:
; CHECK: ldx [%i2], %i0
; CHECK: ldx [%i3], %i1
define { i64, i64 } @ret_i64_pair(i32 %a0, i32 %a1, i64* %p, i64* %q) {
  %r1 = load i64, i64* %p
  %rv1 = insertvalue { i64, i64 } undef, i64 %r1, 0
  store i64 0, i64* %p
  %r2 = load i64, i64* %q
  %rv2 = insertvalue { i64, i64 } %rv1, i64 %r2, 1
  ret { i64, i64 } %rv2
}

; CHECK-LABEL: call_ret_i64_pair:
; CHECK: call ret_i64_pair
; CHECK: stx %o0, [%i0]
; CHECK: stx %o1, [%i0]
define void @call_ret_i64_pair(i64* %i0) {
  %rv = call { i64, i64 } @ret_i64_pair(i32 undef, i32 undef,
                                        i64* undef, i64* undef)
  %e0 = extractvalue { i64, i64 } %rv, 0
  store volatile i64 %e0, i64* %i0
  %e1 = extractvalue { i64, i64 } %rv, 1
  store i64 %e1, i64* %i0
  ret void
}

; This is not a C struct; the i32 member uses 8 bytes, but the float only 4.
; CHECK-LABEL: ret_i32_float_pair:
; CHECK: ld [%i2], %i0
; HARD: ld [%i3], %f2
; SOFT: ld [%i3], %i1
define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
                                          i32* %p, float* %q) {
  %r1 = load i32, i32* %p
  %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
  store i32 0, i32* %p
  %r2 = load float, float* %q
  %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
  ret { i32, float } %rv2
}

; CHECK-LABEL: call_ret_i32_float_pair:
; CHECK: call ret_i32_float_pair
; CHECK: st %o0, [%i0]
; HARD: st %f2, [%i1]
; SOFT: st %o1, [%i1]
define void @call_ret_i32_float_pair(i32* %i0, float* %i1) {
  %rv = call { i32, float } @ret_i32_float_pair(i32 undef, i32 undef,
                                                i32* undef, float* undef)
  %e0 = extractvalue { i32, float } %rv, 0
  store i32 %e0, i32* %i0
  %e1 = extractvalue { i32, float } %rv, 1
  store float %e1, float* %i1
  ret void
}

; This is a C struct, each member uses 4 bytes.
; CHECK-LABEL: ret_i32_float_packed:
; CHECK: ld [%i2], [[R:%[gilo][0-7]]]
; HARD: ld [%i3], %f1
; SOFT: ld [%i3], %i1
; CHECK: sllx [[R]], 32, %i0
define inreg { i32, float } @ret_i32_float_packed(i32 %a0, i32 %a1,
                                                  i32* %p, float* %q) {
  %r1 = load i32, i32* %p
  %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
  store i32 0, i32* %p
  %r2 = load float, float* %q
  %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
  ret { i32, float } %rv2
}

; CHECK-LABEL: call_ret_i32_float_packed:
; CHECK: call ret_i32_float_packed
; CHECK: srlx %o0, 32, [[R:%[gilo][0-7]]]
; CHECK: st [[R]], [%i0]
; HARD: st %f1, [%i1]
; SOFT: st %o0, [%i1]
define void @call_ret_i32_float_packed(i32* %i0, float* %i1) {
  %rv = call { i32, float } @ret_i32_float_packed(i32 undef, i32 undef,
                                                  i32* undef, float* undef)
  %e0 = extractvalue { i32, float } %rv, 0
  store i32 %e0, i32* %i0
  %e1 = extractvalue { i32, float } %rv, 1
  store float %e1, float* %i1
  ret void
}

; The C frontend should use i64 to return { i32, i32 } structs, but verify that
; we don't miscompile this case where both struct elements are placed in %i0.
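; Element 0 ends up in the high 32 bits of %i0 and element 1 in the low 32
; bits, which is what the sllx/or sequence below produces.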
; CHECK-LABEL: ret_i32_packed:
; CHECK: ld [%i2], [[R1:%[gilo][0-7]]]
; CHECK: ld [%i3], [[R2:%[gilo][0-7]]]
; CHECK: sllx [[R2]], 32, [[R3:%[gilo][0-7]]]
; CHECK: or [[R3]], [[R1]], %i0
define inreg { i32, i32 } @ret_i32_packed(i32 %a0, i32 %a1,
                                          i32* %p, i32* %q) {
  %r1 = load i32, i32* %p
  %rv1 = insertvalue { i32, i32 } undef, i32 %r1, 1
  store i32 0, i32* %p
  %r2 = load i32, i32* %q
  %rv2 = insertvalue { i32, i32 } %rv1, i32 %r2, 0
  ret { i32, i32 } %rv2
}

; CHECK-LABEL: call_ret_i32_packed:
; CHECK: call ret_i32_packed
; CHECK: srlx %o0, 32, [[R:%[gilo][0-7]]]
; CHECK: st [[R]], [%i0]
; CHECK: st %o0, [%i1]
define void @call_ret_i32_packed(i32* %i0, i32* %i1) {
  %rv = call { i32, i32 } @ret_i32_packed(i32 undef, i32 undef,
                                          i32* undef, i32* undef)
  %e0 = extractvalue { i32, i32 } %rv, 0
  store i32 %e0, i32* %i0
  %e1 = extractvalue { i32, i32 } %rv, 1
  store i32 %e1, i32* %i1
  ret void
}

; The return value must be sign-extended to 64 bits.
; CHECK-LABEL: ret_sext:
; CHECK: sra %i0, 0, %i0
define signext i32 @ret_sext(i32 %a0) {
  ret i32 %a0
}

; CHECK-LABEL: ret_zext:
; CHECK: srl %i0, 0, %i0
define zeroext i32 @ret_zext(i32 %a0) {
  ret i32 %a0
}

; CHECK-LABEL: ret_nosext:
; CHECK-NOT: sra
define signext i32 @ret_nosext(i32 signext %a0) {
  ret i32 %a0
}

; CHECK-LABEL: ret_nozext:
; CHECK-NOT: srl
define signext i32 @ret_nozext(i32 signext %a0) {
  ret i32 %a0
}

; CHECK-LABEL: test_register_directive:
; CHECK: .register %g2, #scratch
; CHECK: .register %g3, #scratch
; CHECK: add %i0, 2, %g2
; CHECK: add %i0, 3, %g3
define i32 @test_register_directive(i32 %i0) {
entry:
  %0 = add nsw i32 %i0, 2
  %1 = add nsw i32 %i0, 3
  tail call void asm sideeffect "", "r,r,~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6},~{o7},~{g1},~{g4},~{g5},~{g6},~{g7}"(i32 %0, i32 %1)
  %2 = add nsw i32 %0, %1
  ret i32 %2
}

; CHECK-LABEL: test_large_stack:

; CHECK: sethi 16, %g1
; CHECK: xor %g1, -176, %g1
; CHECK: save %sp, %g1, %sp
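; Frame-size sketch: sethi 16 materializes 16 << 10 = 16384, and
; 16384 xor -176 = -16560, i.e. the 16 KiB buffer plus the minimal 176-byte
; frame.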

; CHECK: sethi 14, %g1
; CHECK: xor %g1, -1, %g1
; CHECK: add %g1, %fp, %g1
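; Address sketch: sethi 14 gives 14 << 10 = 14336, and 14336 xor -1 = -14337 =
; 2047 - 16384, so %g1 points at the buffer 16384 bytes below the unbiased
; frame pointer.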
; CHECK: call use_buf

define i32 @test_large_stack() {
entry:
  %buffer1 = alloca [16384 x i8], align 8
  %buffer1.sub = getelementptr inbounds [16384 x i8], [16384 x i8]* %buffer1, i32 0, i32 0
  %0 = call i32 @use_buf(i32 16384, i8* %buffer1.sub)
  ret i32 %0
}

declare i32 @use_buf(i32, i8*)

; CHECK-LABEL: test_fp128_args:
; HARD-DAG: std %f0, [%fp+{{.+}}]
; HARD-DAG: std %f2, [%fp+{{.+}}]
; HARD-DAG: std %f6, [%fp+{{.+}}]
; HARD-DAG: std %f4, [%fp+{{.+}}]
; HARD: add %fp, [[Offset:[0-9]+]], %o0
; HARD: call _Qp_add
; HARD: ldd [%fp+[[Offset]]], %f0
; SOFT-DAG: mov %i0, %o0
; SOFT-DAG: mov %i1, %o1
; SOFT-DAG: mov %i2, %o2
; SOFT-DAG: mov %i3, %o3
; SOFT: call __addtf3
; SOFT: mov %o0, %i0
; SOFT: mov %o1, %i1
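; Under soft-float each fp128 argument occupies a pair of 64-bit integer
; registers, so %a is forwarded in %o0/%o1 and %b in %o2/%o3 to __addtf3 and
; the result comes back in %o0/%o1, which is what the SOFT lines above encode.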

define fp128 @test_fp128_args(fp128 %a, fp128 %b) {
entry:
  %0 = fadd fp128 %a, %b
  ret fp128 %0
}

declare i64 @receive_fp128(i64 %a, ...)

; CHECK-LABEL: test_fp128_variable_args:
; HARD-DAG: std %f4, [%sp+[[Offset0:[0-9]+]]]
; HARD-DAG: std %f6, [%sp+[[Offset1:[0-9]+]]]
; HARD-DAG: ldx [%sp+[[Offset0]]], %o2
; HARD-DAG: ldx [%sp+[[Offset1]]], %o3
; SOFT-DAG: mov %i0, %o0
; SOFT-DAG: mov %i1, %o1
; SOFT-DAG: mov %i2, %o2
; CHECK: call receive_fp128
define i64 @test_fp128_variable_args(i64 %a, fp128 %b) {
entry:
  %0 = call i64 (i64, ...) @receive_fp128(i64 %a, fp128 %b)
  ret i64 %0
}

; CHECK-LABEL: test_call_libfunc:
; HARD: st %f1, [%fp+[[Offset0:[0-9]+]]]
; HARD: fmovs %f3, %f1
; SOFT: srl %i1, 0, %o0
; CHECK: call cosf
; HARD: st %f0, [%fp+[[Offset1:[0-9]+]]]
; HARD: ld [%fp+[[Offset0]]], %f1
; SOFT: mov %o0, %i1
; SOFT: srl %i0, 0, %o0
; CHECK: call sinf
; HARD: ld [%fp+[[Offset1]]], %f1
; HARD: fmuls %f1, %f0, %f0
; SOFT: mov %o0, %o1
; SOFT: mov %i1, %o0
; SOFT: call __mulsf3
; SOFT: sllx %o0, 32, %i0

define inreg float @test_call_libfunc(float %arg0, float %arg1) {
entry:
  %0 = tail call inreg float @cosf(float %arg1)
  %1 = tail call inreg float @sinf(float %arg0)
  %2 = fmul float %0, %1
  ret float %2
}

declare inreg float @cosf(float %arg) readnone nounwind
declare inreg float @sinf(float %arg) readnone nounwind