[FPEnv] Strict FP tests should use the requisite function attributes.

A set of function attributes (currently just strictfp) is required on any function
that uses constrained floating-point intrinsics, but none of our tests use them.

This patch adds the missing attributes.

These tests have been checked against the IR verifier changes in D68233.

Reviewed by:	andrew.w.kaylor, cameron.mcinally, uweigand
Approved by:	andrew.w.kaylor
Differential Revision:	https://reviews.llvm.org/D67925

llvm-svn: 373761
Kevin P. Neal 2019-10-04 17:03:46 +00:00
parent 9819b9d35f
commit 68b8052121
73 changed files with 1771 additions and 1663 deletions
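
For context, a minimal sketch of the pattern the updated tests follow (the function
name and values here are illustrative, not taken from the diffs below): the defining
function carries the strictfp attribute, either directly or via an attribute group,
and each constrained intrinsic call site carries it as well.

declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)

; The function containing constrained calls is marked strictfp via group #0 ...
define float @example(float %a, float %b) #0 {
entry:
  %sum = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0   ; ... and so is each call site.
  ret float %sum
}

attributes #0 = { strictfp }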


@ -8,10 +8,11 @@
; Verify that no gross errors happen.
; CHECK-LABEL: @f20
; COMMON: cfdctsiz
define i32 @f20(double %a) {
define i32 @f20(double %a) strictfp {
entry:
%result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double 42.1,
metadata !"fpexcept.strict")
strictfp
ret i32 %result
}

File diff suppressed because it is too large.


@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
; Check register addition.
define float @f1(float %f1, float %f2) {
define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: aebr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the AEB range.
define float @f2(float %f1, float *%ptr) {
define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: aeb %f0, 0(%r2)
; CHECK: br %r14
@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned AEB range.
define float @f3(float %f1, float *%base) {
define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: aeb %f0, 4092(%r2)
; CHECK: br %r14
@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define float @f4(float %f1, float *%base) {
define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: aeb %f0, 0(%r2)
@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
define float @f5(float %f1, float *%base) {
define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: aeb %f0, 0(%r2)
@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that AEB allows indices.
define float @f6(float %f1, float *%base, i64 %index) {
define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: aeb %f0, 400(%r1,%r2)
@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that additions of spilled values can use AEB rather than AEBR.
define float @f7(float *%ptr0) {
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: aeb %f0, 16{{[04]}}(%r15)
@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
%ret = call float @foo()
%ret = call float @foo() #0
%add0 = call float @llvm.experimental.constrained.fadd.f32(
float %ret, float %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add1 = call float @llvm.experimental.constrained.fadd.f32(
float %add0, float %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add2 = call float @llvm.experimental.constrained.fadd.f32(
float %add1, float %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add3 = call float @llvm.experimental.constrained.fadd.f32(
float %add2, float %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add4 = call float @llvm.experimental.constrained.fadd.f32(
float %add3, float %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add5 = call float @llvm.experimental.constrained.fadd.f32(
float %add4, float %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add6 = call float @llvm.experimental.constrained.fadd.f32(
float %add5, float %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add7 = call float @llvm.experimental.constrained.fadd.f32(
float %add6, float %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add8 = call float @llvm.experimental.constrained.fadd.f32(
float %add7, float %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add9 = call float @llvm.experimental.constrained.fadd.f32(
float %add8, float %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add10 = call float @llvm.experimental.constrained.fadd.f32(
float %add9, float %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %add10
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
; Check register addition.
define double @f1(double %f1, double %f2) {
define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: adbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the ADB range.
define double @f2(double %f1, double *%ptr) {
define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: adb %f0, 0(%r2)
; CHECK: br %r14
@ -27,12 +27,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned ADB range.
define double @f3(double %f1, double *%base) {
define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: adb %f0, 4088(%r2)
; CHECK: br %r14
@ -41,13 +41,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(double %f1, double *%base) {
define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: adb %f0, 0(%r2)
@ -57,12 +57,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(double %f1, double *%base) {
define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: adb %f0, 0(%r2)
@ -72,12 +72,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that ADB allows indices.
define double @f6(double %f1, double *%base, i64 %index) {
define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: adb %f0, 800(%r1,%r2)
@ -88,12 +88,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that additions of spilled values can use ADB rather than ADBR.
define double @f7(double *%ptr0) {
define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: adb %f0, 160(%r15)
@ -121,52 +121,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
%ret = call double @foo()
%ret = call double @foo() #0
%add0 = call double @llvm.experimental.constrained.fadd.f64(
double %ret, double %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add1 = call double @llvm.experimental.constrained.fadd.f64(
double %add0, double %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add2 = call double @llvm.experimental.constrained.fadd.f64(
double %add1, double %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add3 = call double @llvm.experimental.constrained.fadd.f64(
double %add2, double %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add4 = call double @llvm.experimental.constrained.fadd.f64(
double %add3, double %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add5 = call double @llvm.experimental.constrained.fadd.f64(
double %add4, double %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add6 = call double @llvm.experimental.constrained.fadd.f64(
double %add5, double %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add7 = call double @llvm.experimental.constrained.fadd.f64(
double %add6, double %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add8 = call double @llvm.experimental.constrained.fadd.f64(
double %add7, double %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add9 = call double @llvm.experimental.constrained.fadd.f64(
double %add8, double %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%add10 = call double @llvm.experimental.constrained.fadd.f64(
double %add9, double %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %add10
}
attributes #0 = { strictfp }


@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit addition.
define void @f1(fp128 *%ptr, float %f2) {
define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%sum = call fp128 @llvm.experimental.constrained.fadd.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr
ret void
}


@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
define void @f1(fp128 *%ptr1, fp128 *%ptr2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@ -16,7 +16,7 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fadd.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr1
ret void
}


@ -30,7 +30,7 @@ define void @f1(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f2:
; CHECK: sqebr
; CHECK: ste
@ -41,11 +41,11 @@ define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.ignore")
metadata !"fpexcept.ignore") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.ignore")
metadata !"fpexcept.ignore") #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@ -53,7 +53,7 @@ define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) {
define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f3:
; CHECK: sqebr
; CHECK: ste
@ -64,11 +64,11 @@ define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@ -98,7 +98,7 @@ define void @f4(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f5:
; CHECK: sqebr
; CHECK: ste
@ -109,11 +109,11 @@ define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.ignore")
metadata !"fpexcept.ignore") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.ignore")
metadata !"fpexcept.ignore") #0
store volatile float %sqrt1, float *%ptr1
store volatile float %sqrt2, float *%ptr2
@ -121,7 +121,7 @@ define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) {
define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f6:
; CHECK: sqebr
; CHECK: sqebr
@ -132,11 +132,11 @@ define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store volatile float %sqrt1, float *%ptr1
store volatile float %sqrt2, float *%ptr2
@ -166,7 +166,7 @@ define void @f7(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f8:
; CHECK: sqebr
; CHECK: sqebr
@ -177,13 +177,13 @@ define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.ignore")
metadata !"fpexcept.ignore") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.ignore")
metadata !"fpexcept.ignore") #0
call void @llvm.s390.sfpc(i32 0)
call void @llvm.s390.sfpc(i32 0) #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@ -191,7 +191,7 @@ define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f9:
; CHECK: sqebr
; CHECK: sqebr
@ -202,13 +202,13 @@ define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
call void @llvm.s390.sfpc(i32 0)
call void @llvm.s390.sfpc(i32 0) #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@ -216,3 +216,4 @@ define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
attributes #0 = { strictfp }


@ -13,7 +13,7 @@ declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, me
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
; Test f64->f32.
define float @f1(double %d1, double %d2) {
define float @f1(double %d1, double %d2) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: ledbr %f0, %f2
; CHECK-VECTOR: ledbra %f0, 0, %f2, 0
@ -21,12 +21,12 @@ define float @f1(double %d1, double %d2) {
%res = call float @llvm.experimental.constrained.fptrunc.f32.f64(
double %d2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test f128->f32.
define float @f2(fp128 *%ptr) {
define float @f2(fp128 *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: lexbr %f0, %f0
; CHECK: br %r14
@ -34,13 +34,13 @@ define float @f2(fp128 *%ptr) {
%res = call float @llvm.experimental.constrained.fptrunc.f32.f128(
fp128 %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Make sure that we don't use %f0 as the destination of LEXBR when %f2
; is still live.
define void @f3(float *%dst, fp128 *%ptr, float %d1, float %d2) {
define void @f3(float *%dst, fp128 *%ptr, float %d1, float %d2) #0 {
; CHECK-LABEL: f3:
; CHECK: lexbr %f1, %f1
; CHECK: aebr %f1, %f2
@ -50,17 +50,17 @@ define void @f3(float *%dst, fp128 *%ptr, float %d1, float %d2) {
%conv = call float @llvm.experimental.constrained.fptrunc.f32.f128(
fp128 %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%res = call float @llvm.experimental.constrained.fadd.f32(
float %conv, float %d2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store float %res, float *%dst
ret void
}
; Test f128->f64.
define double @f4(fp128 *%ptr) {
define double @f4(fp128 *%ptr) #0 {
; CHECK-LABEL: f4:
; CHECK: ldxbr %f0, %f0
; CHECK: br %r14
@ -68,12 +68,12 @@ define double @f4(fp128 *%ptr) {
%res = call double @llvm.experimental.constrained.fptrunc.f64.f128(
fp128 %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Like f3, but for f128->f64.
define void @f5(double *%dst, fp128 *%ptr, double %d1, double %d2) {
define void @f5(double *%dst, fp128 *%ptr, double %d1, double %d2) #0 {
; CHECK-LABEL: f5:
; CHECK: ldxbr %f1, %f1
; CHECK-SCALAR: adbr %f1, %f2
@ -85,11 +85,13 @@ define void @f5(double *%dst, fp128 *%ptr, double %d1, double %d2) {
%conv = call double @llvm.experimental.constrained.fptrunc.f64.f128(
fp128 %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%res = call double @llvm.experimental.constrained.fadd.f64(
double %conv, double %d2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store double %res, double *%dst
ret void
}
attributes #0 = { strictfp }


@ -5,41 +5,41 @@
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
; Check register extension.
define double @f1(float %val) {
define double @f1(float %val) #0 {
; CHECK-LABEL: f1:
; CHECK: ldebr %f0, %f0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the LDEB range.
define double @f2(float *%ptr) {
define double @f2(float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: ldeb %f0, 0(%r2)
; CHECK: br %r14
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned LDEB range.
define double @f3(float *%base) {
define double @f3(float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: ldeb %f0, 4092(%r2)
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(float *%base) {
define double @f4(float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: ldeb %f0, 0(%r2)
@ -47,12 +47,12 @@ define double @f4(float *%base) {
%ptr = getelementptr float, float *%base, i64 1024
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(float *%base) {
define double @f5(float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: ldeb %f0, 0(%r2)
@ -60,12 +60,12 @@ define double @f5(float *%base) {
%ptr = getelementptr float, float *%base, i64 -1
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that LDEB allows indices.
define double @f6(float *%base, i64 %index) {
define double @f6(float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: ldeb %f0, 400(%r1,%r2)
@ -74,7 +74,8 @@ define double @f6(float *%base, i64 %index) {
%ptr2 = getelementptr float, float *%ptr1, i64 100
%val = load float, float *%ptr2
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
attributes #0 = { strictfp }


@ -5,20 +5,20 @@
declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
; Check register extension.
define void @f1(fp128 *%dst, float %val) {
define void @f1(fp128 *%dst, float %val) #0 {
; CHECK-LABEL: f1:
; CHECK: lxebr %f0, %f0
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the low end of the LXEB range.
define void @f2(fp128 *%dst, float *%ptr) {
define void @f2(fp128 *%dst, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: lxeb %f0, 0(%r3)
; CHECK: std %f0, 0(%r2)
@ -26,13 +26,13 @@ define void @f2(fp128 *%dst, float *%ptr) {
; CHECK: br %r14
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the high end of the aligned LXEB range.
define void @f3(fp128 *%dst, float *%base) {
define void @f3(fp128 *%dst, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: lxeb %f0, 4092(%r3)
; CHECK: std %f0, 0(%r2)
@ -41,14 +41,14 @@ define void @f3(fp128 *%dst, float *%base) {
%ptr = getelementptr float, float *%base, i64 1023
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f4(fp128 *%dst, float *%base) {
define void @f4(fp128 *%dst, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r3, 4096
; CHECK: lxeb %f0, 0(%r3)
@ -58,13 +58,13 @@ define void @f4(fp128 *%dst, float *%base) {
%ptr = getelementptr float, float *%base, i64 1024
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check negative displacements, which also need separate address logic.
define void @f5(fp128 *%dst, float *%base) {
define void @f5(fp128 *%dst, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r3, -4
; CHECK: lxeb %f0, 0(%r3)
@ -74,13 +74,13 @@ define void @f5(fp128 *%dst, float *%base) {
%ptr = getelementptr float, float *%base, i64 -1
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that LXEB allows indices.
define void @f6(fp128 *%dst, float *%base, i64 %index) {
define void @f6(fp128 *%dst, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r4, 2
; CHECK: lxeb %f0, 400(%r1,%r3)
@ -91,8 +91,9 @@ define void @f6(fp128 *%dst, float *%base, i64 %index) {
%ptr2 = getelementptr float, float *%ptr1, i64 100
%val = load float, float *%ptr2
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
attributes #0 = { strictfp }


@ -5,20 +5,20 @@
declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
; Check register extension.
define void @f1(fp128 *%dst, double %val) {
define void @f1(fp128 *%dst, double %val) #0 {
; CHECK-LABEL: f1:
; CHECK: lxdbr %f0, %f0
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the low end of the LXDB range.
define void @f2(fp128 *%dst, double *%ptr) {
define void @f2(fp128 *%dst, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: lxdb %f0, 0(%r3)
; CHECK: std %f0, 0(%r2)
@ -26,13 +26,13 @@ define void @f2(fp128 *%dst, double *%ptr) {
; CHECK: br %r14
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the high end of the aligned LXDB range.
define void @f3(fp128 *%dst, double *%base) {
define void @f3(fp128 *%dst, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: lxdb %f0, 4088(%r3)
; CHECK: std %f0, 0(%r2)
@ -41,14 +41,14 @@ define void @f3(fp128 *%dst, double *%base) {
%ptr = getelementptr double, double *%base, i64 511
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f4(fp128 *%dst, double *%base) {
define void @f4(fp128 *%dst, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r3, 4096
; CHECK: lxdb %f0, 0(%r3)
@ -58,13 +58,13 @@ define void @f4(fp128 *%dst, double *%base) {
%ptr = getelementptr double, double *%base, i64 512
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check negative displacements, which also need separate address logic.
define void @f5(fp128 *%dst, double *%base) {
define void @f5(fp128 *%dst, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r3, -8
; CHECK: lxdb %f0, 0(%r3)
@ -74,13 +74,13 @@ define void @f5(fp128 *%dst, double *%base) {
%ptr = getelementptr double, double *%base, i64 -1
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that LXDB allows indices.
define void @f6(fp128 *%dst, double *%base, i64 %index) {
define void @f6(fp128 *%dst, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r4, 3
; CHECK: lxdb %f0, 800(%r1,%r3)
@ -91,8 +91,9 @@ define void @f6(fp128 *%dst, double *%base, i64 %index) {
%ptr2 = getelementptr double, double *%ptr1, i64 100
%val = load double, double *%ptr2
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
attributes #0 = { strictfp }


@ -7,27 +7,27 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata)
; Test f32->i32.
define i32 @f1(float %f) {
define i32 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: cfebr %r2, 5, %f0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f64->i32.
define i32 @f2(double %f) {
define i32 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: cfdbr %r2, 5, %f0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f128->i32.
define i32 @f3(fp128 *%src) {
define i32 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: ld %f0, 0(%r2)
; CHECK: ld %f2, 8(%r2)
@ -35,6 +35,8 @@ define i32 @f3(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
attributes #0 = { strictfp }


@ -14,7 +14,7 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
; Test f32->i32.
define i32 @f1(float %f) {
define i32 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI0_0
@ -30,12 +30,12 @@ define i32 @f1(float %f) {
; CHECK-NEXT: xr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f64->i32.
define i32 @f2(double %f) {
define i32 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI1_0
@ -51,12 +51,12 @@ define i32 @f2(double %f) {
; CHECK-NEXT: xr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f128->i32.
define i32 @f3(fp128 *%src) {
define i32 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: ld %f0, 0(%r2)
@ -75,6 +75,8 @@ define i32 @f3(fp128 *%src) {
; CHECK-NEXT: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
attributes #0 = { strictfp }


@ -7,27 +7,27 @@ declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata)
; Test f32->i64.
define i64 @f1(float %f) {
define i64 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: cgebr %r2, 5, %f0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f64->i64.
define i64 @f2(double %f) {
define i64 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: cgdbr %r2, 5, %f0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f128->i64.
define i64 @f3(fp128 *%src) {
define i64 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: ld %f0, 0(%r2)
; CHECK: ld %f2, 8(%r2)
@ -35,6 +35,8 @@ define i64 @f3(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
attributes #0 = { strictfp }


@ -13,7 +13,7 @@ declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
; Test f32->i64.
define i64 @f1(float %f) {
define i64 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI0_0
@ -29,12 +29,12 @@ define i64 @f1(float %f) {
; CHECK-NEXT: xgr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f64->i64.
define i64 @f2(double %f) {
define i64 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI1_0
@ -50,12 +50,12 @@ define i64 @f2(double %f) {
; CHECK-NEXT: xgr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f128->i64.
define i64 @f3(fp128 *%src) {
define i64 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: ld %f0, 0(%r2)
@ -74,6 +74,8 @@ define i64 @f3(fp128 *%src) {
; CHECK-NEXT: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
attributes #0 = { strictfp }


@ -11,27 +11,27 @@ declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
; Test f32->i32.
define i32 @f1(float %f) {
define i32 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: clfebr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f64->i32.
define i32 @f2(double %f) {
define i32 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: clfdbr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f128->i32.
define i32 @f3(fp128 *%src) {
define i32 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK-DAG: ld %f0, 0(%r2)
; CHECK-DAG: ld %f2, 8(%r2)
@ -39,32 +39,32 @@ define i32 @f3(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f32->i64.
define i64 @f4(float %f) {
define i64 @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: clgebr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f64->i64.
define i64 @f5(double %f) {
define i64 @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: clgdbr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f128->i64.
define i64 @f6(fp128 *%src) {
define i64 @f6(fp128 *%src) #0 {
; CHECK-LABEL: f6:
; CHECK-DAG: ld %f0, 0(%r2)
; CHECK-DAG: ld %f2, 8(%r2)
@ -72,6 +72,8 @@ define i64 @f6(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
attributes #0 = { strictfp }


@ -9,7 +9,7 @@ declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
; Test f128->f64.
define double @f1(fp128 *%ptr) {
define double @f1(fp128 *%ptr) #0 {
; CHECK-LABEL: f1:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wflrx %f0, [[REG]], 0, 0
@ -18,12 +18,12 @@ define double @f1(fp128 *%ptr) {
%res = call double @llvm.experimental.constrained.fptrunc.f64.f128(
fp128 %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test f128->f32.
define float @f2(fp128 *%ptr) {
define float @f2(fp128 *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wflrx %f0, [[REG]], 0, 3
@ -33,32 +33,33 @@ define float @f2(fp128 *%ptr) {
%res = call float @llvm.experimental.constrained.fptrunc.f32.f128(
fp128 %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test f64->f128.
define void @f3(fp128 *%dst, double %val) {
define void @f3(fp128 *%dst, double %val) #0 {
; CHECK-LABEL: f3:
; CHECK: wflld [[RES:%v[0-9]+]], %f0
; CHECK: vst [[RES]], 0(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Test f32->f128.
define void @f4(fp128 *%dst, float %val) {
define void @f4(fp128 *%dst, float %val) #0 {
; CHECK-LABEL: f4:
; CHECK: ldebr %f0, %f0
; CHECK: wflld [[RES:%v[0-9]+]], %f0
; CHECK: vst [[RES]], 0(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
attributes #0 = { strictfp }


@ -11,7 +11,7 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
; Test signed f128->i32.
define i32 @f5(fp128 *%src) {
define i32 @f5(fp128 *%src) #0 {
; CHECK-LABEL: f5:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@ -19,12 +19,12 @@ define i32 @f5(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test signed f128->i64.
define i64 @f6(fp128 *%src) {
define i64 @f6(fp128 *%src) #0 {
; CHECK-LABEL: f6:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@ -32,12 +32,12 @@ define i64 @f6(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test unsigned f128->i32.
define i32 @f7(fp128 *%src) {
define i32 @f7(fp128 *%src) #0 {
; CHECK-LABEL: f7:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@ -45,12 +45,12 @@ define i32 @f7(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test unsigned f128->i64.
define i64 @f8(fp128 *%src) {
define i64 @f8(fp128 *%src) #0 {
; CHECK-LABEL: f8:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@ -58,6 +58,8 @@ define i64 @f8(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %f,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i64 %conv
}
attributes #0 = { strictfp }


@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
; Check register division.
define float @f1(float %f1, float %f2) {
define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: debr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the DEB range.
define float @f2(float %f1, float *%ptr) {
define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: deb %f0, 0(%r2)
; CHECK: br %r14
@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned DEB range.
define float @f3(float %f1, float *%base) {
define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: deb %f0, 4092(%r2)
; CHECK: br %r14
@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define float @f4(float %f1, float *%base) {
define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: deb %f0, 0(%r2)
@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
define float @f5(float %f1, float *%base) {
define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: deb %f0, 0(%r2)
@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that DEB allows indices.
define float @f6(float %f1, float *%base, i64 %index) {
define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: deb %f0, 400(%r1,%r2)
@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that divisions of spilled values can use DEB rather than DEBR.
define float @f7(float *%ptr0) {
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: deb %f0, 16{{[04]}}(%r15)
@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
%ret = call float @foo()
%ret = call float @foo() #0
%div0 = call float @llvm.experimental.constrained.fdiv.f32(
float %ret, float %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div1 = call float @llvm.experimental.constrained.fdiv.f32(
float %div0, float %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div2 = call float @llvm.experimental.constrained.fdiv.f32(
float %div1, float %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div3 = call float @llvm.experimental.constrained.fdiv.f32(
float %div2, float %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div4 = call float @llvm.experimental.constrained.fdiv.f32(
float %div3, float %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div5 = call float @llvm.experimental.constrained.fdiv.f32(
float %div4, float %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div6 = call float @llvm.experimental.constrained.fdiv.f32(
float %div5, float %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div7 = call float @llvm.experimental.constrained.fdiv.f32(
float %div6, float %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div8 = call float @llvm.experimental.constrained.fdiv.f32(
float %div7, float %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div9 = call float @llvm.experimental.constrained.fdiv.f32(
float %div8, float %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div10 = call float @llvm.experimental.constrained.fdiv.f32(
float %div9, float %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %div10
}
attributes #0 = { strictfp }


@ -8,19 +8,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
; Check register division.
define double @f1(double %f1, double %f2) {
define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: ddbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the DDB range.
define double @f2(double %f1, double *%ptr) {
define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: ddb %f0, 0(%r2)
; CHECK: br %r14
@ -28,12 +28,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned DDB range.
define double @f3(double %f1, double *%base) {
define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: ddb %f0, 4088(%r2)
; CHECK: br %r14
@ -42,13 +42,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(double %f1, double *%base) {
define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: ddb %f0, 0(%r2)
@ -58,12 +58,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(double %f1, double *%base) {
define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: ddb %f0, 0(%r2)
@ -73,12 +73,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that DDB allows indices.
define double @f6(double %f1, double *%base, i64 %index) {
define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: ddb %f0, 800(%r1,%r2)
@ -89,12 +89,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that divisions of spilled values can use DDB rather than DDBR.
define double @f7(double *%ptr0) {
define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: ddb %f0, 160(%r15)
@ -122,52 +122,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
%ret = call double @foo()
%ret = call double @foo() #0
%div0 = call double @llvm.experimental.constrained.fdiv.f64(
double %ret, double %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div1 = call double @llvm.experimental.constrained.fdiv.f64(
double %div0, double %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div2 = call double @llvm.experimental.constrained.fdiv.f64(
double %div1, double %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div3 = call double @llvm.experimental.constrained.fdiv.f64(
double %div2, double %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div4 = call double @llvm.experimental.constrained.fdiv.f64(
double %div3, double %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div5 = call double @llvm.experimental.constrained.fdiv.f64(
double %div4, double %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div6 = call double @llvm.experimental.constrained.fdiv.f64(
double %div5, double %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div7 = call double @llvm.experimental.constrained.fdiv.f64(
double %div6, double %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div8 = call double @llvm.experimental.constrained.fdiv.f64(
double %div7, double %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div9 = call double @llvm.experimental.constrained.fdiv.f64(
double %div8, double %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%div10 = call double @llvm.experimental.constrained.fdiv.f64(
double %div9, double %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %div10
}
attributes #0 = { strictfp }


@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit division.
define void @f1(fp128 *%ptr, float %f2) {
define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%sum = call fp128 @llvm.experimental.constrained.fdiv.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr
ret void
}


@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata)
define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
define void @f1(fp128 *%ptr1, fp128 *%ptr2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@ -16,7 +16,7 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fdiv.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr1
ret void
}


@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
; Check register multiplication.
define float @f1(float %f1, float %f2) {
define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: meebr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the MEEB range.
define float @f2(float %f1, float *%ptr) {
define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: meeb %f0, 0(%r2)
; CHECK: br %r14
@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned MEEB range.
define float @f3(float %f1, float *%base) {
define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: meeb %f0, 4092(%r2)
; CHECK: br %r14
@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define float @f4(float %f1, float *%base) {
define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: meeb %f0, 0(%r2)
@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
define float @f5(float %f1, float *%base) {
define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: meeb %f0, 0(%r2)
@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that MEEB allows indices.
define float @f6(float %f1, float *%base, i64 %index) {
define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: meeb %f0, 400(%r1,%r2)
@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that multiplications of spilled values can use MEEB rather than MEEBR.
define float @f7(float *%ptr0) {
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: meeb %f0, 16{{[04]}}(%r15)
@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
%ret = call float @foo()
%ret = call float @foo() #0
%mul0 = call float @llvm.experimental.constrained.fmul.f32(
float %ret, float %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul1 = call float @llvm.experimental.constrained.fmul.f32(
float %mul0, float %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul2 = call float @llvm.experimental.constrained.fmul.f32(
float %mul1, float %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul3 = call float @llvm.experimental.constrained.fmul.f32(
float %mul2, float %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul4 = call float @llvm.experimental.constrained.fmul.f32(
float %mul3, float %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul5 = call float @llvm.experimental.constrained.fmul.f32(
float %mul4, float %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul6 = call float @llvm.experimental.constrained.fmul.f32(
float %mul5, float %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul7 = call float @llvm.experimental.constrained.fmul.f32(
float %mul6, float %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul8 = call float @llvm.experimental.constrained.fmul.f32(
float %mul7, float %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul9 = call float @llvm.experimental.constrained.fmul.f32(
float %mul8, float %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul10 = call float @llvm.experimental.constrained.fmul.f32(
float %mul9, float %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %mul10
}
attributes #0 = { strictfp }
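Taken together, the changes in this file follow one pattern, sketched below on a made-up @example function (the function and the surrounding mini-file are illustrative, not part of the diff): the defining function references a strictfp attribute group, every constrained call site repeats that reference, and the group itself is declared once at the end of the file.
declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
; The function using the constrained intrinsic must be strictfp, and so must each call site.
define float @example(float %f1, float %f2) #0 {
  %res = call float @llvm.experimental.constrained.fmul.f32(
                        float %f1, float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}
attributes #0 = { strictfp }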

View File

@ -7,7 +7,7 @@ declare float @foo()
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
; Check register multiplication.
define double @f1(float %f1, float %f2) {
define double @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: mdebr %f0, %f2
; CHECK: br %r14
@ -16,12 +16,12 @@ define double @f1(float %f1, float %f2) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the MDEB range.
define double @f2(float %f1, float *%ptr) {
define double @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
@ -31,12 +31,12 @@ define double @f2(float %f1, float *%ptr) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned MDEB range.
define double @f3(float %f1, float *%base) {
define double @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: mdeb %f0, 4092(%r2)
; CHECK: br %r14
@ -47,13 +47,13 @@ define double @f3(float %f1, float *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(float %f1, float *%base) {
define double @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdeb %f0, 0(%r2)
@ -65,12 +65,12 @@ define double @f4(float %f1, float *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(float %f1, float *%base) {
define double @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: mdeb %f0, 0(%r2)
@ -82,12 +82,12 @@ define double @f5(float %f1, float *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that MDEB allows indices.
define double @f6(float %f1, float *%base, i64 %index) {
define double @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mdeb %f0, 400(%r1,%r2)
@ -100,12 +100,12 @@ define double @f6(float %f1, float *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that multiplications of spilled values can use MDEB rather than MDEBR.
define float @f7(float *%ptr0) {
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mdeb %f0, 16{{[04]}}(%r15)
@ -157,18 +157,18 @@ define float @f7(float *%ptr0) {
store float %frob9, float *%ptr9
store float %frob10, float *%ptr10
%ret = call float @foo()
%ret = call float @foo() #0
%accext0 = fpext float %ret to double
%ext0 = fpext float %frob0 to double
%mul0 = call double @llvm.experimental.constrained.fmul.f64(
double %accext0, double %ext0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra0 = call double @llvm.experimental.constrained.fmul.f64(
double %mul0, double 1.01,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc0 = fptrunc double %extra0 to float
%accext1 = fpext float %trunc0 to double
@ -176,11 +176,11 @@ define float @f7(float *%ptr0) {
%mul1 = call double @llvm.experimental.constrained.fmul.f64(
double %accext1, double %ext1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra1 = call double @llvm.experimental.constrained.fmul.f64(
double %mul1, double 1.11,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc1 = fptrunc double %extra1 to float
%accext2 = fpext float %trunc1 to double
@ -188,11 +188,11 @@ define float @f7(float *%ptr0) {
%mul2 = call double @llvm.experimental.constrained.fmul.f64(
double %accext2, double %ext2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra2 = call double @llvm.experimental.constrained.fmul.f64(
double %mul2, double 1.21,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc2 = fptrunc double %extra2 to float
%accext3 = fpext float %trunc2 to double
@ -200,11 +200,11 @@ define float @f7(float *%ptr0) {
%mul3 = call double @llvm.experimental.constrained.fmul.f64(
double %accext3, double %ext3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra3 = call double @llvm.experimental.constrained.fmul.f64(
double %mul3, double 1.31,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc3 = fptrunc double %extra3 to float
%accext4 = fpext float %trunc3 to double
@ -212,11 +212,11 @@ define float @f7(float *%ptr0) {
%mul4 = call double @llvm.experimental.constrained.fmul.f64(
double %accext4, double %ext4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra4 = call double @llvm.experimental.constrained.fmul.f64(
double %mul4, double 1.41,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc4 = fptrunc double %extra4 to float
%accext5 = fpext float %trunc4 to double
@ -224,11 +224,11 @@ define float @f7(float *%ptr0) {
%mul5 = call double @llvm.experimental.constrained.fmul.f64(
double %accext5, double %ext5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra5 = call double @llvm.experimental.constrained.fmul.f64(
double %mul5, double 1.51,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc5 = fptrunc double %extra5 to float
%accext6 = fpext float %trunc5 to double
@ -236,11 +236,11 @@ define float @f7(float *%ptr0) {
%mul6 = call double @llvm.experimental.constrained.fmul.f64(
double %accext6, double %ext6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra6 = call double @llvm.experimental.constrained.fmul.f64(
double %mul6, double 1.61,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc6 = fptrunc double %extra6 to float
%accext7 = fpext float %trunc6 to double
@ -248,11 +248,11 @@ define float @f7(float *%ptr0) {
%mul7 = call double @llvm.experimental.constrained.fmul.f64(
double %accext7, double %ext7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra7 = call double @llvm.experimental.constrained.fmul.f64(
double %mul7, double 1.71,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc7 = fptrunc double %extra7 to float
%accext8 = fpext float %trunc7 to double
@ -260,11 +260,11 @@ define float @f7(float *%ptr0) {
%mul8 = call double @llvm.experimental.constrained.fmul.f64(
double %accext8, double %ext8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra8 = call double @llvm.experimental.constrained.fmul.f64(
double %mul8, double 1.81,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc8 = fptrunc double %extra8 to float
%accext9 = fpext float %trunc8 to double
@ -272,12 +272,14 @@ define float @f7(float *%ptr0) {
%mul9 = call double @llvm.experimental.constrained.fmul.f64(
double %accext9, double %ext9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%extra9 = call double @llvm.experimental.constrained.fmul.f64(
double %mul9, double 1.91,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc9 = fptrunc double %extra9 to float
ret float %trunc9
}
attributes #0 = { strictfp }

View File

@ -8,19 +8,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
; Check register multiplication.
define double @f1(double %f1, double %f2) {
define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: mdbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the MDB range.
define double @f2(double %f1, double *%ptr) {
define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
@ -28,12 +28,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned MDB range.
define double @f3(double %f1, double *%base) {
define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: mdb %f0, 4088(%r2)
; CHECK: br %r14
@ -42,13 +42,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(double %f1, double *%base) {
define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdb %f0, 0(%r2)
@ -58,12 +58,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(double %f1, double *%base) {
define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: mdb %f0, 0(%r2)
@ -73,12 +73,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that MDB allows indices.
define double @f6(double %f1, double *%base, i64 %index) {
define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mdb %f0, 800(%r1,%r2)
@ -89,12 +89,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that multiplications of spilled values can use MDB rather than MDBR.
define double @f7(double *%ptr0) {
define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: mdb %f0, 160(%r15)
@ -122,52 +122,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
%ret = call double @foo()
%ret = call double @foo() #0
%mul0 = call double @llvm.experimental.constrained.fmul.f64(
double %ret, double %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul1 = call double @llvm.experimental.constrained.fmul.f64(
double %mul0, double %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul2 = call double @llvm.experimental.constrained.fmul.f64(
double %mul1, double %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul3 = call double @llvm.experimental.constrained.fmul.f64(
double %mul2, double %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul4 = call double @llvm.experimental.constrained.fmul.f64(
double %mul3, double %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul5 = call double @llvm.experimental.constrained.fmul.f64(
double %mul4, double %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul6 = call double @llvm.experimental.constrained.fmul.f64(
double %mul5, double %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul7 = call double @llvm.experimental.constrained.fmul.f64(
double %mul6, double %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul8 = call double @llvm.experimental.constrained.fmul.f64(
double %mul7, double %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul9 = call double @llvm.experimental.constrained.fmul.f64(
double %mul8, double %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul10 = call double @llvm.experimental.constrained.fmul.f64(
double %mul9, double %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %mul10
}
attributes #0 = { strictfp }

View File

@ -10,7 +10,7 @@ declare double @foo()
; Check register multiplication. "mxdbr %f0, %f2" is not valid from LLVM's
; point of view, because %f2 is the low register of the FP128 %f0. Pass the
; multiplier in %f4 instead.
define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) {
define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) #0 {
; CHECK-LABEL: f1:
; CHECK: mxdbr %f0, %f4
; CHECK: std %f0, 0(%r2)
@ -21,13 +21,13 @@ define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the low end of the MXDB range.
define void @f2(double %f1, double *%ptr, fp128 *%dst) {
define void @f2(double %f1, double *%ptr, fp128 *%dst) #0 {
; CHECK-LABEL: f2:
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
@ -39,13 +39,13 @@ define void @f2(double %f1, double *%ptr, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the high end of the aligned MXDB range.
define void @f3(double %f1, double *%base, fp128 *%dst) {
define void @f3(double %f1, double *%base, fp128 *%dst) #0 {
; CHECK-LABEL: f3:
; CHECK: mxdb %f0, 4088(%r2)
; CHECK: std %f0, 0(%r3)
@ -58,14 +58,14 @@ define void @f3(double %f1, double *%base, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f4(double %f1, double *%base, fp128 *%dst) {
define void @f4(double %f1, double *%base, fp128 *%dst) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mxdb %f0, 0(%r2)
@ -79,13 +79,13 @@ define void @f4(double %f1, double *%base, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check negative displacements, which also need separate address logic.
define void @f5(double %f1, double *%base, fp128 *%dst) {
define void @f5(double %f1, double *%base, fp128 *%dst) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: mxdb %f0, 0(%r2)
@ -99,13 +99,13 @@ define void @f5(double %f1, double *%base, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that MXDB allows indices.
define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) {
define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mxdb %f0, 800(%r1,%r2)
@ -120,13 +120,13 @@ define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that multiplications of spilled values can use MXDB rather than MXDBR.
define double @f7(double *%ptr0) {
define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mxdb %f0, 160(%r15)
@ -178,19 +178,19 @@ define double @f7(double *%ptr0) {
store double %frob9, double *%ptr9
store double %frob10, double *%ptr10
%ret = call double @foo()
%ret = call double @foo() #0
%accext0 = fpext double %ret to fp128
%ext0 = fpext double %frob0 to fp128
%mul0 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext0, fp128 %ext0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const0 = fpext double 1.01 to fp128
%extra0 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul0, fp128 %const0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc0 = fptrunc fp128 %extra0 to double
%accext1 = fpext double %trunc0 to fp128
@ -198,12 +198,12 @@ define double @f7(double *%ptr0) {
%mul1 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext1, fp128 %ext1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const1 = fpext double 1.11 to fp128
%extra1 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul1, fp128 %const1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc1 = fptrunc fp128 %extra1 to double
%accext2 = fpext double %trunc1 to fp128
@ -211,12 +211,12 @@ define double @f7(double *%ptr0) {
%mul2 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext2, fp128 %ext2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const2 = fpext double 1.21 to fp128
%extra2 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul2, fp128 %const2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc2 = fptrunc fp128 %extra2 to double
%accext3 = fpext double %trunc2 to fp128
@ -224,12 +224,12 @@ define double @f7(double *%ptr0) {
%mul3 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext3, fp128 %ext3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const3 = fpext double 1.31 to fp128
%extra3 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul3, fp128 %const3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc3 = fptrunc fp128 %extra3 to double
%accext4 = fpext double %trunc3 to fp128
@ -237,12 +237,12 @@ define double @f7(double *%ptr0) {
%mul4 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext4, fp128 %ext4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const4 = fpext double 1.41 to fp128
%extra4 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul4, fp128 %const4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc4 = fptrunc fp128 %extra4 to double
%accext5 = fpext double %trunc4 to fp128
@ -250,12 +250,12 @@ define double @f7(double *%ptr0) {
%mul5 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext5, fp128 %ext5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const5 = fpext double 1.51 to fp128
%extra5 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul5, fp128 %const5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc5 = fptrunc fp128 %extra5 to double
%accext6 = fpext double %trunc5 to fp128
@ -263,12 +263,12 @@ define double @f7(double *%ptr0) {
%mul6 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext6, fp128 %ext6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const6 = fpext double 1.61 to fp128
%extra6 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul6, fp128 %const6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc6 = fptrunc fp128 %extra6 to double
%accext7 = fpext double %trunc6 to fp128
@ -276,12 +276,12 @@ define double @f7(double *%ptr0) {
%mul7 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext7, fp128 %ext7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const7 = fpext double 1.71 to fp128
%extra7 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul7, fp128 %const7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc7 = fptrunc fp128 %extra7 to double
%accext8 = fpext double %trunc7 to fp128
@ -289,12 +289,12 @@ define double @f7(double *%ptr0) {
%mul8 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext8, fp128 %ext8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const8 = fpext double 1.81 to fp128
%extra8 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul8, fp128 %const8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc8 = fptrunc fp128 %extra8 to double
%accext9 = fpext double %trunc8 to fp128
@ -302,13 +302,15 @@ define double @f7(double *%ptr0) {
%mul9 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext9, fp128 %ext9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%const9 = fpext double 1.91 to fp128
%extra9 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul9, fp128 %const9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%trunc9 = fptrunc fp128 %extra9 to double
ret double %trunc9
}
attributes #0 = { strictfp }

View File

@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit multiplication.
define void @f1(fp128 *%ptr, float %f2) {
define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%diff = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %diff, fp128 *%ptr
ret void
}
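Unlike most files in this patch, which route the attribute through a #0 group, this one spells strictfp literally on both the definition and the call site. The two spellings are equivalent at the IR level; a minimal sketch of the literal form, using an invented @example function rather than anything from the diff:
declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
; Same requirement as above, written without an attribute group.
define void @example(fp128 *%ptr, fp128 %x) strictfp {
  %val = load fp128, fp128 *%ptr
  %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %val, fp128 %x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") strictfp
  store fp128 %res, fp128 *%ptr
  ret void
}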

View File

@ -5,7 +5,7 @@
declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
define float @f1(float %f1, float %f2, float %acc) {
define float @f1(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: maebr %f4, %f0, %f2
; CHECK-SCALAR: ler %f0, %f4
@ -14,11 +14,11 @@ define float @f1(float %f1, float %f2, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f2(float %f1, float *%ptr, float %acc) {
define float @f2(float %f1, float *%ptr, float %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: maeb %f2, %f0, 0(%r2)
; CHECK-SCALAR: ler %f0, %f2
@ -28,11 +28,11 @@ define float @f2(float %f1, float *%ptr, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f3(float %f1, float *%base, float %acc) {
define float @f3(float %f1, float *%base, float %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: maeb %f2, %f0, 4092(%r2)
; CHECK-SCALAR: ler %f0, %f2
@ -43,11 +43,11 @@ define float @f3(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f4(float %f1, float *%base, float %acc) {
define float @f4(float %f1, float *%base, float %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -62,11 +62,11 @@ define float @f4(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f5(float %f1, float *%base, float %acc) {
define float @f5(float %f1, float *%base, float %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -81,11 +81,11 @@ define float @f5(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f6(float %f1, float *%base, i64 %index, float %acc) {
define float @f6(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 0(%r1,%r2)
@ -97,11 +97,11 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f7(float %f1, float *%base, i64 %index, float %acc) {
define float @f7(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
@ -114,11 +114,11 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f8(float %f1, float *%base, i64 %index, float %acc) {
define float @f8(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@ -132,6 +132,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
attributes #0 = { strictfp }
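The fused multiply-add tests use the same attribute pattern; the only difference from the binary operations above is the three floating-point operands of the intrinsic. A minimal sketch, again on an invented @example function:
declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
; Computes %a * %b + %c as a single fused operation under strict exception semantics.
define float @example(float %a, float %b, float %c) #0 {
  %res = call float @llvm.experimental.constrained.fma.f32(
                        float %a, float %b, float %c,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}
attributes #0 = { strictfp }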

View File

@ -5,7 +5,7 @@
declare double @llvm.experimental.constrained.fma.f64(double %f1, double %f2, double %f3, metadata, metadata)
define double @f1(double %f1, double %f2, double %acc) {
define double @f1(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: madbr %f4, %f0, %f2
; CHECK-SCALAR: ldr %f0, %f4
@ -14,11 +14,11 @@ define double @f1(double %f1, double %f2, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f2(double %f1, double *%ptr, double %acc) {
define double @f2(double %f1, double *%ptr, double %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: madb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
@ -27,11 +27,11 @@ define double @f2(double %f1, double *%ptr, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f3(double %f1, double *%base, double %acc) {
define double @f3(double %f1, double *%base, double %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: madb %f2, %f0, 4088(%r2)
; CHECK: ldr %f0, %f2
@ -41,11 +41,11 @@ define double @f3(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f4(double %f1, double *%base, double %acc) {
define double @f4(double %f1, double *%base, double %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -59,11 +59,11 @@ define double @f4(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f5(double %f1, double *%base, double %acc) {
define double @f5(double %f1, double *%base, double %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -77,11 +77,11 @@ define double @f5(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f6(double %f1, double *%base, i64 %index, double %acc) {
define double @f6(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: madb %f2, %f0, 0(%r1,%r2)
@ -92,11 +92,11 @@ define double @f6(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f7(double %f1, double *%base, i64 %index, double %acc) {
define double @f7(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 3
; CHECK: madb %f2, %f0, 4088({{%r1,%r2|%r2,%r1}})
@ -108,11 +108,11 @@ define double @f7(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f8(double %f1, double *%base, i64 %index, double %acc) {
define double @f8(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 3
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@ -125,6 +125,8 @@ define double @f8(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
attributes #0 = { strictfp }

View File

@ -5,7 +5,7 @@
declare float @llvm.experimental.constrained.fma.f32(float %f1, float %f2, float %f3, metadata, metadata)
define float @f1(float %f1, float %f2, float %acc) {
define float @f1(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: msebr %f4, %f0, %f2
; CHECK-SCALAR: ler %f0, %f4
@ -15,11 +15,11 @@ define float @f1(float %f1, float %f2, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f2(float %f1, float *%ptr, float %acc) {
define float @f2(float %f1, float *%ptr, float %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: mseb %f2, %f0, 0(%r2)
; CHECK-SCALAR: ler %f0, %f2
@ -30,11 +30,11 @@ define float @f2(float %f1, float *%ptr, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f3(float %f1, float *%base, float %acc) {
define float @f3(float %f1, float *%base, float %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: mseb %f2, %f0, 4092(%r2)
; CHECK-SCALAR: ler %f0, %f2
@ -46,11 +46,11 @@ define float @f3(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f4(float %f1, float *%base, float %acc) {
define float @f4(float %f1, float *%base, float %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -66,11 +66,11 @@ define float @f4(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f5(float %f1, float *%base, float %acc) {
define float @f5(float %f1, float *%base, float %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -86,11 +86,11 @@ define float @f5(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f6(float %f1, float *%base, i64 %index, float %acc) {
define float @f6(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 0(%r1,%r2)
@ -103,11 +103,11 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f7(float %f1, float *%base, i64 %index, float %acc) {
define float @f7(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
@ -121,11 +121,11 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f8(float %f1, float *%base, i64 %index, float %acc) {
define float @f8(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@ -140,6 +140,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
attributes #0 = { strictfp }

View File

@ -5,7 +5,7 @@
declare double @llvm.experimental.constrained.fma.f64(double %f1, double %f2, double %f3, metadata, metadata)
define double @f1(double %f1, double %f2, double %acc) {
define double @f1(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: msdbr %f4, %f0, %f2
; CHECK-SCALAR: ldr %f0, %f4
@ -15,11 +15,11 @@ define double @f1(double %f1, double %f2, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f2(double %f1, double *%ptr, double %acc) {
define double @f2(double %f1, double *%ptr, double %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: msdb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
@ -29,11 +29,11 @@ define double @f2(double %f1, double *%ptr, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f3(double %f1, double *%base, double %acc) {
define double @f3(double %f1, double *%base, double %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: msdb %f2, %f0, 4088(%r2)
; CHECK: ldr %f0, %f2
@ -44,11 +44,11 @@ define double @f3(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f4(double %f1, double *%base, double %acc) {
define double @f4(double %f1, double *%base, double %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -63,11 +63,11 @@ define double @f4(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f5(double %f1, double *%base, double %acc) {
define double @f5(double %f1, double *%base, double %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@ -82,11 +82,11 @@ define double @f5(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f6(double %f1, double *%base, i64 %index, double %acc) {
define double @f6(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: msdb %f2, %f0, 0(%r1,%r2)
@ -98,11 +98,11 @@ define double @f6(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f7(double %f1, double *%base, i64 %index, double %acc) {
define double @f7(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 3
; CHECK: msdb %f2, %f0, 4088({{%r1,%r2|%r2,%r1}})
@ -115,11 +115,11 @@ define double @f7(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f8(double %f1, double *%base, i64 %index, double %acc) {
define double @f8(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 3
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@ -133,6 +133,8 @@ define double @f8(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
attributes #0 = { strictfp }

View File

@ -3,19 +3,19 @@
declare double @llvm.experimental.constrained.fma.f64(double %f1, double %f2, double %f3, metadata, metadata)
declare float @llvm.experimental.constrained.fma.f32(float %f1, float %f2, float %f3, metadata, metadata)
define double @f1(double %f1, double %f2, double %acc) {
define double @f1(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f1:
; CHECK: wfnmadb %f0, %f0, %f2, %f4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negres = fsub double -0.0, %res
ret double %negres
}
define double @f2(double %f1, double %f2, double %acc) {
define double @f2(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: wfnmsdb %f0, %f0, %f2, %f4
; CHECK: br %r14
@ -23,24 +23,24 @@ define double @f2(double %f1, double %f2, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negres = fsub double -0.0, %res
ret double %negres
}
define float @f3(float %f1, float %f2, float %acc) {
define float @f3(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: wfnmasb %f0, %f0, %f2, %f4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negres = fsub float -0.0, %res
ret float %negres
}
define float @f4(float %f1, float %f2, float %acc) {
define float @f4(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f4:
; CHECK: wfnmssb %f0, %f0, %f2, %f4
; CHECK: br %r14
@ -48,8 +48,9 @@ define float @f4(float %f1, float %f2, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negres = fsub float -0.0, %res
ret float %negres
}
attributes #0 = { strictfp }

View File

@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
define void @f1(fp128 *%ptr1, fp128 *%ptr2) #0 {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@ -16,12 +16,12 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %sum, fp128 *%ptr1
ret void
}
define void @f2(double %f1, double %f2, fp128 *%dst) {
define void @f2(double %f1, double %f2, fp128 *%dst) #0 {
; CHECK-LABEL: f2:
; CHECK-DAG: wflld [[REG1:%v[0-9]+]], %f0
; CHECK-DAG: wflld [[REG2:%v[0-9]+]], %f2
@ -33,8 +33,9 @@ define void @f2(double %f1, double %f2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
attributes #0 = { strictfp }

View File

@ -4,33 +4,33 @@
; Test rint for f32.
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
define float @f1(float %f) {
define float @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: fiebr %f0, 0, %f0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.rint.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test rint for f64.
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
define double @f2(double %f) {
define double @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: fidbr %f0, 0, %f0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.rint.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test rint for f128.
declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
define void @f3(fp128 *%ptr) {
define void @f3(fp128 *%ptr) #0 {
; CHECK-LABEL: f3:
; CHECK: fixbr %f0, 0, %f0
; CHECK: br %r14
@ -38,40 +38,40 @@ define void @f3(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.rint.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test nearbyint for f32.
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
define float @f4(float %f) {
define float @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: brasl %r14, nearbyintf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test nearbyint for f64.
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
define double @f5(double %f) {
define double @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: brasl %r14, nearbyint@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test nearbyint for f128.
declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
define void @f6(fp128 *%ptr) {
define void @f6(fp128 *%ptr) #0 {
; CHECK-LABEL: f6:
; CHECK: brasl %r14, nearbyintl@PLT
; CHECK: br %r14
@ -79,40 +79,40 @@ define void @f6(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.nearbyint.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test floor for f32.
declare float @llvm.experimental.constrained.floor.f32(float, metadata, metadata)
define float @f7(float %f) {
define float @f7(float %f) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, floorf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.floor.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test floor for f64.
declare double @llvm.experimental.constrained.floor.f64(double, metadata, metadata)
define double @f8(double %f) {
define double @f8(double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: brasl %r14, floor@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.floor.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test floor for f128.
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata, metadata)
define void @f9(fp128 *%ptr) {
define void @f9(fp128 *%ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: brasl %r14, floorl@PLT
; CHECK: br %r14
@ -120,40 +120,40 @@ define void @f9(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.floor.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test ceil for f32.
declare float @llvm.experimental.constrained.ceil.f32(float, metadata, metadata)
define float @f10(float %f) {
define float @f10(float %f) #0 {
; CHECK-LABEL: f10:
; CHECK: brasl %r14, ceilf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.ceil.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test ceil for f64.
declare double @llvm.experimental.constrained.ceil.f64(double, metadata, metadata)
define double @f11(double %f) {
define double @f11(double %f) #0 {
; CHECK-LABEL: f11:
; CHECK: brasl %r14, ceil@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.ceil.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test ceil for f128.
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata, metadata)
define void @f12(fp128 *%ptr) {
define void @f12(fp128 *%ptr) #0 {
; CHECK-LABEL: f12:
; CHECK: brasl %r14, ceill@PLT
; CHECK: br %r14
@ -161,40 +161,40 @@ define void @f12(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.ceil.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test trunc for f32.
declare float @llvm.experimental.constrained.trunc.f32(float, metadata, metadata)
define float @f13(float %f) {
define float @f13(float %f) #0 {
; CHECK-LABEL: f13:
; CHECK: brasl %r14, truncf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.trunc.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test trunc for f64.
declare double @llvm.experimental.constrained.trunc.f64(double, metadata, metadata)
define double @f14(double %f) {
define double @f14(double %f) #0 {
; CHECK-LABEL: f14:
; CHECK: brasl %r14, trunc@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.trunc.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test trunc for f128.
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata, metadata)
define void @f15(fp128 *%ptr) {
define void @f15(fp128 *%ptr) #0 {
; CHECK-LABEL: f15:
; CHECK: brasl %r14, truncl@PLT
; CHECK: br %r14
@ -202,40 +202,40 @@ define void @f15(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.trunc.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test round for f32.
declare float @llvm.experimental.constrained.round.f32(float, metadata, metadata)
define float @f16(float %f) {
define float @f16(float %f) #0 {
; CHECK-LABEL: f16:
; CHECK: brasl %r14, roundf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.round.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test round for f64.
declare double @llvm.experimental.constrained.round.f64(double, metadata, metadata)
define double @f17(double %f) {
define double @f17(double %f) #0 {
; CHECK-LABEL: f17:
; CHECK: brasl %r14, round@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.round.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test round for f128.
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata, metadata)
define void @f18(fp128 *%ptr) {
define void @f18(fp128 *%ptr) #0 {
; CHECK-LABEL: f18:
; CHECK: brasl %r14, roundl@PLT
; CHECK: br %r14
@ -243,8 +243,9 @@ define void @f18(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.round.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
attributes #0 = { strictfp }

View File

@ -7,20 +7,20 @@
; Test rint for f32.
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
define float @f1(float %f) {
define float @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: fiebr %f0, 0, %f0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.rint.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test rint for f64.
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
define double @f2(double %f) {
define double @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK-SCALAR: fidbr %f0, 0, %f0
; CHECK-VECTOR: fidbra %f0, 0, %f0, 0
@ -28,13 +28,13 @@ define double @f2(double %f) {
%res = call double @llvm.experimental.constrained.rint.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test rint for f128.
declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
define void @f3(fp128 *%ptr) {
define void @f3(fp128 *%ptr) #0 {
; CHECK-LABEL: f3:
; CHECK: fixbr %f0, 0, %f0
; CHECK: br %r14
@ -42,40 +42,40 @@ define void @f3(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.rint.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test nearbyint for f32.
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
define float @f4(float %f) {
define float @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: fiebra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test nearbyint for f64.
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
define double @f5(double %f) {
define double @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: fidbra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test nearbyint for f128.
declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
define void @f6(fp128 *%ptr) {
define void @f6(fp128 *%ptr) #0 {
; CHECK-LABEL: f6:
; CHECK: fixbra %f0, 0, %f0, 4
; CHECK: br %r14
@ -83,40 +83,40 @@ define void @f6(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.nearbyint.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test floor for f32.
declare float @llvm.experimental.constrained.floor.f32(float, metadata, metadata)
define float @f7(float %f) {
define float @f7(float %f) #0 {
; CHECK-LABEL: f7:
; CHECK: fiebra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.floor.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test floor for f64.
declare double @llvm.experimental.constrained.floor.f64(double, metadata, metadata)
define double @f8(double %f) {
define double @f8(double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: fidbra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.floor.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test floor for f128.
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata, metadata)
define void @f9(fp128 *%ptr) {
define void @f9(fp128 *%ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: fixbra %f0, 7, %f0, 4
; CHECK: br %r14
@ -124,40 +124,40 @@ define void @f9(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.floor.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test ceil for f32.
declare float @llvm.experimental.constrained.ceil.f32(float, metadata, metadata)
define float @f10(float %f) {
define float @f10(float %f) #0 {
; CHECK-LABEL: f10:
; CHECK: fiebra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.ceil.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test ceil for f64.
declare double @llvm.experimental.constrained.ceil.f64(double, metadata, metadata)
define double @f11(double %f) {
define double @f11(double %f) #0 {
; CHECK-LABEL: f11:
; CHECK: fidbra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.ceil.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test ceil for f128.
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata, metadata)
define void @f12(fp128 *%ptr) {
define void @f12(fp128 *%ptr) #0 {
; CHECK-LABEL: f12:
; CHECK: fixbra %f0, 6, %f0, 4
; CHECK: br %r14
@ -165,40 +165,40 @@ define void @f12(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.ceil.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test trunc for f32.
declare float @llvm.experimental.constrained.trunc.f32(float, metadata, metadata)
define float @f13(float %f) {
define float @f13(float %f) #0 {
; CHECK-LABEL: f13:
; CHECK: fiebra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.trunc.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test trunc for f64.
declare double @llvm.experimental.constrained.trunc.f64(double, metadata, metadata)
define double @f14(double %f) {
define double @f14(double %f) #0 {
; CHECK-LABEL: f14:
; CHECK: fidbra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.trunc.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test trunc for f128.
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata, metadata)
define void @f15(fp128 *%ptr) {
define void @f15(fp128 *%ptr) #0 {
; CHECK-LABEL: f15:
; CHECK: fixbra %f0, 5, %f0, 4
; CHECK: br %r14
@ -206,40 +206,40 @@ define void @f15(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.trunc.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test round for f32.
declare float @llvm.experimental.constrained.round.f32(float, metadata, metadata)
define float @f16(float %f) {
define float @f16(float %f) #0 {
; CHECK-LABEL: f16:
; CHECK: fiebra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.round.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test round for f64.
declare double @llvm.experimental.constrained.round.f64(double, metadata, metadata)
define double @f17(double %f) {
define double @f17(double %f) #0 {
; CHECK-LABEL: f17:
; CHECK: fidbra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.round.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test round for f128.
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata, metadata)
define void @f18(fp128 *%ptr) {
define void @f18(fp128 *%ptr) #0 {
; CHECK-LABEL: f18:
; CHECK: fixbra %f0, 1, %f0, 4
; CHECK: br %r14
@ -247,8 +247,9 @@ define void @f18(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.round.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
attributes #0 = { strictfp }


@ -4,33 +4,33 @@
; Test rint for f32.
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
define float @f1(float %f) {
define float @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: fiebra %f0, 0, %f0, 0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.rint.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test rint for f64.
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
define double @f2(double %f) {
define double @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: fidbra %f0, 0, %f0, 0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.rint.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test rint for f128.
declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
define void @f3(fp128 *%ptr) {
define void @f3(fp128 *%ptr) #0 {
; CHECK-LABEL: f3:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 0, 0
@ -40,40 +40,40 @@ define void @f3(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.rint.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test nearbyint for f32.
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
define float @f4(float %f) {
define float @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: fiebra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test nearbyint for f64.
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
define double @f5(double %f) {
define double @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: fidbra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test nearbyint for f128.
declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
define void @f6(fp128 *%ptr) {
define void @f6(fp128 *%ptr) #0 {
; CHECK-LABEL: f6:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 0
@ -83,40 +83,40 @@ define void @f6(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.nearbyint.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test floor for f32.
declare float @llvm.experimental.constrained.floor.f32(float, metadata, metadata)
define float @f7(float %f) {
define float @f7(float %f) #0 {
; CHECK-LABEL: f7:
; CHECK: fiebra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.floor.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test floor for f64.
declare double @llvm.experimental.constrained.floor.f64(double, metadata, metadata)
define double @f8(double %f) {
define double @f8(double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: fidbra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.floor.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test floor for f128.
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata, metadata)
define void @f9(fp128 *%ptr) {
define void @f9(fp128 *%ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 7
@ -126,40 +126,40 @@ define void @f9(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.floor.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test ceil for f32.
declare float @llvm.experimental.constrained.ceil.f32(float, metadata, metadata)
define float @f10(float %f) {
define float @f10(float %f) #0 {
; CHECK-LABEL: f10:
; CHECK: fiebra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.ceil.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test ceil for f64.
declare double @llvm.experimental.constrained.ceil.f64(double, metadata, metadata)
define double @f11(double %f) {
define double @f11(double %f) #0 {
; CHECK-LABEL: f11:
; CHECK: fidbra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.ceil.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test ceil for f128.
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata, metadata)
define void @f12(fp128 *%ptr) {
define void @f12(fp128 *%ptr) #0 {
; CHECK-LABEL: f12:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 6
@ -169,40 +169,40 @@ define void @f12(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.ceil.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test trunc for f32.
declare float @llvm.experimental.constrained.trunc.f32(float, metadata, metadata)
define float @f13(float %f) {
define float @f13(float %f) #0 {
; CHECK-LABEL: f13:
; CHECK: fiebra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.trunc.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test trunc for f64.
declare double @llvm.experimental.constrained.trunc.f64(double, metadata, metadata)
define double @f14(double %f) {
define double @f14(double %f) #0 {
; CHECK-LABEL: f14:
; CHECK: fidbra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.trunc.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test trunc for f128.
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata, metadata)
define void @f15(fp128 *%ptr) {
define void @f15(fp128 *%ptr) #0 {
; CHECK-LABEL: f15:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 5
@ -212,40 +212,40 @@ define void @f15(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.trunc.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test round for f32.
declare float @llvm.experimental.constrained.round.f32(float, metadata, metadata)
define float @f16(float %f) {
define float @f16(float %f) #0 {
; CHECK-LABEL: f16:
; CHECK: fiebra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.round.f32(
float %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Test round for f64.
declare double @llvm.experimental.constrained.round.f64(double, metadata, metadata)
define double @f17(double %f) {
define double @f17(double %f) #0 {
; CHECK-LABEL: f17:
; CHECK: fidbra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.round.f64(
double %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Test round for f128.
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata, metadata)
define void @f18(fp128 *%ptr) {
define void @f18(fp128 *%ptr) #0 {
; CHECK-LABEL: f18:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 1
@ -255,8 +255,9 @@ define void @f18(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.round.f128(
fp128 %src,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@
declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
; Check register square root.
define float @f1(float %val) {
define float @f1(float %val) #0 {
; CHECK-LABEL: f1:
; CHECK: sqebr %f0, %f0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the SQEB range.
define float @f2(float *%ptr) {
define float @f2(float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: sqeb %f0, 0(%r2)
; CHECK: br %r14
@ -27,12 +27,12 @@ define float @f2(float *%ptr) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned SQEB range.
define float @f3(float *%base) {
define float @f3(float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: sqeb %f0, 4092(%r2)
; CHECK: br %r14
@ -41,13 +41,13 @@ define float @f3(float *%base) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define float @f4(float *%base) {
define float @f4(float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sqeb %f0, 0(%r2)
@ -57,12 +57,12 @@ define float @f4(float *%base) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
define float @f5(float *%base) {
define float @f5(float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: sqeb %f0, 0(%r2)
@ -72,12 +72,12 @@ define float @f5(float *%base) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that SQEB allows indices.
define float @f6(float *%base, i64 %index) {
define float @f6(float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: sqeb %f0, 400(%r1,%r2)
@ -88,7 +88,8 @@ define float @f6(float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@
declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
; Check register square root.
define double @f1(double %val) {
define double @f1(double %val) #0 {
; CHECK-LABEL: f1:
; CHECK: sqdbr %f0, %f0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the SQDB range.
define double @f2(double *%ptr) {
define double @f2(double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: sqdb %f0, 0(%r2)
; CHECK: br %r14
@ -27,12 +27,12 @@ define double @f2(double *%ptr) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned SQDB range.
define double @f3(double *%base) {
define double @f3(double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: sqdb %f0, 4088(%r2)
; CHECK: br %r14
@ -41,13 +41,13 @@ define double @f3(double *%base) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(double *%base) {
define double @f4(double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sqdb %f0, 0(%r2)
@ -57,12 +57,12 @@ define double @f4(double *%base) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(double *%base) {
define double @f5(double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: sqdb %f0, 0(%r2)
@ -72,12 +72,12 @@ define double @f5(double *%base) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that SQDB allows indices.
define double @f6(double *%base, i64 %index) {
define double @f6(double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: sqdb %f0, 800(%r1,%r2)
@ -88,7 +88,8 @@ define double @f6(double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
attributes #0 = { strictfp }


@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
; There's no memory form of SQXBR.
define void @f1(fp128 *%ptr) {
define void @f1(fp128 *%ptr) strictfp {
; CHECK-LABEL: f1:
; CHECK: ld %f0, 0(%r2)
; CHECK: ld %f2, 8(%r2)
@ -17,7 +17,7 @@ define void @f1(fp128 *%ptr) {
%sqrt = call fp128 @llvm.experimental.constrained.sqrt.f128(
fp128 %orig,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %sqrt, fp128 *%ptr
ret void
}


@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
define void @f1(fp128 *%ptr) {
define void @f1(fp128 *%ptr) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfsqxb [[RES:%v[0-9]+]], [[REG]]
@ -14,7 +14,7 @@ define void @f1(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.sqrt.f128(
fp128 %f,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %res, fp128 *%ptr
ret void
}


@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
; Check register subtraction.
define float @f1(float %f1, float %f2) {
define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: sebr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the SEB range.
define float @f2(float %f1, float *%ptr) {
define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: seb %f0, 0(%r2)
; CHECK: br %r14
@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned SEB range.
define float @f3(float %f1, float *%base) {
define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: seb %f0, 4092(%r2)
; CHECK: br %r14
@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define float @f4(float %f1, float *%base) {
define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: seb %f0, 0(%r2)
@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
define float @f5(float %f1, float *%base) {
define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: seb %f0, 0(%r2)
@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that SEB allows indices.
define float @f6(float %f1, float *%base, i64 %index) {
define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: seb %f0, 400(%r1,%r2)
@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
; Check that subtractions of spilled values can use SEB rather than SEBR.
define float @f7(float *%ptr0) {
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: seb %f0, 16{{[04]}}(%r15)
@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
%ret = call float @foo()
%ret = call float @foo() #0
%sub0 = call float @llvm.experimental.constrained.fsub.f32(
float %ret, float %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub1 = call float @llvm.experimental.constrained.fsub.f32(
float %sub0, float %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub2 = call float @llvm.experimental.constrained.fsub.f32(
float %sub1, float %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub3 = call float @llvm.experimental.constrained.fsub.f32(
float %sub2, float %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub4 = call float @llvm.experimental.constrained.fsub.f32(
float %sub3, float %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub5 = call float @llvm.experimental.constrained.fsub.f32(
float %sub4, float %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub6 = call float @llvm.experimental.constrained.fsub.f32(
float %sub5, float %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub7 = call float @llvm.experimental.constrained.fsub.f32(
float %sub6, float %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub8 = call float @llvm.experimental.constrained.fsub.f32(
float %sub7, float %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub9 = call float @llvm.experimental.constrained.fsub.f32(
float %sub8, float %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub10 = call float @llvm.experimental.constrained.fsub.f32(
float %sub9, float %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %sub10
}
attributes #0 = { strictfp }


@ -8,19 +8,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
; Check register subtraction.
define double @f1(double %f1, double %f2) {
define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: sdbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the SDB range.
define double @f2(double %f1, double *%ptr) {
define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: sdb %f0, 0(%r2)
; CHECK: br %r14
@ -28,12 +28,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned SDB range.
define double @f3(double %f1, double *%base) {
define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: sdb %f0, 4088(%r2)
; CHECK: br %r14
@ -42,13 +42,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(double %f1, double *%base) {
define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sdb %f0, 0(%r2)
@ -58,12 +58,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(double %f1, double *%base) {
define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: sdb %f0, 0(%r2)
@ -73,12 +73,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that SDB allows indices.
define double @f6(double %f1, double *%base, i64 %index) {
define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: sdb %f0, 800(%r1,%r2)
@ -89,12 +89,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
; Check that subtractions of spilled values can use SDB rather than SDBR.
define double @f7(double *%ptr0) {
define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: sdb %f0, 16{{[04]}}(%r15)
@ -122,52 +122,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
%ret = call double @foo()
%ret = call double @foo() #0
%sub0 = call double @llvm.experimental.constrained.fsub.f64(
double %ret, double %val0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub1 = call double @llvm.experimental.constrained.fsub.f64(
double %sub0, double %val1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub2 = call double @llvm.experimental.constrained.fsub.f64(
double %sub1, double %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub3 = call double @llvm.experimental.constrained.fsub.f64(
double %sub2, double %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub4 = call double @llvm.experimental.constrained.fsub.f64(
double %sub3, double %val4,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub5 = call double @llvm.experimental.constrained.fsub.f64(
double %sub4, double %val5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub6 = call double @llvm.experimental.constrained.fsub.f64(
double %sub5, double %val6,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub7 = call double @llvm.experimental.constrained.fsub.f64(
double %sub6, double %val7,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub8 = call double @llvm.experimental.constrained.fsub.f64(
double %sub7, double %val8,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub9 = call double @llvm.experimental.constrained.fsub.f64(
double %sub8, double %val9,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%sub10 = call double @llvm.experimental.constrained.fsub.f64(
double %sub9, double %val10,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %sub10
}
attributes #0 = { strictfp }


@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit subtraction.
define void @f1(fp128 *%ptr, float %f2) {
define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%sum = call fp128 @llvm.experimental.constrained.fsub.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr
ret void
}


@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
define void @f1(fp128 *%ptr1, fp128 *%ptr2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@ -16,7 +16,7 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fsub.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr1
ret void
}


@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2
; Test a v2f64 addition.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2) {
<2 x double> %val2) strictfp {
; CHECK-LABEL: f5:
; CHECK: vfadb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
ret <2 x double> %ret
}
; Test an f64 addition that uses vector registers.
define double @f6(<2 x double> %val1, <2 x double> %val2) {
define double @f6(<2 x double> %val1, <2 x double> %val2) strictfp {
; CHECK-LABEL: f6:
; CHECK: wfadb %f0, %v24, %v26
; CHECK: br %r14
@ -28,6 +28,6 @@ define double @f6(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fadd.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
ret double %ret
}


@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x
; Test a v4f32 addition.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2) {
<4 x float> %val2) strictfp {
; CHECK-LABEL: f1:
; CHECK: vfasb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
ret <4 x float> %ret
}
; Test an f32 addition that uses vector registers.
define float @f2(<4 x float> %val1, <4 x float> %val2) {
define float @f2(<4 x float> %val1, <4 x float> %val2) strictfp {
; CHECK-LABEL: f2:
; CHECK: wfasb %f0, %v24, %v26
; CHECK: br %r14
@ -28,6 +28,6 @@ define float @f2(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fadd.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") strictfp
ret float %ret
}


@ -14,54 +14,55 @@ declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>,
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata)
; Test conversion of f64s to signed i64s.
define <2 x i64> @f1(<2 x double> %doubles) {
define <2 x i64> @f1(<2 x double> %doubles) #0 {
; CHECK-LABEL: f1:
; CHECK: vcgdb %v24, %v24, 0, 5
; CHECK: br %r14
%dwords = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %doubles,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
; Test conversion of f64s to unsigned i64s.
define <2 x i64> @f2(<2 x double> %doubles) {
define <2 x i64> @f2(<2 x double> %doubles) #0 {
; CHECK-LABEL: f2:
; CHECK: vclgdb %v24, %v24, 0, 5
; CHECK: br %r14
%dwords = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %doubles,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
; Test conversion of f64s to signed i32s, which must compile.
define void @f5(<2 x double> %doubles, <2 x i32> *%ptr) {
define void @f5(<2 x double> %doubles, <2 x i32> *%ptr) #0 {
%words = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %doubles,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store <2 x i32> %words, <2 x i32> *%ptr
ret void
}
; Test conversion of f64s to unsigned i32s, which must compile.
define void @f6(<2 x double> %doubles, <2 x i32> *%ptr) {
define void @f6(<2 x double> %doubles, <2 x i32> *%ptr) #0 {
%words = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %doubles,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store <2 x i32> %words, <2 x i32> *%ptr
ret void
}
; Test conversion of f32s to signed i64s, which must compile.
define <2 x i64> @f9(<2 x float> *%ptr) {
define <2 x i64> @f9(<2 x float> *%ptr) #0 {
%floats = load <2 x float>, <2 x float> *%ptr
%dwords = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float> %floats,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
; Test conversion of f32s to unsigned i64s, which must compile.
define <2 x i64> @f10(<2 x float> *%ptr) {
define <2 x i64> @f10(<2 x float> *%ptr) #0 {
%floats = load <2 x float>, <2 x float> *%ptr
%dwords = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float> %floats,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
attributes #0 = { strictfp }


@ -8,22 +8,23 @@ declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>,
declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
; Test conversion of f32s to signed i32s.
define <4 x i32> @f1(<4 x float> %floats) {
define <4 x i32> @f1(<4 x float> %floats) #0 {
; CHECK-LABEL: f1:
; CHECK: vcfeb %v24, %v24, 0, 5
; CHECK: br %r14
%words = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %floats,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x i32> %words
}
; Test conversion of f32s to unsigned i32s.
define <4 x i32> @f2(<4 x float> %floats) {
define <4 x i32> @f2(<4 x float> %floats) #0 {
; CHECK-LABEL: f2:
; CHECK: vclfeb %v24, %v24, 0, 5
; CHECK: br %r14
%words = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %floats,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x i32> %words
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2
; Test a v2f64 division.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2) {
<2 x double> %val2) #0 {
; CHECK-LABEL: f5:
; CHECK: vfddb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test an f64 division that uses vector registers.
define double @f6(<2 x double> %val1, <2 x double> %val2) {
define double @f6(<2 x double> %val1, <2 x double> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: wfddb %f0, %v24, %v26
; CHECK: br %r14
@ -28,6 +28,8 @@ define double @f6(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fdiv.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x
; Test a v4f32 division.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2) {
<4 x float> %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: vfdsb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test an f32 division that uses vector registers.
define float @f2(<4 x float> %val1, <4 x float> %val2) {
define float @f2(<4 x float> %val1, <4 x float> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: wfdsb %f0, %v24, %v26
; CHECK: br %r14
@ -28,6 +28,8 @@ define float @f2(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fdiv.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %ret
}
attributes #0 = { strictfp }


@ -11,57 +11,57 @@ declare <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float>, <4
declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata, metadata)
; Test the f64 maxnum intrinsic.
define double @f1(double %dummy, double %val1, double %val2) {
define double @f1(double %dummy, double %val1, double %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: wfmaxdb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call double @llvm.experimental.constrained.maxnum.f64(
double %val1, double %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
; Test the v2f64 maxnum intrinsic.
define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2) {
<2 x double> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: vfmaxdb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test the f32 maxnum intrinsic.
define float @f3(float %dummy, float %val1, float %val2) {
define float @f3(float %dummy, float %val1, float %val2) #0 {
; CHECK-LABEL: f3:
; CHECK: wfmaxsb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call float @llvm.experimental.constrained.maxnum.f32(
float %val1, float %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %ret
}
; Test the v4f32 maxnum intrinsic.
define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2) {
<4 x float> %val2) #0 {
; CHECK-LABEL: f4:
; CHECK: vfmaxsb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test the f128 maxnum intrinsic.
define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) #0 {
; CHECK-LABEL: f5:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@ -73,8 +73,9 @@ define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.maxnum.f128(
fp128 %val1, fp128 %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128* %dst
ret void
}
attributes #0 = { strictfp }


@ -11,57 +11,57 @@ declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4
declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata, metadata)
; Test the f64 minnum intrinsic.
define double @f1(double %dummy, double %val1, double %val2) {
define double @f1(double %dummy, double %val1, double %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: wfmindb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call double @llvm.experimental.constrained.minnum.f64(
double %val1, double %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
; Test the v2f64 minnum intrinsic.
define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2) {
<2 x double> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: vfmindb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test the f32 minnum intrinsic.
define float @f3(float %dummy, float %val1, float %val2) {
define float @f3(float %dummy, float %val1, float %val2) #0 {
; CHECK-LABEL: f3:
; CHECK: wfminsb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call float @llvm.experimental.constrained.minnum.f32(
float %val1, float %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %ret
}
; Test the v4f32 minnum intrinsic.
define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2) {
<4 x float> %val2) #0 {
; CHECK-LABEL: f4:
; CHECK: vfminsb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test the f128 minnum intrinsic.
define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) #0 {
; CHECK-LABEL: f5:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@ -73,8 +73,9 @@ define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.minnum.f128(
fp128 %val1, fp128 %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
store fp128 %res, fp128* %dst
ret void
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2
; Test a v2f64 multiplication.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2) {
<2 x double> %val2) #0 {
; CHECK-LABEL: f5:
; CHECK: vfmdb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test an f64 multiplication that uses vector registers.
define double @f6(<2 x double> %val1, <2 x double> %val2) {
define double @f6(<2 x double> %val1, <2 x double> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: wfmdb %f0, %v24, %v26
; CHECK: br %r14
@ -28,6 +28,8 @@ define double @f6(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fmul.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
attributes #0 = { strictfp }


@ -6,7 +6,7 @@ declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x
; Test a v2f64 multiply-and-add.
define <2 x double> @f4(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2, <2 x double> %val3) {
<2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f4:
; CHECK: vfmadb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -15,13 +15,13 @@ define <2 x double> @f4(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test a v2f64 multiply-and-subtract.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2, <2 x double> %val3) {
<2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f5:
; CHECK: vfmsdb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -31,6 +31,8 @@ define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %negval3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x
; Test a v4f32 multiplication.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2) {
<4 x float> %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: vfmsb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test an f32 multiplication that uses vector registers.
define float @f2(<4 x float> %val1, <4 x float> %val2) {
define float @f2(<4 x float> %val1, <4 x float> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: wfmsb %f0, %v24, %v26
; CHECK: br %r14
@ -28,6 +28,8 @@ define float @f2(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fmul.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %ret
}
attributes #0 = { strictfp }


@ -6,7 +6,7 @@ declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x f
; Test a v4f32 multiply-and-add.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2, <4 x float> %val3) {
<4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f1:
; CHECK: vfmasb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -15,13 +15,13 @@ define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test a v4f32 multiply-and-subtract.
define <4 x float> @f2(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2, <4 x float> %val3) {
<4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f2:
; CHECK: vfmssb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -32,6 +32,8 @@ define <4 x float> @f2(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %negval3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
attributes #0 = { strictfp }


@ -7,7 +7,7 @@ declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x f
; Test a v2f64 negative multiply-and-add.
define <2 x double> @f1(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2, <2 x double> %val3) {
<2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f1:
; CHECK: vfnmadb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -16,14 +16,14 @@ define <2 x double> @f1(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
ret <2 x double> %negret
}
; Test a v2f64 negative multiply-and-subtract.
define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2, <2 x double> %val3) {
<2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f2:
; CHECK: vfnmsdb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -33,14 +33,14 @@ define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %negval3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
ret <2 x double> %negret
}
; Test a v4f32 negative multiply-and-add.
define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2, <4 x float> %val3) {
<4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f3:
; CHECK: vfnmasb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -49,7 +49,7 @@ define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %val3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negret = fsub <4 x float> <float -0.0, float -0.0,
float -0.0, float -0.0>, %ret
ret <4 x float> %negret
@ -57,7 +57,7 @@ define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
; Test a v4f32 negative multiply-and-subtract.
define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2, <4 x float> %val3) {
<4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f4:
; CHECK: vfnmssb %v24, %v26, %v28, %v30
; CHECK: br %r14
@ -68,8 +68,10 @@ define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %negval3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%negret = fsub <4 x float> <float -0.0, float -0.0,
float -0.0, float -0.0>, %ret
ret <4 x float> %negret
}
attributes #0 = { strictfp }


@ -15,73 +15,73 @@ declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, met
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata, metadata)
define <2 x double> @f1(<2 x double> %val) {
define <2 x double> @f1(<2 x double> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfidb %v24, %v24, 0, 0
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
define <2 x double> @f2(<2 x double> %val) {
define <2 x double> @f2(<2 x double> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: vfidb %v24, %v24, 4, 0
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
define <2 x double> @f3(<2 x double> %val) {
define <2 x double> @f3(<2 x double> %val) #0 {
; CHECK-LABEL: f3:
; CHECK: vfidb %v24, %v24, 4, 7
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
define <2 x double> @f4(<2 x double> %val) {
define <2 x double> @f4(<2 x double> %val) #0 {
; CHECK-LABEL: f4:
; CHECK: vfidb %v24, %v24, 4, 6
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
define <2 x double> @f5(<2 x double> %val) {
define <2 x double> @f5(<2 x double> %val) #0 {
; CHECK-LABEL: f5:
; CHECK: vfidb %v24, %v24, 4, 5
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
define <2 x double> @f6(<2 x double> %val) {
define <2 x double> @f6(<2 x double> %val) #0 {
; CHECK-LABEL: f6:
; CHECK: vfidb %v24, %v24, 4, 1
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.round.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
define double @f7(<2 x double> %val) {
define double @f7(<2 x double> %val) #0 {
; CHECK-LABEL: f7:
; CHECK: wfidb %f0, %v24, 0, 0
; CHECK: br %r14
@ -89,11 +89,11 @@ define double @f7(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.rint.f64(
double %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f8(<2 x double> %val) {
define double @f8(<2 x double> %val) #0 {
; CHECK-LABEL: f8:
; CHECK: wfidb %f0, %v24, 4, 0
; CHECK: br %r14
@ -101,11 +101,11 @@ define double @f8(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f9(<2 x double> %val) {
define double @f9(<2 x double> %val) #0 {
; CHECK-LABEL: f9:
; CHECK: wfidb %f0, %v24, 4, 7
; CHECK: br %r14
@ -113,12 +113,12 @@ define double @f9(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.floor.f64(
double %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f10(<2 x double> %val) {
define double @f10(<2 x double> %val) #0 {
; CHECK-LABEL: f10:
; CHECK: wfidb %f0, %v24, 4, 6
; CHECK: br %r14
@ -126,11 +126,11 @@ define double @f10(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.ceil.f64(
double %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f11(<2 x double> %val) {
define double @f11(<2 x double> %val) #0 {
; CHECK-LABEL: f11:
; CHECK: wfidb %f0, %v24, 4, 5
; CHECK: br %r14
@ -138,11 +138,11 @@ define double @f11(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.trunc.f64(
double %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
define double @f12(<2 x double> %val) {
define double @f12(<2 x double> %val) #0 {
; CHECK-LABEL: f12:
; CHECK: wfidb %f0, %v24, 4, 1
; CHECK: br %r14
@ -150,6 +150,8 @@ define double @f12(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.round.f64(
double %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %res
}
attributes #0 = { strictfp }


@ -15,73 +15,73 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad
declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata, metadata)
define <4 x float> @f1(<4 x float> %val) {
define <4 x float> @f1(<4 x float> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfisb %v24, %v24, 0, 0
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.rint.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
define <4 x float> @f2(<4 x float> %val) {
define <4 x float> @f2(<4 x float> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: vfisb %v24, %v24, 4, 0
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
define <4 x float> @f3(<4 x float> %val) {
define <4 x float> @f3(<4 x float> %val) #0 {
; CHECK-LABEL: f3:
; CHECK: vfisb %v24, %v24, 4, 7
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.floor.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
define <4 x float> @f4(<4 x float> %val) {
define <4 x float> @f4(<4 x float> %val) #0 {
; CHECK-LABEL: f4:
; CHECK: vfisb %v24, %v24, 4, 6
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
define <4 x float> @f5(<4 x float> %val) {
define <4 x float> @f5(<4 x float> %val) #0 {
; CHECK-LABEL: f5:
; CHECK: vfisb %v24, %v24, 4, 5
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
define <4 x float> @f6(<4 x float> %val) {
define <4 x float> @f6(<4 x float> %val) #0 {
; CHECK-LABEL: f6:
; CHECK: vfisb %v24, %v24, 4, 1
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.round.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
define float @f7(<4 x float> %val) {
define float @f7(<4 x float> %val) #0 {
; CHECK-LABEL: f7:
; CHECK: wfisb %f0, %v24, 0, 0
; CHECK: br %r14
@ -89,11 +89,11 @@ define float @f7(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.rint.f32(
float %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f8(<4 x float> %val) {
define float @f8(<4 x float> %val) #0 {
; CHECK-LABEL: f8:
; CHECK: wfisb %f0, %v24, 4, 0
; CHECK: br %r14
@ -101,11 +101,11 @@ define float @f8(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f9(<4 x float> %val) {
define float @f9(<4 x float> %val) #0 {
; CHECK-LABEL: f9:
; CHECK: wfisb %f0, %v24, 4, 7
; CHECK: br %r14
@ -113,11 +113,11 @@ define float @f9(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.floor.f32(
float %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f10(<4 x float> %val) {
define float @f10(<4 x float> %val) #0 {
; CHECK-LABEL: f10:
; CHECK: wfisb %f0, %v24, 4, 6
; CHECK: br %r14
@ -125,11 +125,11 @@ define float @f10(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.ceil.f32(
float %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f11(<4 x float> %val) {
define float @f11(<4 x float> %val) #0 {
; CHECK-LABEL: f11:
; CHECK: wfisb %f0, %v24, 4, 5
; CHECK: br %r14
@ -137,11 +137,11 @@ define float @f11(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.trunc.f32(
float %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
define float @f12(<4 x float> %val) {
define float @f12(<4 x float> %val) #0 {
; CHECK-LABEL: f12:
; CHECK: wfisb %f0, %v24, 4, 1
; CHECK: br %r14
@ -149,6 +149,8 @@ define float @f12(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.round.f32(
float %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %res
}
attributes #0 = { strictfp }


@ -5,18 +5,18 @@
declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
define <2 x double> @f1(<2 x double> %val) {
define <2 x double> @f1(<2 x double> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfsqdb %v24, %v24
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
define double @f2(<2 x double> %val) {
define double @f2(<2 x double> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: wfsqdb %f0, %v24
; CHECK: br %r14
@ -24,6 +24,8 @@ define double @f2(<2 x double> %val) {
%ret = call double @llvm.experimental.constrained.sqrt.f64(
double %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
attributes #0 = { strictfp }


@ -5,18 +5,18 @@
declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
define <4 x float> @f1(<4 x float> %val) {
define <4 x float> @f1(<4 x float> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfsqsb %v24, %v24
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
define float @f2(<4 x float> %val) {
define float @f2(<4 x float> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: wfsqsb %f0, %v24
; CHECK: br %r14
@ -24,6 +24,8 @@ define float @f2(<4 x float> %val) {
%ret = call float @llvm.experimental.constrained.sqrt.f32(
float %scalar,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %ret
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2
; Test a v2f64 subtraction.
define <2 x double> @f6(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2) {
<2 x double> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: vfsdb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test an f64 subtraction that uses vector registers.
define double @f7(<2 x double> %val1, <2 x double> %val2) {
define double @f7(<2 x double> %val1, <2 x double> %val2) #0 {
; CHECK-LABEL: f7:
; CHECK: wfsdb %f0, %v24, %v26
; CHECK: br %r14
@ -28,7 +28,8 @@ define double @f7(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fsub.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
attributes #0 = { strictfp }


@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x
; Test a v4f32 subtraction.
define <4 x float> @f6(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2) {
<4 x float> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: vfssb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test an f32 subtraction that uses vector registers.
define float @f7(<4 x float> %val1, <4 x float> %val2) {
define float @f7(<4 x float> %val1, <4 x float> %val2) #0 {
; CHECK-LABEL: f7:
; CHECK: wfssb %f0, %v24, %v26
; CHECK: br %r14
@ -28,6 +28,8 @@ define float @f7(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fsub.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %ret
}
attributes #0 = { strictfp }

File diff suppressed because it is too large


@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-gnu-linux < %s | FileCheck %s
define x86_fp80 @constrained_fpext_f32_as_fp80(float %mem) {
define x86_fp80 @constrained_fpext_f32_as_fp80(float %mem) #0 {
; CHECK-LABEL: constrained_fpext_f32_as_fp80:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
@ -10,11 +10,11 @@ define x86_fp80 @constrained_fpext_f32_as_fp80(float %mem) {
entry:
%ext = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(
float %mem,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret x86_fp80 %ext
}
define float @constrained_fptrunc_f80_to_f32(x86_fp80 %reg) {
define float @constrained_fptrunc_f80_to_f32(x86_fp80 %reg) #0 {
; CHECK-LABEL: constrained_fptrunc_f80_to_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
@ -24,11 +24,11 @@ define float @constrained_fptrunc_f80_to_f32(x86_fp80 %reg) {
%trunc = call float @llvm.experimental.constrained.fptrunc.f32.f80(
x86_fp80 %reg,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %trunc
}
define x86_fp80 @constrained_fpext_f64_to_f80(double %mem) {
define x86_fp80 @constrained_fpext_f64_to_f80(double %mem) #0 {
; CHECK-LABEL: constrained_fpext_f64_to_f80:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
@ -37,11 +37,11 @@ define x86_fp80 @constrained_fpext_f64_to_f80(double %mem) {
entry:
%ext = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(
double %mem,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret x86_fp80 %ext
}
define double @constrained_fptrunc_f80_to_f64(x86_fp80 %reg) {
define double @constrained_fptrunc_f80_to_f64(x86_fp80 %reg) #0 {
; CHECK-LABEL: constrained_fptrunc_f80_to_f64:
; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
@ -51,10 +51,12 @@ define double @constrained_fptrunc_f80_to_f64(x86_fp80 %reg) {
%trunc = call double @llvm.experimental.constrained.fptrunc.f64.f80(
x86_fp80 %reg,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %trunc
}
attributes #0 = { strictfp }
declare x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float, metadata)
declare x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80, metadata, metadata)


@ -11,13 +11,13 @@
;
; CHECK-LABEL: f1
; COMMON: divsd
define double @f1() {
define double @f1() #0 {
entry:
%div = call double @llvm.experimental.constrained.fdiv.f64(
double 1.000000e+00,
double 1.000000e+01,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %div
}
@ -31,13 +31,13 @@ entry:
;
; CHECK-LABEL: f2
; COMMON: subsd
define double @f2(double %a) {
define double @f2(double %a) #0 {
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double %a,
double 0.000000e+00,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %sub
}
@ -54,21 +54,21 @@ entry:
; COMMON: subsd
; COMMON: mulsd
; COMMON: subsd
define double @f3(double %a, double %b) {
define double @f3(double %a, double %b) #0 {
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00, double %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul = call double @llvm.experimental.constrained.fmul.f64(
double %sub, double %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%ret = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00,
double %mul,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
@ -87,7 +87,7 @@ entry:
; COMMON: testl
; COMMON: jle
; COMMON: addsd
define double @f4(i32 %n, double %a) {
define double @f4(i32 %n, double %a) #0 {
entry:
%cmp = icmp sgt i32 %n, 0
br i1 %cmp, label %if.then, label %if.end
@ -96,7 +96,7 @@ if.then:
%add = call double @llvm.experimental.constrained.fadd.f64(
double 1.000000e+00, double %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
br label %if.end
if.end:
@ -107,112 +107,112 @@ if.end:
; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f5
; COMMON: sqrtsd
define double @f5() {
define double @f5() #0 {
entry:
%result = call double @llvm.experimental.constrained.sqrt.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f6
; COMMON: pow
define double @f6() {
define double @f6() #0 {
entry:
%result = call double @llvm.experimental.constrained.pow.f64(double 42.1,
double 3.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f7
; COMMON: powi
define double @f7() {
define double @f7() #0 {
entry:
%result = call double @llvm.experimental.constrained.powi.f64(double 42.1,
i32 3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f8
; COMMON: sin
define double @f8() {
define double @f8() #0 {
entry:
%result = call double @llvm.experimental.constrained.sin.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f9
; COMMON: cos
define double @f9() {
define double @f9() #0 {
entry:
%result = call double @llvm.experimental.constrained.cos.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f10
; COMMON: exp
define double @f10() {
define double @f10() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f11
; COMMON: exp2
define double @f11() {
define double @f11() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp2.f64(double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f12
; COMMON: log
define double @f12() {
define double @f12() #0 {
entry:
%result = call double @llvm.experimental.constrained.log.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f13
; COMMON: log10
define double @f13() {
define double @f13() #0 {
entry:
%result = call double @llvm.experimental.constrained.log10.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f14
; COMMON: log2
define double @f14() {
define double @f14() #0 {
entry:
%result = call double @llvm.experimental.constrained.log2.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
@ -220,11 +220,11 @@ entry:
; CHECK-LABEL: f15
; NO-FMA: rint
; HAS-FMA: vroundsd
define double @f15() {
define double @f15() #0 {
entry:
%result = call double @llvm.experimental.constrained.rint.f64(double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
@ -233,12 +233,12 @@ entry:
; CHECK-LABEL: f16
; NO-FMA: nearbyint
; HAS-FMA: vroundsd
define double @f16() {
define double @f16() #0 {
entry:
%result = call double @llvm.experimental.constrained.nearbyint.f64(
double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
@ -247,14 +247,14 @@ entry:
; CHECK-LABEL: f17
; FMACALL32: jmp fmaf # TAILCALL
; FMA32: vfmadd213ss
define float @f17() {
define float @f17() #0 {
entry:
%result = call float @llvm.experimental.constrained.fma.f32(
float 3.5,
float 3.5,
float 3.5,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %result
}
@ -263,26 +263,26 @@ entry:
; CHECK-LABEL: f18
; FMACALL64: jmp fma # TAILCALL
; FMA64: vfmadd213sd
define double @f18() {
define double @f18() #0 {
entry:
%result = call double @llvm.experimental.constrained.fma.f64(
double 42.1,
double 42.1,
double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; CHECK-LABEL: f19
; COMMON: fmod
define double @f19() {
define double @f19() #0 {
entry:
%rem = call double @llvm.experimental.constrained.frem.f64(
double 1.000000e+00,
double 1.000000e+01,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %rem
}
@ -312,10 +312,10 @@ entry:
; HAS-FMA: setae
; HAS-FMA: shll
; HAS-FMA: xorl
define i32 @f20u(double %x) {
define i32 @f20u(double %x) #0 {
entry:
%result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %result
}
@ -324,24 +324,26 @@ entry:
; Verify that no gross errors happen.
; CHECK-LABEL: @f21
; COMMON: cvtsd2ss
define float @f21() {
define float @f21() #0 {
entry:
%result = call float @llvm.experimental.constrained.fptrunc.f32.f64(
double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %result
}
; CHECK-LABEL: @f22
; COMMON: cvtss2sd
define double @f22(float %x) {
define double @f22(float %x) #0 {
entry:
%result = call double @llvm.experimental.constrained.fpext.f64.f32(float %x,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
attributes #0 = { strictfp }
@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)


@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s
define <1 x float> @constrained_vector_fma_v1f32() {
define <1 x float> @constrained_vector_fma_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@ -14,11 +14,11 @@ entry:
<1 x float> <float 2.5>,
<1 x float> <float 4.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <1 x float> %fma
}
define <2 x double> @constrained_vector_fma_v2f64() {
define <2 x double> @constrained_vector_fma_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovapd {{.*#+}} xmm1 = [1.5E+0,5.0E-1]
@ -31,11 +31,11 @@ entry:
<2 x double> <double 3.5, double 2.5>,
<2 x double> <double 5.5, double 4.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <2 x double> %fma
}
define <3 x float> @constrained_vector_fma_v3f32() {
define <3 x float> @constrained_vector_fma_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@ -56,11 +56,11 @@ entry:
<3 x float> <float 5.5, float 4.5, float 3.5>,
<3 x float> <float 8.5, float 7.5, float 6.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <3 x float> %fma
}
define <3 x double> @constrained_vector_fma_v3f64() {
define <3 x double> @constrained_vector_fma_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@ -77,11 +77,11 @@ entry:
<3 x double> <double 5.5, double 4.5, double 3.5>,
<3 x double> <double 8.5, double 7.5, double 6.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <3 x double> %fma
}
define <4 x double> @constrained_vector_fma_v4f64() {
define <4 x double> @constrained_vector_fma_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
@ -94,11 +94,11 @@ entry:
<4 x double> <double 7.5, double 6.5, double 5.5, double 4.5>,
<4 x double> <double 11.5, double 10.5, double 9.5, double 8.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x double> %fma
}
define <4 x float> @constrained_vector_fma_v4f32() {
define <4 x float> @constrained_vector_fma_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps {{.*#+}} xmm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
@ -111,11 +111,11 @@ entry:
<4 x float> <float 7.5, float 6.5, float 5.5, float 4.5>,
<4 x float> <float 11.5, float 10.5, float 9.5, float 8.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <4 x float> %fma
}
define <8 x float> @constrained_vector_fma_v8f32() {
define <8 x float> @constrained_vector_fma_v8f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1,7.5E+0,6.5E+0,5.5E+0,4.5E+0]
@ -131,10 +131,12 @@ entry:
<8 x float> <float 11.5, float 10.5, float 9.5, float 8.5,
float 15.5, float 14.5, float 13.5, float 12.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret <8 x float> %fma
}
attributes #0 = { strictfp }
; Single width declarations
declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)

File diff suppressed because it is too large


@ -3,13 +3,13 @@
; Test to verify that constants aren't folded when the rounding mode is unknown.
; CHECK-LABEL: @f1
; CHECK: call double @llvm.experimental.constrained.fdiv.f64
define double @f1() {
define double @f1() #0 {
entry:
%div = call double @llvm.experimental.constrained.fdiv.f64(
double 1.000000e+00,
double 1.000000e+01,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %div
}
@ -23,12 +23,12 @@ entry:
;
; CHECK-LABEL: @f2
; CHECK: call double @llvm.experimental.constrained.fsub.f64
define double @f2(double %a) {
define double @f2(double %a) #0 {
entry:
%div = call double @llvm.experimental.constrained.fsub.f64(
double %a, double 0.000000e+00,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %div
}
@ -45,21 +45,21 @@ entry:
; CHECK: call double @llvm.experimental.constrained.fsub.f64
; CHECK: call double @llvm.experimental.constrained.fmul.f64
; CHECK: call double @llvm.experimental.constrained.fsub.f64
define double @f3(double %a, double %b) {
define double @f3(double %a, double %b) #0 {
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00, double %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%mul = call double @llvm.experimental.constrained.fmul.f64(
double %sub, double %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
%ret = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00,
double %mul,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %ret
}
@ -77,7 +77,7 @@ entry:
; CHECK-LABEL: @f4
; CHECK-NOT: select
; CHECK: br i1 %cmp
define double @f4(i32 %n, double %a) {
define double @f4(i32 %n, double %a) #0 {
entry:
%cmp = icmp sgt i32 %n, 0
br i1 %cmp, label %if.then, label %if.end
@ -86,7 +86,7 @@ if.then:
%add = call double @llvm.experimental.constrained.fadd.f64(
double 1.000000e+00, double %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
br label %if.end
if.end:
@ -97,123 +97,123 @@ if.end:
; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f5
; CHECK: call double @llvm.experimental.constrained.sqrt
define double @f5() {
define double @f5() #0 {
entry:
%result = call double @llvm.experimental.constrained.sqrt.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f6
; CHECK: call double @llvm.experimental.constrained.pow
define double @f6() {
define double @f6() #0 {
entry:
%result = call double @llvm.experimental.constrained.pow.f64(double 42.1,
double 3.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f7
; CHECK: call double @llvm.experimental.constrained.powi
define double @f7() {
define double @f7() #0 {
entry:
%result = call double @llvm.experimental.constrained.powi.f64(double 42.1,
i32 3,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f8
; CHECK: call double @llvm.experimental.constrained.sin
define double @f8() {
define double @f8() #0 {
entry:
%result = call double @llvm.experimental.constrained.sin.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f9
; CHECK: call double @llvm.experimental.constrained.cos
define double @f9() {
define double @f9() #0 {
entry:
%result = call double @llvm.experimental.constrained.cos.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f10
; CHECK: call double @llvm.experimental.constrained.exp
define double @f10() {
define double @f10() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f11
; CHECK: call double @llvm.experimental.constrained.exp2
define double @f11() {
define double @f11() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp2.f64(double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f12
; CHECK: call double @llvm.experimental.constrained.log
define double @f12() {
define double @f12() #0 {
entry:
%result = call double @llvm.experimental.constrained.log.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f13
; CHECK: call double @llvm.experimental.constrained.log10
define double @f13() {
define double @f13() #0 {
entry:
%result = call double @llvm.experimental.constrained.log10.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f14
; CHECK: call double @llvm.experimental.constrained.log2
define double @f14() {
define double @f14() #0 {
entry:
%result = call double @llvm.experimental.constrained.log2.f64(double 42.0,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that rint(42.1) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f15
; CHECK: call double @llvm.experimental.constrained.rint
define double @f15() {
define double @f15() #0 {
entry:
%result = call double @llvm.experimental.constrained.rint.f64(double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
@ -221,12 +221,12 @@ entry:
; unknown.
; CHECK-LABEL: f16
; CHECK: call double @llvm.experimental.constrained.nearbyint
define double @f16() {
define double @f16() #0 {
entry:
%result = call double @llvm.experimental.constrained.nearbyint.f64(
double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
@ -234,11 +234,11 @@ entry:
; unknown.
; CHECK-LABEL: f17
; CHECK: call double @llvm.experimental.constrained.fma
define double @f17() {
define double @f17() #0 {
entry:
%result = call double @llvm.experimental.constrained.fma.f64(double 42.1, double 42.1, double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
@ -246,11 +246,11 @@ entry:
; unknown.
; CHECK-LABEL: f18
; CHECK: call zeroext i32 @llvm.experimental.constrained.fptoui
define zeroext i32 @f18() {
define zeroext i32 @f18() #0 {
entry:
%result = call zeroext i32 @llvm.experimental.constrained.fptoui.i32.f64(
double 42.1,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %result
}
@ -258,10 +258,10 @@ entry:
; unknown.
; CHECK-LABEL: f19
; CHECK: call i32 @llvm.experimental.constrained.fptosi
define i32 @f19() {
define i32 @f19() #0 {
entry:
%result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double 42.1,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret i32 %result
}
@ -269,12 +269,12 @@ entry:
; unknown.
; CHECK-LABEL: f20
; CHECK: call float @llvm.experimental.constrained.fptrunc
define float @f20() {
define float @f20() #0 {
entry:
%result = call float @llvm.experimental.constrained.fptrunc.f32.f64(
double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret float %result
}
@ -282,13 +282,15 @@ entry:
; unknown.
; CHECK-LABEL: f21
; CHECK: call double @llvm.experimental.constrained.fpext
define double @f21() {
define double @f21() #0 {
entry:
%result = call double @llvm.experimental.constrained.fpext.f64.f32(float 42.0,
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %result
}
attributes #0 = { strictfp }
@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)


@ -76,10 +76,6 @@ entry:
; CHECK-NEXT: %cos3 = call double @cos(double 0.000000e+00)
%cos3 = call double @cos(double 0.000000e+00) nobuiltin
; cos(1) strictfp sets FP status flags
; CHECK-NEXT: %cos4 = call double @cos(double 1.000000e+00)
%cos4 = call double @cos(double 1.000000e+00) strictfp
; pow(0, 1) is 0
%pow1 = call double @pow(double 0x7FF0000000000000, double 1.000000e+00)
@ -97,3 +93,16 @@ entry:
; CHECK-NEXT: ret void
ret void
}
define void @Tstrict() strictfp {
entry:
; CHECK-LABEL: @Tstrict(
; CHECK-NEXT: entry:
; cos(1) strictfp sets FP status flags
; CHECK-NEXT: %cos4 = call double @cos(double 1.000000e+00)
%cos4 = call double @cos(double 1.000000e+00) strictfp
; CHECK-NEXT: ret void
ret void
}


@ -23,7 +23,7 @@ define double @test_acos_nobuiltin() {
; Check that we don't constant fold strictfp results that require rounding.
define double @test_acos_strictfp() {
define double @test_acos_strictfp() strictfp {
; CHECK-LABEL: @test_acos_strictfp
%pi = call double @acos(double -1.000000e+00) strictfp
; CHECK: call double @acos(double -1.000000e+00)


@ -20,7 +20,7 @@ define i8* @test_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
; Verify that the strictfp attr doesn't block this optimization.
define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) {
define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) strictfp {
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[MEM1:%.*]], i8* align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: ret i8* [[MEM1]]


@ -15,64 +15,66 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
; CHECK1: attributes #[[ATTR]] = { inaccessiblememonly nounwind willreturn }
; Note: FP exceptions aren't usually caught through normal unwind mechanisms,
; but we may want to revisit this for asynchronous exception handling.
define double @f1(double %a, double %b) {
define double @f1(double %a, double %b) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.fadd.f64(
double %a, double %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %fadd
}
define double @f1u(double %a) {
define double @f1u(double %a) #0 {
entry:
%fsqrt = call double @llvm.experimental.constrained.sqrt.f64(
double %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
metadata !"fpexcept.strict") #0
ret double %fsqrt
}
; Test an illegal value for the rounding mode argument.
; CHECK2: invalid rounding mode argument
;T2: define double @f2(double %a, double %b) {
;T2: define double @f2(double %a, double %b) #0 {
;T2: entry:
;T2: %fadd = call double @llvm.experimental.constrained.fadd.f64(
;T2: double %a, double %b,
;T2: metadata !"round.dynomite",
;T2: metadata !"fpexcept.strict")
;T2: metadata !"fpexcept.strict") #0
;T2: ret double %fadd
;T2: }
; Test an illegal value for the exception behavior argument.
; CHECK3: invalid exception behavior argument
;T3: define double @f3(double %a, double %b) {
;T3: define double @f3(double %a, double %b) #0 {
;T3: entry:
;T3: %fadd = call double @llvm.experimental.constrained.fadd.f64(
;T3: double %a, double %b,
;T3: metadata !"round.dynamic",
;T3: metadata !"fpexcept.restrict")
;T3: metadata !"fpexcept.restrict") #0
;T3: ret double %fadd
;T3: }
; Test an illegal value for the rounding mode argument.
; CHECK4: invalid rounding mode argument
;T4: define double @f4(double %a) {
;T4: define double @f4(double %a) #0 {
;T4: entry:
;T4: %fadd = call double @llvm.experimental.constrained.sqrt.f64(
;T4: double %a,
;T4: metadata !"round.dynomite",
;T4: metadata !"fpexcept.strict")
;T4: metadata !"fpexcept.strict") #0
;T4: ret double %fadd
;T4: }
; Test an illegal value for the exception behavior argument.
; CHECK5: invalid exception behavior argument
;T5: define double @f5(double %a) {
;T5: define double @f5(double %a) #0 {
;T5: entry:
;T5: %fadd = call double @llvm.experimental.constrained.sqrt.f64(
;T5: double %a,
;T5: metadata !"round.dynamic",
;T5: metadata !"fpexcept.restrict")
;T5: metadata !"fpexcept.restrict") #0
;T5: ret double %fadd
;T5: }
attributes #0 = { strictfp }