; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512vl -mcpu=skx | FileCheck %s --check-prefix=AVX

define double @t1(float* nocapture %x) nounwind readonly ssp {
entry:
; SSE-LABEL: t1:
; SSE: movss ([[A0:%rdi|%rcx]]), %xmm0
; SSE: cvtss2sd %xmm0, %xmm0

  %0 = load float, float* %x, align 4
  %1 = fpext float %0 to double
  ret double %1
}

define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
entry:
; SSE-LABEL: t2:
; SSE: cvtsd2ss ([[A0]]), %xmm0

  %0 = load double, double* %x, align 8
  %1 = fptrunc double %0 to float
  ret float %1
}

define float @squirtf(float* %x) nounwind {
entry:
; SSE-LABEL: squirtf:
; SSE: movss ([[A0]]), %xmm0
; SSE: sqrtss %xmm0, %xmm0

  %z = load float, float* %x
  %t = call float @llvm.sqrt.f32(float %z)
  ret float %t
}

define double @squirt(double* %x) nounwind {
entry:
; SSE-LABEL: squirt:
; SSE: movsd ([[A0]]), %xmm0
; SSE: sqrtsd %xmm0, %xmm0

  %z = load double, double* %x
  %t = call double @llvm.sqrt.f64(double %z)
  ret double %t
}

define float @squirtf_size(float* %x) nounwind optsize {
entry:
; SSE-LABEL: squirtf_size:
; SSE: sqrtss ([[A0]]), %xmm0

  %z = load float, float* %x
  %t = call float @llvm.sqrt.f32(float %z)
  ret float %t
}

define double @squirt_size(double* %x) nounwind optsize {
entry:
; SSE-LABEL: squirt_size:
; SSE: sqrtsd ([[A0]]), %xmm0

  %z = load double, double* %x
  %t = call double @llvm.sqrt.f64(double %z)
  ret double %t
}

declare float @llvm.sqrt.f32(float)
declare double @llvm.sqrt.f64(double)

; SSE-LABEL: loopdep1
; SSE: for.body{{$}}
;
; This loop contains two cvtsi2ss instructions that update the same xmm
; register. Verify that the break false dependency fix pass breaks those
; dependencies by inserting xorps instructions.
;
; If the register allocator chooses different registers for the two cvtsi2ss
; instructions, they are still dependent on themselves.
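;
; Illustrative sketch (a plain comment, not a FileCheck directive):
; cvtsi2ss writes only the low 32 bits of its destination, so without the
; fix each conversion merges into whatever was last in that register, e.g.
;   cvtsi2ss %eax, %xmm0          # reads old %xmm0 -> false dependency
; The pass therefore emits a zeroing idiom first, which the hardware
; recognizes as dependency-breaking:
;   xorps %xmm0, %xmm0
;   cvtsi2ss %eax, %xmm0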
; SSE: xorps [[XMM1:%xmm[0-9]+]]
; SSE: , [[XMM1]]
; SSE: cvtsi2ss %{{.*}}, [[XMM1]]
; SSE: xorps [[XMM2:%xmm[0-9]+]]
; SSE: , [[XMM2]]
; SSE: cvtsi2ss %{{.*}}, [[XMM2]]
define float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
entry:
  %tobool3 = icmp eq i32 %m, 0
  br i1 %tobool3, label %for.end, label %for.body

for.body:                                         ; preds = %entry, %for.body
  %m.addr.07 = phi i32 [ %dec, %for.body ], [ %m, %entry ]
  %s1.06 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
  %s2.05 = phi float [ %add2, %for.body ], [ 0.000000e+00, %entry ]
  %n.04 = phi i32 [ %inc, %for.body ], [ 1, %entry ]
  %conv = sitofp i32 %n.04 to float
  %add = fadd float %s1.06, %conv
  %conv1 = sitofp i32 %m.addr.07 to float
  %add2 = fadd float %s2.05, %conv1
  %inc = add nsw i32 %n.04, 1
  %dec = add nsw i32 %m.addr.07, -1
  %tobool = icmp eq i32 %dec, 0
  br i1 %tobool, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %s1.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %s2.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add2, %for.body ]
  %sub = fsub float %s1.0.lcssa, %s2.0.lcssa
  ret float %sub
}

; rdar:15221834 False AVX register dependencies cause 5x slowdown on
; flops-6. Make sure the unused register read by vcvtsi2sd is zeroed
; to avoid cyclic dependence on a write to the same register in a
; previous iteration.
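;
; Rough sketch of the pattern being checked (illustrative only): the
; three-operand AVX form
;   vcvtsi2sd %rax, %xmm1, %xmm2
; copies the upper bits of %xmm1 into %xmm2, so %xmm1 is read even though
; its contents are irrelevant; zeroing it with vxorps first breaks the
; loop-carried dependence on the previous iteration's write to %xmm1.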
; AVX-LABEL: loopdep2:
; AVX-LABEL: %loop
; AVX: vxorps %[[REG:xmm.]], %{{xmm.}}, %{{xmm.}}
; AVX: vcvtsi2sd %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
; SSE-LABEL: loopdep2:
; SSE-LABEL: %loop
; SSE: xorps %[[REG:xmm.]], %[[REG]]
; SSE: cvtsi2sd %{{r[0-9a-x]+}}, %[[REG]]
define i64 @loopdep2(i64* nocapture %x, double* nocapture %y) nounwind {
entry:
  %vx = load i64, i64* %x
  br label %loop

loop:
  %i = phi i64 [ 1, %entry ], [ %inc, %loop ]
  %s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
  %fi = sitofp i64 %i to double
  tail call void asm sideeffect "", "~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{dirflag},~{fpsr},~{flags}"()
  %vy = load double, double* %y
  %fipy = fadd double %fi, %vy
  %iipy = fptosi double %fipy to i64
  %s2 = add i64 %s1, %iipy
  %inc = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %inc, 156250000
  br i1 %exitcond, label %ret, label %loop

ret:
  ret i64 %s2
}

; This loop contains a cvtsi2sd instruction that has a loop-carried
; false dependency on an xmm that is modified by other scalar instructions
; that follow it in the loop. Additionally, the source of the convert is a
; memory operand. Verify the break false dependency fix pass breaks this
; dependency by inserting a xor before the convert.
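;
; Schematically (illustrative only), even with a memory source the convert
; still merges into its destination register:
;   cvtsi2sdl (%rax), %xmm0       # reads old %xmm0
; so the expected fix is an
;   xorps %xmm0, %xmm0
; ahead of it, cutting the dependence on the prior producer of %xmm0.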
@x = common global [1024 x double] zeroinitializer, align 16
@y = common global [1024 x double] zeroinitializer, align 16
@z = common global [1024 x double] zeroinitializer, align 16
@w = common global [1024 x double] zeroinitializer, align 16
@v = common global [1024 x i32] zeroinitializer, align 16

define void @loopdep3() {
entry:
  br label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %for.inc14, %entry
  %i.025 = phi i32 [ 0, %entry ], [ %inc15, %for.inc14 ]
  br label %for.body3

for.body3:
  %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
  %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @v, i64 0, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %conv = sitofp i32 %0 to double
  %arrayidx5 = getelementptr inbounds [1024 x double], [1024 x double]* @x, i64 0, i64 %indvars.iv
  %1 = load double, double* %arrayidx5, align 8
  %mul = fmul double %conv, %1
  %arrayidx7 = getelementptr inbounds [1024 x double], [1024 x double]* @y, i64 0, i64 %indvars.iv
  %2 = load double, double* %arrayidx7, align 8
  %mul8 = fmul double %mul, %2
  %arrayidx10 = getelementptr inbounds [1024 x double], [1024 x double]* @z, i64 0, i64 %indvars.iv
  %3 = load double, double* %arrayidx10, align 8
  %mul11 = fmul double %mul8, %3
  %arrayidx13 = getelementptr inbounds [1024 x double], [1024 x double]* @w, i64 0, i64 %indvars.iv
  store double %mul11, double* %arrayidx13, align 8
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1024
  tail call void asm sideeffect "", "~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{dirflag},~{fpsr},~{flags}"()
  br i1 %exitcond, label %for.inc14, label %for.body3

for.inc14:                                        ; preds = %for.body3
  %inc15 = add nsw i32 %i.025, 1
  %exitcond26 = icmp eq i32 %inc15, 100000
  br i1 %exitcond26, label %for.end16, label %for.cond1.preheader

for.end16:                                        ; preds = %for.inc14
  ret void

;SSE-LABEL:@loopdep3
;SSE: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
;SSE-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
;SSE-NEXT: movsd [[XMM0]],
;AVX-LABEL:@loopdep3
;AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
;AVX-NEXT: vcvtsi2sdl {{.*}}, [[XMM0]], {{%xmm[0-9]+}}
;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
;AVX-NEXT: vmovsd [[XMM0]],
}

define double @inlineasmdep(i64 %arg) {
top:
  tail call void asm sideeffect "", "~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{dirflag},~{fpsr},~{flags}"()
  %tmp1 = sitofp i64 %arg to double
  ret double %tmp1
;AVX-LABEL:@inlineasmdep
;AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]], [[XMM0]]
;AVX-NEXT: vcvtsi2sd {{.*}}, [[XMM0]], {{%xmm[0-9]+}}
}

; Make sure we are making a smart choice regarding undef registers and
; hiding the false dependency behind a true dependency
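;
; Sketch of the idea (illustrative only): the conversion's true data source
; already lives in an xmm register, e.g.
;   vcvtss2sd %xmm0, %xmm0, %xmm0
; Reusing that same register for the merged upper bits adds no extra
; dependency, so no vxorps should be emitted here.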
define double @truedeps(float %arg) {
top:
  tail call void asm sideeffect "", "~{xmm6},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm4},~{xmm5},~{xmm7},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{dirflag},~{fpsr},~{flags}"()
  %tmp1 = fpext float %arg to double
  ret double %tmp1
;AVX-LABEL:@truedeps
;AVX-NOT: vxorps
;AVX: vcvtss2sd [[XMM0:%xmm[0-9]+]], [[XMM0]], {{%xmm[0-9]+}}
}

; Make sure we are making a smart choice regarding undef registers and
; choosing the register with the highest clearance
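;
; "Clearance" is the distance, in instructions, since a register was last
; written. %xmm6 is clobbered only by the first inline asm below, so at the
; convert it has the largest clearance and should be picked for the undef
; read, schematically:
;   vxorps %xmm6, %xmm6, %xmm6
;   vcvtsi2sd <gpr>, %xmm6, <dst>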
define double @clearence(i64 %arg) {
top:
  tail call void asm sideeffect "", "~{xmm6},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm4},~{xmm5},~{xmm7},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{dirflag},~{fpsr},~{flags}"()
  %tmp1 = sitofp i64 %arg to double
  ret double %tmp1
;AVX-LABEL:@clearence
;AVX: vxorps [[XMM6:%xmm6]], [[XMM6]], [[XMM6]]
;AVX-NEXT: vcvtsi2sd {{.*}}, [[XMM6]], {{%xmm[0-9]+}}
}

; Make sure we are making a smart choice regarding undef registers in order to
; avoid a cyclic dependence on a write to the same register in a previous
; iteration, especially when we cannot zero out the undef register because it
; is alive.
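;
; Sketch (illustrative only): the xmm registers this loop really uses are
; live across iterations and cannot simply be zeroed, so the pass instead
; routes the undef read to a register the loop never writes, one of
; %xmm4-%xmm7, schematically:
;   vcvtsi2sd <gpr>, %xmm4, <dst>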
define i64 @loopclearence(i64* nocapture %x, double* nocapture %y) nounwind {
entry:
  %vx = load i64, i64* %x
  br label %loop

loop:
  %i = phi i64 [ 1, %entry ], [ %inc, %loop ]
  %s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
  %fi = sitofp i64 %i to double
  tail call void asm sideeffect "", "~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{dirflag},~{fpsr},~{flags}"()
  %vy = load double, double* %y
  %fipy = fadd double %fi, %vy
  %iipy = fptosi double %fipy to i64
  %s2 = add i64 %s1, %iipy
  %inc = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %inc, 156250000
  br i1 %exitcond, label %ret, label %loop

ret:
  ret i64 %s2
;AVX-LABEL:@loopclearence
;Registers 4-7 are not used and therefore one of them should be chosen
;AVX-NOT: {{%xmm[4-7]}}
;AVX: vcvtsi2sd {{.*}}, [[XMM4_7:%xmm[4-7]]], {{%xmm[0-9]+}}
;AVX-NOT: [[XMM4_7]]
}

; Make sure we are making a smart choice regarding undef registers even for more
; complicated loop structures. This example is the inner loop from
; julia> a = falses(10000); a[1:4:end] = true
; julia> linspace(1.0,2.0,10000)[a]
define void @loopclearance2(double* nocapture %y, i64* %x, double %c1, double %c2, double %c3, double %c4, i64 %size) {
entry:
  tail call void asm sideeffect "", "~{xmm7},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{dirflag},~{fpsr},~{flags}"()
  tail call void asm sideeffect "", "~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{dirflag},~{fpsr},~{flags}"()
  br label %loop

loop:
  %phi_i = phi i64 [ 1, %entry ], [ %nexti, %loop_end ]
  %phi_j = phi i64 [ 1, %entry ], [ %nextj, %loop_end ]
  %phi_k = phi i64 [ 0, %entry ], [ %nextk, %loop_end ]
  br label %inner_loop

inner_loop:
  %phi = phi i64 [ %phi_k, %loop ], [ %nextk, %inner_loop ]
  %idx = lshr i64 %phi, 6
  %inputptr = getelementptr i64, i64* %x, i64 %idx
  %input = load i64, i64* %inputptr, align 8
  %masked = and i64 %phi, 63
  %shiftedmasked = shl i64 1, %masked
  %maskedinput = and i64 %input, %shiftedmasked
  %cmp = icmp eq i64 %maskedinput, 0
  %nextk = add i64 %phi, 1
  br i1 %cmp, label %inner_loop, label %loop_end

loop_end:
  %nexti = add i64 %phi_i, 1
  %nextj = add i64 %phi_j, 1
  ; Register use, plus us clobbering 7-15 above, basically forces xmm6 here as
  ; the only reasonable choice. The primary thing we care about is that it's
  ; not one of the registers used in the loop (e.g. not the output reg here)
  ;AVX-NOT: %xmm6
  ;AVX: vcvtsi2sd {{.*}}, %xmm6, {{%xmm[0-9]+}}
  ;AVX-NOT: %xmm6
  %nexti_f = sitofp i64 %nexti to double
  %sub = fsub double %c1, %nexti_f
  %mul = fmul double %sub, %c2
  ;AVX: vcvtsi2sd {{.*}}, %xmm6, {{%xmm[0-9]+}}
  ;AVX-NOT: %xmm6
  %phi_f = sitofp i64 %phi to double
  %mul2 = fmul double %phi_f, %c3
  %add2 = fadd double %mul, %mul2
  %div = fdiv double %add2, %c4
  %prev_j = add i64 %phi_j, -1
  %outptr = getelementptr double, double* %y, i64 %prev_j
  store double %div, double* %outptr, align 8
  %done = icmp slt i64 %size, %nexti
  br i1 %done, label %loopdone, label %loop

loopdone:
  ret void
}