; RUN: llc -stack-symbol-ordering=0 %s -o - -mattr=-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=SSE
; RUN: llc -stack-symbol-ordering=0 %s -o - -mattr=+avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=AVX
; PR4891
; PR5626

; This load should be before the call, not after.

; SSE: movsd compl+128(%rip), %xmm0
; SSE: movaps %xmm0, (%rsp)
; SSE: callq killcommon

; AVX: vmovsd compl+128(%rip), %xmm0
; AVX: vmovaps %xmm0, (%rsp)
; AVX: callq killcommon

@compl = linkonce global [20 x i64] zeroinitializer, align 64 ; <[20 x i64]*> [#uses=1]

declare void @killcommon(i32* noalias)

define void @reset(<2 x float>* noalias %garbage1) {
"file complex.c, line 27, bb1":
%changed = alloca i32, align 4 ; <i32*> [#uses=3]
br label %"file complex.c, line 27, bb13"

"file complex.c, line 27, bb13": ; preds = %"file complex.c, line 27, bb1"
store i32 0, i32* %changed, align 4
%r2 = getelementptr float, float* bitcast ([20 x i64]* @compl to float*), i64 32 ; <float*> [#uses=1]
%r3 = bitcast float* %r2 to <2 x float>* ; <<2 x float>*> [#uses=1]
%r4 = load <2 x float>, <2 x float>* %r3, align 4 ; <<2 x float>> [#uses=1]
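; %r4 is the <2 x float> loaded from byte offset 128 of @compl; the CHECK
; lines above require this load to be emitted before the call to @killcommon.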
call void @killcommon(i32* %changed)
br label %"file complex.c, line 34, bb4"

"file complex.c, line 34, bb4": ; preds = %"file complex.c, line 27, bb13"
%r5 = load i32, i32* %changed, align 4 ; <i32> [#uses=1]
%r6 = icmp eq i32 %r5, 0 ; <i1> [#uses=1]
%r7 = zext i1 %r6 to i32 ; <i32> [#uses=1]
%r8 = icmp ne i32 %r7, 0 ; <i1> [#uses=1]
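; %r8 is equivalent to (*%changed == 0): when true, branch straight to bb7 and
; return; otherwise fall through bb5/bb6 and store %r4 into %garbage1.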
br i1 %r8, label %"file complex.c, line 34, bb7", label %"file complex.c, line 27, bb5"

"file complex.c, line 27, bb5": ; preds = %"file complex.c, line 34, bb4"
br label %"file complex.c, line 35, bb6"

"file complex.c, line 35, bb6": ; preds = %"file complex.c, line 27, bb5"
%r11 = ptrtoint <2 x float>* %garbage1 to i64 ; <i64> [#uses=1]
%r12 = inttoptr i64 %r11 to <2 x float>* ; <<2 x float>*> [#uses=1]
store <2 x float> %r4, <2 x float>* %r12, align 4
br label %"file complex.c, line 34, bb7"

"file complex.c, line 34, bb7": ; preds = %"file complex.c, line 35, bb6", %"file complex.c, line 34, bb4"
ret void
}