2019-05-22 05:49:10 +08:00
|
|
|
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
2014-03-22 05:46:24 +08:00
|
|
|
; RUN: llc < %s -mtriple=x86_64-apple-macosx -regalloc=greedy | FileCheck %s
|
|
|
|
|
2015-12-17 07:10:53 +08:00
|
|
|
; This testing case is reduced from 254.gap SyFgets function.
|
2016-04-13 11:08:27 +08:00
|
|
|
; We make sure a spill is hoisted to a cold BB inside the hotter outer loop.
|
2014-03-22 05:46:24 +08:00
|
|
|
|
|
|
|
%struct.TMP.1 = type { %struct.TMP.2*, %struct.TMP.2*, [1024 x i8] }
|
|
|
|
%struct.TMP.2 = type { i8*, i32, i32, i16, i16, %struct.TMP.3, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.TMP.3, %struct.TMP.4*, i32, [3 x i8], [1 x i8], %struct.TMP.3, i32, i64 }
|
|
|
|
%struct.TMP.4 = type opaque
|
|
|
|
%struct.TMP.3 = type { i8*, i32 }
|
|
|
|
|
|
|
|
@syBuf = external global [16 x %struct.TMP.1], align 16
|
|
|
|
@syHistory = external global [8192 x i8], align 16
|
|
|
|
@SyFgets.yank = external global [512 x i8], align 16
|
|
|
|
@syCTRO = external global i32, align 4
|
|
|
|
|
|
|
|
define i8* @SyFgets(i8* %line, i64 %length, i64 %fid) {
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-LABEL: SyFgets:
|
|
|
|
; CHECK: ## %bb.0: ## %entry
|
|
|
|
; CHECK-NEXT: pushq %rbp
|
|
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 16
|
|
|
|
; CHECK-NEXT: pushq %r15
|
|
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 24
|
|
|
|
; CHECK-NEXT: pushq %r14
|
|
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 32
|
|
|
|
; CHECK-NEXT: pushq %r13
|
|
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 40
|
|
|
|
; CHECK-NEXT: pushq %r12
|
|
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 48
|
|
|
|
; CHECK-NEXT: pushq %rbx
|
|
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 56
|
|
|
|
; CHECK-NEXT: subq $536, %rsp ## imm = 0x218
|
|
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 592
|
|
|
|
; CHECK-NEXT: .cfi_offset %rbx, -56
|
|
|
|
; CHECK-NEXT: .cfi_offset %r12, -48
|
|
|
|
; CHECK-NEXT: .cfi_offset %r13, -40
|
|
|
|
; CHECK-NEXT: .cfi_offset %r14, -32
|
|
|
|
; CHECK-NEXT: .cfi_offset %r15, -24
|
|
|
|
; CHECK-NEXT: .cfi_offset %rbp, -16
|
|
|
|
; CHECK-NEXT: movq %rdx, %rax
|
|
|
|
; CHECK-NEXT: orq $2, %rax
|
|
|
|
; CHECK-NEXT: cmpq $2, %rax
|
|
|
|
; CHECK-NEXT: jne LBB0_4
|
|
|
|
; CHECK-NEXT: ## %bb.1: ## %if.end
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: jne LBB0_5
|
|
|
|
; CHECK-NEXT: ## %bb.2: ## %if.then4
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: je LBB0_55
|
|
|
|
; CHECK-NEXT: ## %bb.3: ## %SyTime.exit
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: je LBB0_55
|
|
|
|
; CHECK-NEXT: LBB0_4: ## %cleanup
|
|
|
|
; CHECK-NEXT: addq $536, %rsp ## imm = 0x218
|
|
|
|
; CHECK-NEXT: popq %rbx
|
|
|
|
; CHECK-NEXT: popq %r12
|
|
|
|
; CHECK-NEXT: popq %r13
|
|
|
|
; CHECK-NEXT: popq %r14
|
|
|
|
; CHECK-NEXT: popq %r15
|
|
|
|
; CHECK-NEXT: popq %rbp
|
|
|
|
; CHECK-NEXT: retq
|
|
|
|
; CHECK-NEXT: LBB0_5: ## %if.end25
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: je LBB0_55
|
|
|
|
; CHECK-NEXT: ## %bb.6: ## %SyTime.exit2720
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movq %rdx, %rbx
|
|
|
|
; CHECK-NEXT: movq %rdi, %rbp
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax
|
|
|
|
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
|
|
|
|
; CHECK-NEXT: cmpq %rax, %rcx
|
|
|
|
; CHECK-NEXT: jae LBB0_8
|
|
|
|
; CHECK-NEXT: ## %bb.7: ## %for.body.lr.ph
|
|
|
|
; CHECK-NEXT: movl $512, %edx ## imm = 0x200
|
|
|
|
; CHECK-NEXT: movl $32, %esi
|
|
|
|
; CHECK-NEXT: callq _memset
|
|
|
|
; CHECK-NEXT: LBB0_8: ## %while.body.preheader
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
|
|
|
|
; CHECK-NEXT: imulq $1040, %rbx, %rax ## imm = 0x410
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: movq _syBuf@{{.*}}(%rip), %rcx
|
|
|
|
; CHECK-NEXT: leaq 8(%rcx,%rax), %rbx
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movl $1, %r15d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: movq _syCTRO@{{.*}}(%rip), %rax
|
|
|
|
; CHECK-NEXT: movb $1, %cl
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_9: ## %do.body
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: movl $0, (%rax)
|
|
|
|
; CHECK-NEXT: testb %cl, %cl
|
|
|
|
; CHECK-NEXT: jne LBB0_9
|
|
|
|
; CHECK-NEXT: ## %bb.10: ## %do.end
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: xorl %r14d, %r14d
|
|
|
|
; CHECK-NEXT: testb %r14b, %r14b
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jne LBB0_11
|
|
|
|
; CHECK-NEXT: ## %bb.12: ## %while.body200.preheader
|
|
|
|
; CHECK-NEXT: xorl %edx, %edx
|
|
|
|
; CHECK-NEXT: leaq {{.*}}(%rip), %rsi
|
|
|
|
; CHECK-NEXT: leaq {{.*}}(%rip), %rdi
|
|
|
|
; CHECK-NEXT: xorl %ebp, %ebp
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: xorl %r13d, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jmp LBB0_13
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
2019-06-15 07:08:59 +08:00
|
|
|
; CHECK-NEXT: LBB0_20: ## %sw.bb256
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: movl %r14d, %r13d
|
|
|
|
; CHECK-NEXT: LBB0_21: ## %while.cond197.backedge
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: decl %r15d
|
|
|
|
; CHECK-NEXT: testl %r15d, %r15d
|
|
|
|
; CHECK-NEXT: movl %r13d, %r14d
|
|
|
|
; CHECK-NEXT: jle LBB0_22
|
|
|
|
; CHECK-NEXT: LBB0_13: ## %while.body200
|
|
|
|
; CHECK-NEXT: ## =>This Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: ## Child Loop BB0_30 Depth 2
|
|
|
|
; CHECK-NEXT: ## Child Loop BB0_38 Depth 2
|
|
|
|
; CHECK-NEXT: leal -268(%r14), %eax
|
|
|
|
; CHECK-NEXT: cmpl $105, %eax
|
|
|
|
; CHECK-NEXT: ja LBB0_14
|
|
|
|
; CHECK-NEXT: ## %bb.56: ## %while.body200
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: movslq (%rdi,%rax,4), %rax
|
|
|
|
; CHECK-NEXT: addq %rdi, %rax
|
|
|
|
; CHECK-NEXT: jmpq *%rax
|
|
|
|
; CHECK-NEXT: LBB0_44: ## %while.cond1037.preheader
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
|
|
|
; CHECK-NEXT: movl %r14d, %r13d
|
|
|
|
; CHECK-NEXT: jne LBB0_21
|
|
|
|
; CHECK-NEXT: jmp LBB0_55
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: LBB0_14: ## %while.body200
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: leal 1(%r14), %eax
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: cmpl $21, %eax
|
|
|
|
; CHECK-NEXT: ja LBB0_20
|
|
|
|
; CHECK-NEXT: ## %bb.15: ## %while.body200
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movl $-1, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: movslq (%rsi,%rax,4), %rax
|
|
|
|
; CHECK-NEXT: addq %rsi, %rax
|
|
|
|
; CHECK-NEXT: jmpq *%rax
|
|
|
|
; CHECK-NEXT: LBB0_18: ## %while.cond201.preheader
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movl $1, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jmp LBB0_21
|
|
|
|
; CHECK-NEXT: LBB0_26: ## %sw.bb474
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: ## implicit-def: $r12
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jne LBB0_34
|
|
|
|
; CHECK-NEXT: ## %bb.27: ## %do.body479.preheader
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: ## implicit-def: $r12
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jne LBB0_34
|
|
|
|
; CHECK-NEXT: ## %bb.28: ## %land.rhs485.preheader
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: ## implicit-def: $rax
|
2019-06-15 07:08:59 +08:00
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: jns LBB0_30
|
|
|
|
; CHECK-NEXT: jmp LBB0_55
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
2019-06-15 07:08:59 +08:00
|
|
|
; CHECK-NEXT: LBB0_32: ## %do.body479.backedge
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_30 Depth=2
|
|
|
|
; CHECK-NEXT: leaq 1(%r12), %rax
|
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
|
|
|
; CHECK-NEXT: je LBB0_33
|
|
|
|
; CHECK-NEXT: ## %bb.29: ## %land.rhs485
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_30 Depth=2
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: js LBB0_55
|
2019-06-15 07:08:59 +08:00
|
|
|
; CHECK-NEXT: LBB0_30: ## %cond.true.i.i2780
|
|
|
|
; CHECK-NEXT: ## Parent Loop BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: ## => This Inner Loop Header: Depth=2
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movq %rax, %r12
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
|
|
|
; CHECK-NEXT: jne LBB0_32
|
|
|
|
; CHECK-NEXT: ## %bb.31: ## %lor.rhs500
|
2019-06-15 07:08:59 +08:00
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_30 Depth=2
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: movl $256, %esi ## imm = 0x100
|
|
|
|
; CHECK-NEXT: callq ___maskrune
|
|
|
|
; CHECK-NEXT: xorl %edx, %edx
|
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
2019-06-15 07:08:59 +08:00
|
|
|
; CHECK-NEXT: jne LBB0_32
|
|
|
|
; CHECK-NEXT: jmp LBB0_34
|
|
|
|
; CHECK-NEXT: LBB0_45: ## %sw.bb1134
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax
|
|
|
|
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
|
|
|
|
; CHECK-NEXT: cmpq %rax, %rcx
|
|
|
|
; CHECK-NEXT: jb LBB0_55
|
|
|
|
; CHECK-NEXT: ## %bb.46: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: xorl %ebp, %ebp
|
|
|
|
; CHECK-NEXT: movl $268, %r13d ## imm = 0x10C
|
|
|
|
; CHECK-NEXT: jmp LBB0_21
|
|
|
|
; CHECK-NEXT: LBB0_19: ## %sw.bb243
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: movl $2, %r13d
|
|
|
|
; CHECK-NEXT: jmp LBB0_21
|
|
|
|
; CHECK-NEXT: LBB0_40: ## %sw.bb566
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: movl $20, %r13d
|
|
|
|
; CHECK-NEXT: jmp LBB0_21
|
|
|
|
; CHECK-NEXT: LBB0_33: ## %if.end517.loopexitsplit
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: incq %r12
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: LBB0_34: ## %if.end517
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: leal -324(%r13), %eax
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: cmpl $59, %eax
|
|
|
|
; CHECK-NEXT: ja LBB0_35
|
|
|
|
; CHECK-NEXT: ## %bb.57: ## %if.end517
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: movabsq $576460756598390785, %rcx ## imm = 0x800000100000001
|
|
|
|
; CHECK-NEXT: btq %rax, %rcx
|
|
|
|
; CHECK-NEXT: jb LBB0_38
|
|
|
|
; CHECK-NEXT: LBB0_35: ## %if.end517
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: cmpl $11, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: je LBB0_38
|
|
|
|
; CHECK-NEXT: ## %bb.36: ## %if.end517
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: cmpl $24, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: je LBB0_38
|
|
|
|
; CHECK-NEXT: ## %bb.37: ## %if.then532
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: movq _SyFgets.yank@{{.*}}(%rip), %rax
|
|
|
|
; CHECK-NEXT: movb $0, (%rax)
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_38: ## %for.cond534
|
|
|
|
; CHECK-NEXT: ## Parent Loop BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: ## => This Inner Loop Header: Depth=2
|
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
|
|
|
; CHECK-NEXT: jne LBB0_38
|
|
|
|
; CHECK-NEXT: ## %bb.39: ## %for.cond542.preheader
|
|
|
|
; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
|
|
|
|
; CHECK-NEXT: testb %dl, %dl
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movb $0, (%r12)
|
|
|
|
; CHECK-NEXT: movl %r14d, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: leaq {{.*}}(%rip), %rsi
|
|
|
|
; CHECK-NEXT: leaq {{.*}}(%rip), %rdi
|
|
|
|
; CHECK-NEXT: jmp LBB0_21
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_42: ## %while.cond864
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: jmp LBB0_42
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_43: ## %while.cond962
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: jmp LBB0_43
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_25: ## %for.cond357
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: jmp LBB0_25
|
|
|
|
; CHECK-NEXT: LBB0_11:
|
|
|
|
; CHECK-NEXT: xorl %ebp, %ebp
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: xorl %r13d, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: LBB0_22: ## %while.end1465
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: incl %r13d
|
|
|
|
; CHECK-NEXT: cmpl $16, %r13d
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: ja LBB0_50
|
|
|
|
; CHECK-NEXT: ## %bb.23: ## %while.end1465
|
|
|
|
; CHECK-NEXT: movl $83969, %eax ## imm = 0x14801
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: btl %r13d, %eax
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jae LBB0_50
|
|
|
|
; CHECK-NEXT: ## %bb.24:
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: xorl %ebp, %ebp
|
|
|
|
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx ## 8-byte Reload
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: LBB0_48: ## %if.then1477
|
|
|
|
; CHECK-NEXT: movl $1, %edx
|
|
|
|
; CHECK-NEXT: callq _write
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: subq %rbp, %rbx
|
|
|
|
; CHECK-NEXT: movq _syHistory@{{.*}}(%rip), %rax
|
|
|
|
; CHECK-NEXT: leaq 8189(%rbx,%rax), %rax
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_49: ## %for.body1723
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: decq %rax
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jmp LBB0_49
|
|
|
|
; CHECK-NEXT: LBB0_50: ## %for.cond1480.preheader
|
|
|
|
; CHECK-NEXT: movl $512, %eax ## imm = 0x200
|
|
|
|
; CHECK-NEXT: cmpq %rax, %rax
|
|
|
|
; CHECK-NEXT: jae LBB0_55
|
|
|
|
; CHECK-NEXT: ## %bb.51: ## %for.body1664.lr.ph
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: jne LBB0_54
|
|
|
|
; CHECK-NEXT: ## %bb.52: ## %while.body1679.preheader
|
|
|
|
; CHECK-NEXT: incl %ebp
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_53: ## %while.body1679
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: movq (%rbx), %rdi
|
|
|
|
; CHECK-NEXT: callq _fileno
|
|
|
|
; CHECK-NEXT: movslq %ebp, %rax
|
|
|
|
; CHECK-NEXT: leal 1(%rax), %ebp
|
|
|
|
; CHECK-NEXT: cmpq %rax, %rax
|
|
|
|
; CHECK-NEXT: jl LBB0_53
|
|
|
|
; CHECK-NEXT: LBB0_54: ## %while.cond1683.preheader
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: LBB0_55: ## %if.then.i
|
|
|
|
; CHECK-NEXT: ud2
|
|
|
|
; CHECK-NEXT: LBB0_47: ## %if.then1477.loopexit
|
[DAGCombiner][X86][AArch64][AMDGPU] (x + C) - y -> (x - y) + C fold. Try 3
Summary:
The main motivation is shown by all these `neg` instructions that are now created.
In particular, the `@reg32_lshr_by_negated_unfolded_sub_b` test.
AArch64 test changes all look good (`neg` created), or neutral.
X86 changes look neutral (vectors), or good (`neg` / `xor eax, eax` created).
I'm not sure about `X86/ragreedy-hoist-spill.ll`, it looks like the spill
is now hoisted into preheader (which should still be good?),
2 4-byte reloads become 1 8-byte reload, and are elsewhere,
but i'm not sure how that affects that loop.
I'm unable to interpret AMDGPU change, looks neutral-ish?
This is hopefully a step towards solving [[ https://bugs.llvm.org/show_bug.cgi?id=41952 | PR41952 ]].
https://rise4fun.com/Alive/pkdq (we are missing more patterns, i'll submit them later)
This is a recommit, originally committed in rL361852, but reverted
to investigate test-suite compile-time hangs, and then reverted in
rL362109 to fix missing constant folds that were causing
endless combine loops.
Reviewers: craig.topper, RKSimon, spatel, arsenm
Reviewed By: RKSimon
Subscribers: bjope, qcolombet, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, javed.absar, dstuttard, tpr, t-tye, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62223
llvm-svn: 362142
2019-05-31 04:36:54 +08:00
|
|
|
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx ## 8-byte Reload
|
|
|
|
; CHECK-NEXT: movq %rbx, %rbp
|
2019-05-22 05:49:10 +08:00
|
|
|
; CHECK-NEXT: jmp LBB0_48
|
|
|
|
; CHECK-NEXT: LBB0_16: ## %while.cond635.preheader
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testb %al, %al
|
|
|
|
; CHECK-NEXT: je LBB0_41
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_17: ## %for.body643.us
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: jmp LBB0_17
|
|
|
|
; CHECK-NEXT: .p2align 4, 0x90
|
|
|
|
; CHECK-NEXT: LBB0_41: ## %while.cond661
|
|
|
|
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
|
|
; CHECK-NEXT: jmp LBB0_41
|
2014-03-22 05:46:24 +08:00
|
|
|
entry:
|
|
|
|
%sub.ptr.rhs.cast646 = ptrtoint i8* %line to i64
|
|
|
|
%old = alloca [512 x i8], align 16
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
same crappy python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%0 = getelementptr inbounds [512 x i8], [512 x i8]* %old, i64 0, i64 0
|
2014-03-22 05:46:24 +08:00
|
|
|
switch i64 %fid, label %if.then [
|
|
|
|
i64 2, label %if.end
|
|
|
|
i64 0, label %if.end
|
|
|
|
]
|
|
|
|
|
|
|
|
if.then:
|
|
|
|
br label %cleanup
|
|
|
|
|
|
|
|
if.end:
|
|
|
|
switch i64 undef, label %if.end25 [
|
|
|
|
i64 0, label %if.then4
|
2015-04-28 07:35:22 +08:00
|
|
|
i64 1, label %if.end25
|
2014-03-22 05:46:24 +08:00
|
|
|
]
|
|
|
|
|
|
|
|
if.then4:
|
|
|
|
br i1 undef, label %SyTime.exit, label %if.then.i
|
|
|
|
|
|
|
|
if.then.i:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
SyTime.exit:
|
|
|
|
br i1 undef, label %SyTime.exit2681, label %if.then.i2673
|
|
|
|
|
|
|
|
if.then.i2673:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
SyTime.exit2681:
|
|
|
|
br label %cleanup
|
|
|
|
|
|
|
|
land.lhs.true14:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
if.end25:
|
|
|
|
br i1 undef, label %SyTime.exit2720, label %if.then.i2712
|
|
|
|
|
|
|
|
if.then.i2712:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
SyTime.exit2720:
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
same crappy python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%add.ptr = getelementptr [512 x i8], [512 x i8]* %old, i64 0, i64 512
|
2014-03-22 05:46:24 +08:00
|
|
|
%cmp293427 = icmp ult i8* %0, %add.ptr
|
|
|
|
br i1 %cmp293427, label %for.body.lr.ph, label %while.body.preheader
|
|
|
|
|
|
|
|
for.body.lr.ph:
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-20 01:13:12 +08:00
|
|
|
call void @llvm.memset.p0i8.i64(i8* align 16 undef, i8 32, i64 512, i1 false)
|
2014-03-22 05:46:24 +08:00
|
|
|
br label %while.body.preheader
|
|
|
|
|
|
|
|
while.body.preheader:
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
same crappy python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%add.ptr1603 = getelementptr [512 x i8], [512 x i8]* null, i64 0, i64 512
|
|
|
|
%echo.i3101 = getelementptr [16 x %struct.TMP.1], [16 x %struct.TMP.1]* @syBuf, i64 0, i64 %fid, i32 1
|
2014-03-22 05:46:24 +08:00
|
|
|
%1 = xor i64 %sub.ptr.rhs.cast646, -1
|
|
|
|
br label %do.body
|
|
|
|
|
|
|
|
do.body:
|
|
|
|
%ch2.0 = phi i32 [ 0, %while.body.preheader ], [ %ch.12.ch2.12, %do.body ]
|
|
|
|
%rep.0 = phi i32 [ 1, %while.body.preheader ], [ %rep.6, %do.body ]
|
|
|
|
store i32 0, i32* @syCTRO, align 4, !tbaa !1
|
|
|
|
%ch.0.ch2.0 = select i1 undef, i32 14, i32 %ch2.0
|
|
|
|
%ch2.2 = select i1 undef, i32 0, i32 %ch.0.ch2.0
|
|
|
|
%ch.2.ch2.2 = select i1 undef, i32 0, i32 %ch2.2
|
|
|
|
%ch2.4 = select i1 undef, i32 278, i32 %ch.2.ch2.2
|
|
|
|
%ch2.5 = select i1 undef, i32 0, i32 %ch2.4
|
|
|
|
%rep.2 = select i1 undef, i32 undef, i32 %rep.0
|
|
|
|
%ch.5.ch2.5 = select i1 undef, i32 undef, i32 %ch2.5
|
|
|
|
%ch2.7 = select i1 undef, i32 0, i32 %ch.5.ch2.5
|
|
|
|
%rep.3 = select i1 undef, i32 undef, i32 %rep.2
|
|
|
|
%ch.7.ch2.7 = select i1 false, i32 0, i32 %ch2.7
|
|
|
|
%mul98.rep.3 = select i1 false, i32 0, i32 %rep.3
|
|
|
|
%ch2.9 = select i1 undef, i32 undef, i32 %ch.7.ch2.7
|
|
|
|
%rep.5 = select i1 undef, i32 undef, i32 %mul98.rep.3
|
|
|
|
%ch2.10 = select i1 false, i32 undef, i32 %ch2.9
|
|
|
|
%rep.6 = select i1 false, i32 undef, i32 %rep.5
|
|
|
|
%isdigittmp = add i32 %ch2.10, -48
|
|
|
|
%isdigit = icmp ult i32 %isdigittmp, 10
|
|
|
|
%cmp119 = icmp eq i32 undef, 22
|
|
|
|
%or.cond1875 = and i1 %isdigit, %cmp119
|
|
|
|
%ch.10.ch2.10 = select i1 %or.cond1875, i32 undef, i32 %ch2.10
|
|
|
|
%.ch.10 = select i1 %or.cond1875, i32 0, i32 undef
|
|
|
|
%ch2.12 = select i1 undef, i32 %.ch.10, i32 %ch.10.ch2.10
|
|
|
|
%ch.12 = select i1 undef, i32 0, i32 %.ch.10
|
|
|
|
%ch.12.ch2.12 = select i1 false, i32 %ch.12, i32 %ch2.12
|
|
|
|
%.ch.12 = select i1 false, i32 0, i32 %ch.12
|
|
|
|
%cmp147 = icmp eq i32 %.ch.12, 0
|
|
|
|
br i1 %cmp147, label %do.body, label %do.end
|
|
|
|
|
|
|
|
do.end:
|
|
|
|
%cmp164 = icmp eq i32 %ch.12.ch2.12, 21
|
|
|
|
%mul167 = shl i32 %rep.6, 2
|
|
|
|
%rep.8 = select i1 %cmp164, i32 %mul167, i32 %rep.6
|
|
|
|
%..ch.19 = select i1 false, i32 2, i32 0
|
|
|
|
br i1 undef, label %while.body200, label %while.end1465
|
|
|
|
|
|
|
|
while.body200:
|
|
|
|
%dec3386.in = phi i32 [ %dec3386, %while.cond197.backedge ], [ %rep.8, %do.end ]
|
|
|
|
%oldc.13384 = phi i32 [ %oldc.1.be, %while.cond197.backedge ], [ 0, %do.end ]
|
|
|
|
%ch.213379 = phi i32 [ %last.1.be, %while.cond197.backedge ], [ %..ch.19, %do.end ]
|
|
|
|
%last.13371 = phi i32 [ %last.1.be, %while.cond197.backedge ], [ 0, %do.end ]
|
|
|
|
%dec3386 = add i32 %dec3386.in, -1
|
|
|
|
switch i32 %ch.213379, label %sw.default [
|
|
|
|
i32 1, label %while.cond201.preheader
|
|
|
|
i32 322, label %sw.bb206
|
|
|
|
i32 354, label %sw.bb206
|
|
|
|
i32 2, label %sw.bb243
|
|
|
|
i32 364, label %sw.bb1077
|
|
|
|
i32 326, label %sw.bb256
|
|
|
|
i32 358, label %sw.bb256
|
|
|
|
i32 341, label %sw.bb979
|
|
|
|
i32 323, label %while.cond1037.preheader
|
|
|
|
i32 373, label %sw.bb979
|
|
|
|
i32 4, label %if.then1477
|
|
|
|
i32 332, label %sw.bb1077
|
|
|
|
i32 11, label %for.cond357
|
|
|
|
i32 355, label %while.cond1037.preheader
|
|
|
|
i32 324, label %sw.bb474
|
|
|
|
i32 356, label %sw.bb474
|
|
|
|
i32 20, label %sw.bb566
|
|
|
|
i32 -1, label %while.cond197.backedge
|
|
|
|
i32 268, label %sw.bb1134
|
|
|
|
i32 16, label %while.cond635.preheader
|
|
|
|
i32 18, label %sw.bb956
|
|
|
|
i32 316, label %while.cond864
|
|
|
|
]
|
|
|
|
|
|
|
|
while.cond1037.preheader:
|
|
|
|
%cmp10393273 = icmp eq i8 undef, 0
|
|
|
|
br i1 %cmp10393273, label %if.end1070, label %land.rhs1041
|
|
|
|
|
|
|
|
while.cond635.preheader:
|
|
|
|
br i1 undef, label %for.body643.us, label %while.cond661
|
|
|
|
|
|
|
|
for.body643.us:
|
|
|
|
br label %for.body643.us
|
|
|
|
|
|
|
|
while.cond201.preheader:
|
|
|
|
%umax = select i1 false, i64 undef, i64 %1
|
|
|
|
%2 = xor i64 %umax, -1
|
|
|
|
%3 = inttoptr i64 %2 to i8*
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
sw.bb206:
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
sw.bb243:
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
sw.bb256:
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
while.cond197.backedge:
|
|
|
|
%last.1.be = phi i32 [ %ch.213379, %sw.default ], [ -1, %while.body200 ], [ %ch.213379, %sw.bb1077 ], [ %ch.213379, %sw.bb979 ], [ 18, %sw.bb956 ], [ 20, %sw.bb566 ], [ %ch.213379, %for.end552 ], [ %ch.213379, %sw.bb256 ], [ 2, %sw.bb243 ], [ 1, %while.cond201.preheader ], [ 268, %for.cond1145.preheader ], [ %ch.213379, %sw.bb206 ]
|
|
|
|
%oldc.1.be = phi i32 [ %oldc.13384, %sw.default ], [ %oldc.13384, %while.body200 ], [ %oldc.13384, %sw.bb1077 ], [ %oldc.13384, %sw.bb979 ], [ %oldc.13384, %sw.bb956 ], [ %oldc.13384, %sw.bb566 ], [ %oldc.13384, %for.end552 ], [ %oldc.13384, %sw.bb256 ], [ %oldc.13384, %sw.bb243 ], [ %oldc.13384, %while.cond201.preheader ], [ 0, %for.cond1145.preheader ], [ %oldc.13384, %sw.bb206 ]
|
|
|
|
%cmp198 = icmp sgt i32 %dec3386, 0
|
|
|
|
br i1 %cmp198, label %while.body200, label %while.end1465
|
|
|
|
|
|
|
|
for.cond357:
|
|
|
|
br label %for.cond357
|
|
|
|
|
|
|
|
sw.bb474:
|
[CodeGenPrep] Skip merging empty case blocks
This is recommit of r287553 after fixing the invalid loop info after eliminating an empty block and unit test failures in AVR and WebAssembly :
Summary: Merging an empty case block into the header block of switch could cause ISel to add COPY instructions in the header of switch, instead of the case block, if the case block is used as an incoming block of a PHI. This could potentially increase dynamic instructions, especially when the switch is in a loop. I added a test case which was reduced from the benchmark I was targetting.
Reviewers: t.p.northover, mcrosier, manmanren, wmi, joerg, davidxl
Subscribers: joerg, qcolombet, danielcdh, hfinkel, mcrosier, llvm-commits
Differential Revision: https://reviews.llvm.org/D22696
llvm-svn: 289988
2016-12-17 04:38:39 +08:00
|
|
|
; spill is hoisted here. Although loop depth1 is even hotter than loop depth2, sw.bb474 is still cold.
|
2014-03-22 05:46:24 +08:00
|
|
|
%cmp476 = icmp eq i8 undef, 0
|
|
|
|
br i1 %cmp476, label %if.end517, label %do.body479.preheader
|
|
|
|
|
|
|
|
do.body479.preheader:
|
|
|
|
%cmp4833314 = icmp eq i8 undef, 0
|
|
|
|
br i1 %cmp4833314, label %if.end517, label %land.rhs485
|
|
|
|
|
|
|
|
land.rhs485:
|
|
|
|
%incdec.ptr4803316 = phi i8* [ %incdec.ptr480, %do.body479.backedge.land.rhs485_crit_edge ], [ undef, %do.body479.preheader ]
|
|
|
|
%isascii.i.i27763151 = icmp sgt i8 undef, -1
|
|
|
|
br i1 %isascii.i.i27763151, label %cond.true.i.i2780, label %cond.false.i.i2782
|
|
|
|
|
|
|
|
cond.true.i.i2780:
|
|
|
|
br i1 undef, label %land.lhs.true490, label %lor.rhs500
|
|
|
|
|
|
|
|
cond.false.i.i2782:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
land.lhs.true490:
|
|
|
|
br i1 false, label %lor.rhs500, label %do.body479.backedge
|
|
|
|
|
|
|
|
lor.rhs500:
|
2016-04-13 11:08:27 +08:00
|
|
|
; Make sure spill is hoisted to a cold preheader in outside loop.
|
2014-03-22 05:46:24 +08:00
|
|
|
%call3.i.i2792 = call i32 @__maskrune(i32 undef, i64 256)
|
|
|
|
br i1 undef, label %land.lhs.true504, label %do.body479.backedge
|
|
|
|
|
|
|
|
land.lhs.true504:
|
|
|
|
br i1 undef, label %do.body479.backedge, label %if.end517
|
|
|
|
|
|
|
|
do.body479.backedge:
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
the same crappy Python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%incdec.ptr480 = getelementptr i8, i8* %incdec.ptr4803316, i64 1
|
2014-03-22 05:46:24 +08:00
|
|
|
%cmp483 = icmp eq i8 undef, 0
|
|
|
|
br i1 %cmp483, label %if.end517, label %do.body479.backedge.land.rhs485_crit_edge
|
|
|
|
|
|
|
|
do.body479.backedge.land.rhs485_crit_edge:
|
|
|
|
br label %land.rhs485
|
|
|
|
|
|
|
|
if.end517:
|
|
|
|
%q.4 = phi i8* [ undef, %sw.bb474 ], [ undef, %do.body479.preheader ], [ %incdec.ptr480, %do.body479.backedge ], [ %incdec.ptr4803316, %land.lhs.true504 ]
|
|
|
|
switch i32 %last.13371, label %if.then532 [
|
|
|
|
i32 383, label %for.cond534
|
|
|
|
i32 356, label %for.cond534
|
|
|
|
i32 324, label %for.cond534
|
|
|
|
i32 24, label %for.cond534
|
|
|
|
i32 11, label %for.cond534
|
|
|
|
]
|
|
|
|
|
|
|
|
if.then532:
|
2015-03-14 02:20:45 +08:00
|
|
|
store i8 0, i8* getelementptr inbounds ([512 x i8], [512 x i8]* @SyFgets.yank, i64 0, i64 0), align 16, !tbaa !5
|
2014-03-22 05:46:24 +08:00
|
|
|
br label %for.cond534
|
|
|
|
|
|
|
|
for.cond534:
|
|
|
|
%cmp536 = icmp eq i8 undef, 0
|
|
|
|
br i1 %cmp536, label %for.cond542.preheader, label %for.cond534
|
|
|
|
|
|
|
|
for.cond542.preheader:
|
|
|
|
br i1 undef, label %for.body545, label %for.end552
|
|
|
|
|
|
|
|
for.body545:
|
|
|
|
br i1 undef, label %for.end552, label %for.body545
|
|
|
|
|
|
|
|
for.end552:
|
|
|
|
%s.2.lcssa = phi i8* [ undef, %for.cond542.preheader ], [ %q.4, %for.body545 ]
|
|
|
|
%sub.ptr.lhs.cast553 = ptrtoint i8* %s.2.lcssa to i64
|
|
|
|
%sub.ptr.sub555 = sub i64 %sub.ptr.lhs.cast553, 0
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
the same crappy Python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%arrayidx556 = getelementptr i8, i8* null, i64 %sub.ptr.sub555
|
2014-03-22 05:46:24 +08:00
|
|
|
store i8 0, i8* %arrayidx556, align 1, !tbaa !5
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
sw.bb566:
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
while.cond661:
|
|
|
|
br label %while.cond661
|
|
|
|
|
|
|
|
while.cond864:
|
|
|
|
br label %while.cond864
|
|
|
|
|
|
|
|
sw.bb956:
|
|
|
|
br i1 undef, label %if.then959, label %while.cond197.backedge
|
|
|
|
|
|
|
|
if.then959:
|
|
|
|
br label %while.cond962
|
|
|
|
|
|
|
|
while.cond962:
|
|
|
|
br label %while.cond962
|
|
|
|
|
|
|
|
sw.bb979:
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
land.rhs1041:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
if.end1070:
|
|
|
|
br label %sw.bb1077
|
|
|
|
|
|
|
|
sw.bb1077:
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
sw.bb1134:
|
|
|
|
br i1 false, label %for.body1139, label %for.cond1145.preheader
|
|
|
|
|
|
|
|
for.cond1145.preheader:
|
|
|
|
br i1 %cmp293427, label %for.body1150.lr.ph, label %while.cond197.backedge
|
|
|
|
|
|
|
|
for.body1150.lr.ph:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
for.body1139:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
sw.default:
|
|
|
|
br label %while.cond197.backedge
|
|
|
|
|
|
|
|
while.end1465:
|
|
|
|
%oldc.1.lcssa = phi i32 [ 0, %do.end ], [ %oldc.1.be, %while.cond197.backedge ]
|
|
|
|
%ch.21.lcssa = phi i32 [ %..ch.19, %do.end ], [ %last.1.be, %while.cond197.backedge ]
|
|
|
|
switch i32 %ch.21.lcssa, label %for.cond1480.preheader [
|
|
|
|
i32 -1, label %if.then1477
|
|
|
|
i32 15, label %if.then1477
|
|
|
|
i32 13, label %if.then1477
|
|
|
|
i32 10, label %if.then1477
|
|
|
|
]
|
|
|
|
|
|
|
|
for.cond1480.preheader:
|
|
|
|
br i1 undef, label %for.body1606.lr.ph, label %for.end1609
|
|
|
|
|
|
|
|
if.then1477:
|
|
|
|
%p.1.lcssa3539 = phi i8* [ null, %while.end1465 ], [ null, %while.end1465 ], [ null, %while.end1465 ], [ null, %while.end1465 ], [ %line, %while.body200 ]
|
|
|
|
%call1.i3057 = call i64 @"\01_write"(i32 undef, i8* undef, i64 1)
|
|
|
|
%sub.ptr.lhs.cast1717 = ptrtoint i8* %p.1.lcssa3539 to i64
|
|
|
|
%sub.ptr.sub1719 = sub i64 %sub.ptr.lhs.cast1717, %sub.ptr.rhs.cast646
|
|
|
|
%idx.neg1727 = sub i64 0, %sub.ptr.sub1719
|
|
|
|
br label %for.body1723
|
|
|
|
|
|
|
|
for.body1606.lr.ph:
|
|
|
|
br label %for.end1609
|
|
|
|
|
|
|
|
for.end1609:
|
|
|
|
br i1 undef, label %for.cond1659.preheader, label %land.lhs.true1614
|
|
|
|
|
|
|
|
land.lhs.true1614:
|
|
|
|
br label %for.cond1659.preheader
|
|
|
|
|
|
|
|
for.cond1659.preheader:
|
|
|
|
%cmp16623414 = icmp ult i8* undef, %add.ptr1603
|
|
|
|
br i1 %cmp16623414, label %for.body1664.lr.ph, label %while.body1703.lr.ph
|
|
|
|
|
|
|
|
for.body1664.lr.ph:
|
|
|
|
%cmp16773405 = icmp slt i64 undef, undef
|
|
|
|
br i1 %cmp16773405, label %while.body1679, label %while.cond1683.preheader
|
|
|
|
|
|
|
|
while.body1703.lr.ph:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
while.cond1683.preheader:
|
|
|
|
br i1 undef, label %while.body1691, label %while.end1693
|
|
|
|
|
|
|
|
while.body1679:
|
|
|
|
%oldc.43406 = phi i32 [ %inc, %syEchoch.exit3070 ], [ %oldc.1.lcssa, %for.body1664.lr.ph ]
|
2015-02-28 05:17:42 +08:00
|
|
|
%4 = load %struct.TMP.2*, %struct.TMP.2** %echo.i3101, align 8, !tbaa !6
|
2014-03-22 05:46:24 +08:00
|
|
|
%call.i3062 = call i32 @fileno(%struct.TMP.2* %4)
|
|
|
|
br i1 undef, label %if.then.i3069, label %syEchoch.exit3070
|
|
|
|
|
|
|
|
if.then.i3069:
|
|
|
|
br label %syEchoch.exit3070
|
|
|
|
|
|
|
|
syEchoch.exit3070:
|
|
|
|
%inc = add i32 %oldc.43406, 1
|
|
|
|
%conv1672 = sext i32 %inc to i64
|
|
|
|
%cmp1677 = icmp slt i64 %conv1672, undef
|
|
|
|
br i1 %cmp1677, label %while.body1679, label %while.cond1683.preheader
|
|
|
|
|
|
|
|
while.body1691:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
while.end1693:
|
|
|
|
unreachable
|
|
|
|
|
|
|
|
for.body1723:
|
2015-03-14 02:20:45 +08:00
|
|
|
%q.303203 = phi i8* [ getelementptr inbounds ([8192 x i8], [8192 x i8]* @syHistory, i64 0, i64 8189), %if.then1477 ], [ %incdec.ptr1730, %for.body1723 ]
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
the same crappy Python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%add.ptr1728 = getelementptr i8, i8* %q.303203, i64 %idx.neg1727
|
2015-02-28 05:17:42 +08:00
|
|
|
%5 = load i8, i8* %add.ptr1728, align 1, !tbaa !5
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
the same crappy Python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%incdec.ptr1730 = getelementptr i8, i8* %q.303203, i64 -1
|
2014-03-22 05:46:24 +08:00
|
|
|
br label %for.body1723
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
ret i8* undef
|
|
|
|
}
|
|
|
|
|
|
|
|
; External libc fileno(3): presumably returns the file descriptor backing the
; given FILE-like stream (%struct.TMP.2 models FILE here) — see caller in
; %while.body1679. nocapture: the callee does not retain the pointer.
declare i32 @fileno(%struct.TMP.2* nocapture)
|
|
|
|
; The leading "\01" tells LLVM to emit the symbol name verbatim (no platform
; name mangling), so this binds to the raw Darwin symbol `_write`, i.e. the
; write(2) syscall wrapper: (fd, buf, nbyte) -> bytes written.
declare i64 @"\01_write"(i32, i8*, i64)
|
|
|
|
; Darwin libc internal ctype helper (used by isalpha/isprint-style macros on
; macOS, matching the x86_64-apple-macosx triple); called from %lor.rhs500
; with mask 256. NOTE(review): exact semantics assumed from the name — the
; test only needs an opaque external call here.
declare i32 @__maskrune(i32, i64)
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-20 01:13:12 +08:00
|
|
|
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
|
2014-03-22 05:46:24 +08:00
|
|
|
|
|
|
|
!llvm.ident = !{!0}
|
|
|
|
|
IR: Make metadata typeless in assembly
Now that `Metadata` is typeless, reflect that in the assembly. These
are the matching assembly changes for the metadata/value split in
r223802.
- Only use the `metadata` type when referencing metadata from a call
intrinsic -- i.e., only when it's used as a `Value`.
- Stop pretending that `ValueAsMetadata` is wrapped in an `MDNode`
when referencing it from call intrinsics.
So, assembly like this:
define @foo(i32 %v) {
call void @llvm.foo(metadata !{i32 %v}, metadata !0)
call void @llvm.foo(metadata !{i32 7}, metadata !0)
call void @llvm.foo(metadata !1, metadata !0)
call void @llvm.foo(metadata !3, metadata !0)
call void @llvm.foo(metadata !{metadata !3}, metadata !0)
ret void, !bar !2
}
!0 = metadata !{metadata !2}
!1 = metadata !{i32* @global}
!2 = metadata !{metadata !3}
!3 = metadata !{}
turns into this:
define @foo(i32 %v) {
call void @llvm.foo(metadata i32 %v, metadata !0)
call void @llvm.foo(metadata i32 7, metadata !0)
call void @llvm.foo(metadata i32* @global, metadata !0)
call void @llvm.foo(metadata !3, metadata !0)
call void @llvm.foo(metadata !{!3}, metadata !0)
ret void, !bar !2
}
!0 = !{!2}
!1 = !{i32* @global}
!2 = !{!3}
!3 = !{}
I wrote an upgrade script that handled almost all of the tests in llvm
and many of the tests in cfe (even handling many `CHECK` lines). I've
attached it (or will attach it in a moment if you're speedy) to PR21532
to help everyone update their out-of-tree testcases.
This is part of PR21532.
llvm-svn: 224257
2014-12-16 03:07:53 +08:00
|
|
|
!0 = !{!"clang version 3.5.0 (trunk 204257)"}
|
|
|
|
!1 = !{!2, !2, i64 0}
|
|
|
|
!2 = !{!"int", !3, i64 0}
|
|
|
|
!3 = !{!"omnipotent char", !4, i64 0}
|
|
|
|
!4 = !{!"Simple C/C++ TBAA"}
|
|
|
|
!5 = !{!3, !3, i64 0}
|
|
|
|
!6 = !{!7, !8, i64 8}
|
|
|
|
!7 = !{!"", !8, i64 0, !8, i64 8, !3, i64 16}
|
|
|
|
!8 = !{!"any pointer", !3, i64 0}
|