; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-optree -polly-codegen -disable-output < %s | FileCheck %s -match-full-lines
;
; %val1 is used three times: twice by the operand tree of %val2 and once
; more by the store in %bodyB.
; Verify that we can handle multiple uses by the same instruction as well as
; uses in multiple statements.
; The result of the processing may depend on the order in which the values
; are used, hence we check both orderings.
;
; for (int j = 0; j < n; j += 1) {
; bodyA:
;   double val1 = A[j];
;   double val2 = val1 + val1;
;
; bodyB:
;   B[j] = val1;
;   C[j] = val2;
; }
;
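; After forwarding, %bodyB is expected to re-execute the load and the
; addition instead of reusing the scalars computed in %bodyA; sketched in
; pseudocode (reconstructed from the CHECK lines below):
;
; bodyB:
;   B[j] = A[j];
;   C[j] = A[j] + A[j];
;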
define void @func1(i32 %n, double* noalias nonnull %A, double* noalias nonnull %B, double* noalias nonnull %C) {
entry:
  br label %for

for:
  %j = phi i32 [0, %entry], [%j.inc, %inc]
  %j.cmp = icmp slt i32 %j, %n
  br i1 %j.cmp, label %bodyA, label %exit

bodyA:
  %A_idx = getelementptr inbounds double, double* %A, i32 %j
  %val1 = load double, double* %A_idx
  %val2 = fadd double %val1, %val1
  br label %bodyB

bodyB:
  %B_idx = getelementptr inbounds double, double* %B, i32 %j
  store double %val1, double* %B_idx
  %C_idx = getelementptr inbounds double, double* %C, i32 %j
  store double %val2, double* %C_idx
  br label %inc

inc:
  %j.inc = add nuw nsw i32 %j, 1
  br label %for

exit:
  br label %return

return:
  ret void
}

; CHECK: Statistics {
; CHECK:     Instructions copied: 1
; CHECK:     Known loads forwarded: 2
; CHECK:     Operand trees forwarded: 2
; CHECK:     Statements with forwarded operand trees: 1
; CHECK: }
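;
; Only the %val2 fadd counts as a copied instruction; the %val1 load is
; tallied separately under "Known loads forwarded", once for each of the
; two operand trees (one rooted at %val1, one at %val2) forwarded into
; %bodyB.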

; CHECK:      After statements {
; CHECK-NEXT:     Stmt_bodyA
; CHECK-NEXT:         ReadAccess :=       [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             [n] -> { Stmt_bodyA[i0] -> MemRef_A[i0] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:             [n] -> { Stmt_bodyA[i0] -> MemRef_val1[] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:             [n] -> { Stmt_bodyA[i0] -> MemRef_val2[] };
; CHECK-NEXT:         Instructions {
; CHECK-NEXT:               %val1 = load double, double* %A_idx, align 8
; CHECK-NEXT:               %val2 = fadd double %val1, %val1
; CHECK-NEXT:         }
; CHECK-NEXT:     Stmt_bodyB
; CHECK-NEXT:         ReadAccess :=       [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             null;
; CHECK-NEXT:            new: [n] -> { Stmt_bodyB[i0] -> MemRef_A[i0] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             [n] -> { Stmt_bodyB[i0] -> MemRef_B[i0] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             [n] -> { Stmt_bodyB[i0] -> MemRef_C[i0] };
; CHECK-NEXT:         Instructions {
; CHECK-NEXT:               %val1 = load double, double* %A_idx, align 8
; CHECK-NEXT:               %val2 = fadd double %val1, %val1
; CHECK-NEXT:               %val1 = load double, double* %A_idx, align 8
; CHECK-NEXT:               store double %val1, double* %B_idx, align 8
; CHECK-NEXT:               store double %val2, double* %C_idx, align 8
; CHECK-NEXT:         }
; CHECK-NEXT: }
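
; func2 is identical to func1 except that the two stores in %bodyB are
; swapped, covering the other use order (pseudocode, derived from the IR
; below):
;
; for (int j = 0; j < n; j += 1) {
; bodyA:
;   double val1 = A[j];
;   double val2 = val1 + val1;
;
; bodyB:
;   B[j] = val2;
;   C[j] = val1;
; }
;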
define void @func2(i32 %n, double* noalias nonnull %A, double* noalias nonnull %B, double* noalias nonnull %C) {
entry:
  br label %for

for:
  %j = phi i32 [0, %entry], [%j.inc, %inc]
  %j.cmp = icmp slt i32 %j, %n
  br i1 %j.cmp, label %bodyA, label %exit

bodyA:
  %A_idx = getelementptr inbounds double, double* %A, i32 %j
  %val1 = load double, double* %A_idx
  %val2 = fadd double %val1, %val1
  br label %bodyB

bodyB:
  %B_idx = getelementptr inbounds double, double* %B, i32 %j
  store double %val2, double* %B_idx
  %C_idx = getelementptr inbounds double, double* %C, i32 %j
  store double %val1, double* %C_idx
  br label %inc

inc:
  %j.inc = add nuw nsw i32 %j, 1
  br label %for

exit:
  br label %return

return:
  ret void
}

; CHECK: Statistics {
; CHECK:     Instructions copied: 1
; CHECK:     Known loads forwarded: 2
; CHECK:     Operand trees forwarded: 2
; CHECK:     Statements with forwarded operand trees: 1
; CHECK: }

; CHECK:      After statements {
; CHECK-NEXT:     Stmt_bodyA
; CHECK-NEXT:         ReadAccess :=       [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             [n] -> { Stmt_bodyA[i0] -> MemRef_A[i0] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:             [n] -> { Stmt_bodyA[i0] -> MemRef_val2[] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT:             [n] -> { Stmt_bodyA[i0] -> MemRef_val1[] };
; CHECK-NEXT:         Instructions {
; CHECK-NEXT:               %val1 = load double, double* %A_idx, align 8
; CHECK-NEXT:               %val2 = fadd double %val1, %val1
; CHECK-NEXT:         }
; CHECK-NEXT:     Stmt_bodyB
; CHECK-NEXT:         ReadAccess :=       [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             null;
; CHECK-NEXT:            new: [n] -> { Stmt_bodyB[i0] -> MemRef_A[i0] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             [n] -> { Stmt_bodyB[i0] -> MemRef_B[i0] };
; CHECK-NEXT:         MustWriteAccess :=  [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:             [n] -> { Stmt_bodyB[i0] -> MemRef_C[i0] };
; CHECK-NEXT:         Instructions {
; CHECK-NEXT:               %val1 = load double, double* %A_idx, align 8
; CHECK-NEXT:               %val1 = load double, double* %A_idx, align 8
; CHECK-NEXT:               %val2 = fadd double %val1, %val1
; CHECK-NEXT:               store double %val2, double* %B_idx, align 8
; CHECK-NEXT:               store double %val1, double* %C_idx, align 8
; CHECK-NEXT:         }
; CHECK-NEXT: }