; RUN: opt -aa-pipeline=basic-aa -passes=loop-distribute -enable-loop-distribute -verify-loop-info -verify-dom-info -S \
; RUN:   < %s | FileCheck %s

; RUN: opt -aa-pipeline=basic-aa -passes='loop-distribute,loop-vectorize' -enable-loop-distribute -force-vector-width=4 \
; RUN:   -verify-loop-info -verify-dom-info -S < %s | \
; RUN:   FileCheck --check-prefix=VECTORIZE %s

; RUN: opt -aa-pipeline=basic-aa -passes='loop-distribute,print-access-info' -enable-loop-distribute \
; RUN:   -verify-loop-info -verify-dom-info -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ANALYSIS
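
; The three RUN lines check, in order: distribution on its own (CHECK),
; distribution followed by vectorization of the distributed loop (VECTORIZE),
; and the printed loop-access analysis report (ANALYSIS).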

; The memcheck version of basic.ll. We should distribute and vectorize the
; second part of this loop with 5 memchecks (A+1 x {C, D, E} + C x {A, B})
;
;   for (i = 0; i < n; i++) {
;     A[i + 1] = A[i] * B[i];
; -------------------------------
;     C[i] = D[i] * E[i];
;   }
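;
; (The dashed line marks the expected split point: distribution should put
; the A[i + 1] statement in the first new loop, matched below as .ldist1,
; and the C[i] statement in the second loop, which is the part that
; vectorizes.)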

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"

@B = common global i32* null, align 8
@A = common global i32* null, align 8
@C = common global i32* null, align 8
@D = common global i32* null, align 8
@E = common global i32* null, align 8

; CHECK-LABEL: @f(
define void @f() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  %c = load i32*, i32** @C, align 8
  %d = load i32*, i32** @D, align 8
  %e = load i32*, i32** @E, align 8
  br label %for.body

; We have two compares for each array overlap check.
; Since the checks to A and A + 4 get merged, this will give us a
; total of 8 compares.
;
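; (One way to make the count explicit, inferred from the comment above:
; merging the A and A + 4 ranges turns the "A+1 vs. C" and "C vs. A" checks
; into a single check, leaving 4 range checks at 2 bound compares each = 8.)
;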
; CHECK: for.body.lver.check:
; CHECK: = icmp
; CHECK: = icmp

; CHECK: = icmp
; CHECK: = icmp

; CHECK: = icmp
; CHECK: = icmp

; CHECK: = icmp
; CHECK: = icmp

; CHECK-NOT: = icmp
; CHECK: br i1 %conflict.rdx25, label %for.body.ph.lver.orig, label %for.body.ph.ldist1

; The non-distributed loop that the memchecks fall back on.

; CHECK: for.body.ph.lver.orig:
; CHECK: br label %for.body.lver.orig
; CHECK: for.body.lver.orig:
; CHECK: br i1 %exitcond.lver.orig, label %for.end.loopexit, label %for.body.lver.orig

; Verify the two distributed loops.

; CHECK: for.body.ph.ldist1:
; CHECK: br label %for.body.ldist1
; CHECK: for.body.ldist1:
; CHECK: %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
; CHECK: br i1 %exitcond.ldist1, label %for.body.ph, label %for.body.ldist1

; CHECK: for.body.ph:
; CHECK: br label %for.body
; CHECK: for.body:
; CHECK: %mulC = mul i32 %loadD, %loadE
; CHECK: for.end:

; VECTORIZE: mul <4 x i32>

for.body: ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]

  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; Make sure there's no "Multiple reports generated" assert with a
; volatile load, and no distribution.

; TODO: Distribution of volatile accesses may be possible under some
; circumstances, but the current implementation does not touch them.

; CHECK-LABEL: @f_volatile_load(
; CHECK: br label %for.body{{$}}

; CHECK-NOT: load

; CHECK: {{^}}for.body:
; CHECK: load i32
; CHECK: load i32
; CHECK: load volatile i32
; CHECK: load i32
; CHECK: br i1 %exitcond, label %for.end, label %for.body{{$}}

; CHECK-NOT: load

; VECTORIZE-NOT: load <4 x i32>
; VECTORIZE-NOT: mul <4 x i32>
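
; ({{^}} and {{$}} above pin the exact label and branch text: had the loop
; been versioned or distributed, the blocks would carry suffixes such as
; .lver.orig or .ldist1 and these lines would not match.)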
define void @f_volatile_load() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  %c = load i32*, i32** @C, align 8
  %d = load i32*, i32** @D, align 8
  %e = load i32*, i32** @E, align 8
  br label %for.body

for.body:
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]

  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load volatile i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare i32 @llvm.convergent(i32) #0

; This is the same as f, and would require the same bounds
; check. However, it is not OK to introduce new control dependencies
; on the convergent call.
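; (Concretely: distributing would version the loop, with the memchecks feeding
; a conditional branch between the original and distributed loops, so the
; convergent call would become control-dependent on the check results.)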

; CHECK-LABEL: @f_with_convergent(
; CHECK: call i32 @llvm.convergent
; CHECK-NOT: call i32 @llvm.convergent

; ANALYSIS: for.body:
; ANALYSIS: Report: cannot add control dependency to convergent operation
define void @f_with_convergent() #1 {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  %c = load i32*, i32** @C, align 8
  %d = load i32*, i32** @D, align 8
  %e = load i32*, i32** @E, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]

  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
  %loadE = load i32, i32* %arrayidxE, align 4

  %convergentD = call i32 @llvm.convergent(i32 %loadD)
  %mulC = mul i32 %convergentD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; Make sure an explicit request for distribution is ignored if it
; requires possibly illegal checks.

; CHECK-LABEL: @f_with_convergent_forced_distribute(
; CHECK: call i32 @llvm.convergent
; CHECK-NOT: call i32 @llvm.convergent
define void @f_with_convergent_forced_distribute() #1 {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  %c = load i32*, i32** @C, align 8
  %d = load i32*, i32** @D, align 8
  %e = load i32*, i32** @E, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]

  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
  %loadE = load i32, i32* %arrayidxE, align 4

  %convergentD = call i32 @llvm.convergent(i32 %loadD)
  %mulC = mul i32 %convergentD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body
  ret void
}

attributes #0 = { nounwind readnone convergent }
attributes #1 = { nounwind convergent }
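
; !0 is the !llvm.loop metadata on the backedge of
; @f_with_convergent_forced_distribute: !1 explicitly enables distribution,
; which the pass still refuses above because of the convergent call.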
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.distribute.enable", i1 true}