From f13a59bcff79293f1424a32f6f14d47a4a4b3d46 Mon Sep 17 00:00:00 2001
From: Florian Hahn
Date: Mon, 20 Jul 2020 20:50:49 +0100
Subject: [PATCH] [Matrix] Use TileInfo to create tiled loop nest for matrix
 multiply.

This patch uses the TileInfo introduced in D77550 to generate a loop
nest for tiled matrix multiplication, instead of generating the
unrolled code for the whole multiplication. This makes code-generation
more scalable for larger matrices.

Initially, loops are only used if both the number of rows and columns
are divisible by the tile size. Other cases will be added as a
follow-up.

Reviewers: anemet, Gerolf, hfinkel, andrew.w.kaylor, LuoYuanke, nicolasvasilache

Reviewed By: anemet

Differential Revision: https://reviews.llvm.org/D81308
---
 .../Scalar/LowerMatrixIntrinsics.cpp          | 108 +++-
 .../multiply-fused-loops.ll                   | 397 +++++++++++++++
 .../multiply-fused-volatile.ll                | 462 ++++++++++++++----
 .../LowerMatrixIntrinsics/multiply-fused.ll   |   4 +-
 4 files changed, 845 insertions(+), 126 deletions(-)
 create mode 100644 llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll

diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 1a700712eb84..4e5d0dc30cc4 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -42,6 +42,8 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/MatrixUtils.h"
 
 using namespace llvm;
 using namespace PatternMatch;
@@ -61,6 +63,9 @@ static cl::opt<unsigned> TileSize(
     "fuse-matrix-tile-size", cl::init(4), cl::Hidden,
     cl::desc(
         "Tile size for matrix instruction fusion using square-shaped tiles."));
+static cl::opt<bool> TileUseLoops("fuse-matrix-use-loops", cl::init(false),
+                                  cl::Hidden,
+                                  cl::desc("Generate loop nest for tiling."));
 static cl::opt<bool> ForceFusion(
     "force-fuse-matrix", cl::init(false), cl::Hidden,
     cl::desc("Force matrix instruction fusion even if not profitable."));
@@ -1204,6 +1209,63 @@ public:
     return Res;
   }
 
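+  // Lower the multiply as a tiled loop nest instead of one fully unrolled
+  // block. A conceptual sketch of the control flow emitted below (all shapes
+  // are assumed to divide evenly by TileSize):
+  //
+  //   for (CurrentCol = 0; CurrentCol < RShape.NumColumns; CurrentCol += TileSize)
+  //     for (CurrentRow = 0; CurrentRow < LShape.NumRows; CurrentRow += TileSize) {
+  //       ResultTile = 0;
+  //       for (CurrentK = 0; CurrentK < LShape.NumColumns; CurrentK += TileSize)
+  //         ResultTile += A[CurrentRow][CurrentK] * B[CurrentK][CurrentCol];
+  //       // ResultTile is stored to C[CurrentRow][CurrentCol] in the rows latch.
+  //     }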
+  void createTiledLoops(CallInst *MatMul, Value *LPtr, ShapeInfo LShape,
+                        Value *RPtr, ShapeInfo RShape, StoreInst *Store,
+                        bool AllowContract) {
+    auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
+
+    // Create the main tiling loop nest.
+    TileInfo TI(LShape.NumRows, RShape.NumColumns, LShape.NumColumns, TileSize);
+    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
+    Instruction *InsertI = cast<Instruction>(MatMul);
+    BasicBlock *Start = InsertI->getParent();
+    BasicBlock *End =
+        SplitBlock(InsertI->getParent(), InsertI, DT, LI, nullptr, "continue");
+    IRBuilder<> Builder(MatMul);
+    BasicBlock *InnerBody = TI.CreateTiledLoops(Start, End, Builder, DTU, *LI);
+
+    Type *TileVecTy =
+        FixedVectorType::get(MatMul->getType()->getScalarType(), TileSize);
+    MatrixTy TileResult;
+    // Insert in the inner loop header.
+    Builder.SetInsertPoint(TI.InnerLoopHeader->getTerminator());
+    // Create PHI nodes for the result columns to accumulate across iterations.
+    SmallVector<PHINode *, 4> ColumnPhis;
+    for (unsigned I = 0; I < TileSize; I++) {
+      auto *Phi = Builder.CreatePHI(TileVecTy, 2, "result.vec." + Twine(I));
+      Phi->addIncoming(ConstantAggregateZero::get(TileVecTy),
+                       TI.RowLoopHeader->getSingleSuccessor());
+      TileResult.addVector(Phi);
+      ColumnPhis.push_back(Phi);
+    }
+
+    // Insert in the inner loop body, which computes
+    //   Res += Load(CurrentRow, K) * Load(K, CurrentColumn)
+    Builder.SetInsertPoint(InnerBody->getTerminator());
+    // Load tiles of the operands.
+    MatrixTy A = loadMatrix(LPtr, {}, false, LShape, TI.CurrentRow, TI.CurrentK,
+                            {TileSize, TileSize}, EltType, Builder);
+    MatrixTy B = loadMatrix(RPtr, {}, false, RShape, TI.CurrentK, TI.CurrentCol,
+                            {TileSize, TileSize}, EltType, Builder);
+    emitMatrixMultiply(TileResult, A, B, AllowContract, Builder, true);
+    // Store result after the inner loop is done.
+    Builder.SetInsertPoint(TI.RowLoopLatch->getTerminator());
+    storeMatrix(TileResult, Store->getPointerOperand(), Store->getAlign(),
+                Store->isVolatile(), {LShape.NumRows, RShape.NumColumns},
+                TI.CurrentRow, TI.CurrentCol, EltType, Builder);
+
+    for (unsigned I = 0; I < TileResult.getNumVectors(); I++)
+      ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.InnerLoopLatch);
+
+    // Force unrolling of a few iterations of the inner loop, to make sure
+    // there is enough work per iteration.
+    // FIXME: The unroller should make this decision directly instead, but
+    // currently the cost-model is not up to the task.
+    unsigned InnerLoopUnrollCount = std::min(10u, LShape.NumColumns / TileSize);
+    addStringMetadataToLoop(LI->getLoopFor(TI.InnerLoopHeader),
+                            "llvm.loop.unroll.count", InnerLoopUnrollCount);
+  }
+
   void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1,
                       StoreInst *Store,
                       SmallPtrSetImpl<Instruction *> &FusedInsts) {
@@ -1226,28 +1288,34 @@ public:
     bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
                                                   MatMul->hasAllowContract());
-    IRBuilder<> Builder(Store);
-    for (unsigned J = 0; J < C; J += TileSize)
-      for (unsigned I = 0; I < R; I += TileSize) {
-        const unsigned TileR = std::min(R - I, unsigned(TileSize));
-        const unsigned TileC = std::min(C - J, unsigned(TileSize));
-        MatrixTy Res = getZeroMatrix(EltType, TileR, TileC);
+    if (TileUseLoops && (R % TileSize == 0 && C % TileSize == 0))
+      createTiledLoops(MatMul, APtr, LShape, BPtr, RShape, Store,
+                       AllowContract);
+    else {
+      IRBuilder<> Builder(Store);
+      for (unsigned J = 0; J < C; J += TileSize)
+        for (unsigned I = 0; I < R; I += TileSize) {
+          const unsigned TileR = std::min(R - I, unsigned(TileSize));
+          const unsigned TileC = std::min(C - J, unsigned(TileSize));
+          MatrixTy Res = getZeroMatrix(EltType, TileR, TileC);
 
-        for (unsigned K = 0; K < M; K += TileSize) {
-          const unsigned TileM = std::min(M - K, unsigned(TileSize));
-          MatrixTy A =
-              loadMatrix(APtr, LoadOp0->getAlign(), LoadOp0->isVolatile(),
-                         LShape, Builder.getInt64(I), Builder.getInt64(K),
-                         {TileR, TileM}, EltType, Builder);
-          MatrixTy B =
-              loadMatrix(BPtr, LoadOp1->getAlign(), LoadOp1->isVolatile(),
-                         RShape, Builder.getInt64(K), Builder.getInt64(J),
-                         {TileM, TileC}, EltType, Builder);
-          emitMatrixMultiply(Res, A, B, AllowContract, Builder, true);
+          for (unsigned K = 0; K < M; K += TileSize) {
+            const unsigned TileM = std::min(M - K, unsigned(TileSize));
+            MatrixTy A =
+                loadMatrix(APtr, LoadOp0->getAlign(), LoadOp0->isVolatile(),
+                           LShape, Builder.getInt64(I), Builder.getInt64(K),
+                           {TileR, TileM}, EltType, Builder);
+            MatrixTy B =
+                loadMatrix(BPtr, LoadOp1->getAlign(), LoadOp1->isVolatile(),
+                           RShape, Builder.getInt64(K), Builder.getInt64(J),
+                           {TileM, TileC}, EltType, Builder);
+            emitMatrixMultiply(Res, A, B, AllowContract, Builder, true);
+          }
+          storeMatrix(Res, CPtr, Store->getAlign(), Store->isVolatile(), {R, M},
+                      Builder.getInt64(I), Builder.getInt64(J), EltType,
+                      Builder);
+        }
-        storeMatrix(Res, CPtr, Store->getAlign(), Store->isVolatile(), {R, M},
-                    Builder.getInt64(I), Builder.getInt64(J), EltType, Builder);
-      }
+    }
 
     // Mark eliminated instructions as fused and remove them.
     FusedInsts.insert(Store);
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
new file mode 100644
index 000000000000..d0503b7371d4
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
@@ -0,0 +1,397 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -lower-matrix-intrinsics -fuse-matrix-use-loops -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -instcombine -verify-dom-info %s -S | FileCheck %s
+
+; REQUIRES: aarch64-registered-target
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "aarch64-apple-ios"
+
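+; The tests below use a 2x2 tile (-fuse-matrix-tile-size=2): the cols and
+; rows loops step over tiles of the result, and the inner loop accumulates
+; partial products along the shared dimension. -instcombine only cleans up
+; the generated indexing and loop exit conditions.
+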
+define void @multiply_noalias_4x4(<16 x double>* noalias %A, <16 x double>* noalias %B, <16 x double>* noalias %C) {
+; CHECK-LABEL: @multiply_noalias_4x4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[COLS_HEADER:%.*]]
+; CHECK:       cols.header:
+; CHECK-NEXT:    [[COLS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ]
+; CHECK-NEXT:    br label [[COLS_BODY:%.*]]
+; CHECK:       cols.body:
+; CHECK-NEXT:    br label [[ROWS_HEADER:%.*]]
+; CHECK:       rows.header:
+; CHECK-NEXT:    [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ]
+; CHECK-NEXT:    br label [[ROWS_BODY:%.*]]
+; CHECK:       rows.body:
+; CHECK-NEXT:    br label [[INNER_HEADER:%.*]]
+; CHECK:       inner.header:
+; CHECK-NEXT:    [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP9:%.*]], [[INNER_LATCH]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP11:%.*]], [[INNER_LATCH]] ]
+; CHECK-NEXT:    br label [[INNER_BODY:%.*]]
+; CHECK:       inner.body:
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[INNER_IV]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[TMP2]], [[ROWS_IV]]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr <16 x double>, <16 x double>* [[A:%.*]], i64 0, i64 [[TMP3]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP4]] to <2 x double>*
+; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP4]], i64 4
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
+; CHECK-NEXT:    [[COL_LOAD2:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = shl i64 [[COLS_IV]], 2
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP5]], [[INNER_IV]]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr <16 x double>, <16 x double>* [[B:%.*]], i64 0, i64 [[TMP6]]
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast double* [[TMP7]] to <2 x double>*
+; CHECK-NEXT:    [[COL_LOAD5:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP7]], i64 4
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>*
+; CHECK-NEXT:    [[COL_LOAD8:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8
+; CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <2 x double> [[COL_LOAD5]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP8:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD]], <2 x double> [[SPLAT_SPLAT]], <2 x double> [[TMP0]])
+; CHECK-NEXT:    [[SPLAT_SPLAT12:%.*]] = shufflevector <2 x double> [[COL_LOAD5]], <2 x double> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP9]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD2]], <2 x double> [[SPLAT_SPLAT12]], <2 x double> [[TMP8]])
+; CHECK-NEXT:    [[SPLAT_SPLAT16:%.*]] = shufflevector <2 x double> [[COL_LOAD8]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP10:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD]], <2 x double> [[SPLAT_SPLAT16]], <2 x double> [[TMP1]])
+; CHECK-NEXT:    [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x double> [[COL_LOAD8]], <2 x double> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP11]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD2]], <2 x double> [[SPLAT_SPLAT19]], <2 x double> [[TMP10]])
+; CHECK-NEXT:    br label [[INNER_LATCH]]
+; CHECK:       inner.latch:
+; CHECK-NEXT:    [[INNER_STEP]] = add i64 [[INNER_IV]], 2
+; CHECK-NEXT:    [[INNER_COND_NOT:%.*]] = icmp eq i64 [[INNER_STEP]], 4
+; CHECK-NEXT:    br i1 [[INNER_COND_NOT]], label [[ROWS_LATCH]], label [[INNER_HEADER]], !llvm.loop !0
+; CHECK:       rows.latch:
+; CHECK-NEXT:    [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2
+; CHECK-NEXT:    [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_STEP]], 4
+; CHECK-NEXT:    [[TMP12:%.*]] = shl i64 [[COLS_IV]], 2
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP12]], [[ROWS_IV]]
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr <16 x double>, <16 x double>* [[C:%.*]], i64 0, i64 [[TMP13]]
+; CHECK-NEXT:    [[VEC_CAST21:%.*]] = bitcast double* [[TMP14]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP9]], <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT:    [[VEC_GEP22:%.*]] = getelementptr double, double* [[TMP14]], i64 4
+; CHECK-NEXT:    [[VEC_CAST23:%.*]] = bitcast double* [[VEC_GEP22]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP11]], <2 x double>* [[VEC_CAST23]], align 8
+; CHECK-NEXT:    br i1 [[ROWS_COND_NOT]], label [[COLS_LATCH]], label [[ROWS_HEADER]]
+; CHECK:       cols.latch:
+; CHECK-NEXT:    [[COLS_STEP]] = add i64 [[COLS_IV]], 2
+; CHECK-NEXT:    [[COLS_COND_NOT:%.*]] = icmp eq i64 [[COLS_STEP]], 4
+; CHECK-NEXT:    br i1 [[COLS_COND_NOT]], label [[CONTINUE:%.*]], label [[COLS_HEADER]]
+; CHECK:       continue:
+; CHECK-NEXT:    ret void
+;
+
+entry:
+  %a = load <16 x double>, <16 x double>* %A, align 8
+  %b = load <16 x double>, <16 x double>* %B, align 8
+
+  %c = call <16 x double> @llvm.matrix.multiply.v16f64.v16f64.v16f64(<16 x double> %a, <16 x double> %b, i32 4, i32 4, i32 4)
+
+  store <16 x double> %c, <16 x double>* %C, align 8
+  ret void
+}
+
+
+declare <16 x double> @llvm.matrix.multiply.v16f64.v16f64.v16f64(<16 x double>, <16 x double>, i32, i32, i32)
+
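+; The 2x4 * 4x2 integer case: without an FP element type the inner loop uses
+; mul/add instead of llvm.fmuladd, and because rows and cols each cover their
+; dimension in a single tile, instcombine folds their exit compares down to
+; an icmp of the induction variable against 0.
+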
+define void @multiply_noalias_2x4(<8 x i64>* noalias %A, <8 x i64>* noalias %B, <4 x i64>* noalias %C) {
+; CHECK-LABEL: @multiply_noalias_2x4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[COLS_HEADER:%.*]]
+; CHECK:       cols.header:
+; CHECK-NEXT:    [[COLS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ]
+; CHECK-NEXT:    br label [[COLS_BODY:%.*]]
+; CHECK:       cols.body:
+; CHECK-NEXT:    br label [[ROWS_HEADER:%.*]]
+; CHECK:       rows.header:
+; CHECK-NEXT:    [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ]
+; CHECK-NEXT:    br label [[ROWS_BODY:%.*]]
+; CHECK:       rows.body:
+; CHECK-NEXT:    br label [[INNER_HEADER:%.*]]
+; CHECK:       inner.header:
+; CHECK-NEXT:    [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x i64> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP11:%.*]], [[INNER_LATCH]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi <2 x i64> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP15:%.*]], [[INNER_LATCH]] ]
+; CHECK-NEXT:    br label [[INNER_BODY:%.*]]
+; CHECK:       inner.body:
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[INNER_IV]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[TMP2]], [[ROWS_IV]]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr <8 x i64>, <8 x i64>* [[A:%.*]], i64 0, i64 [[TMP3]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast i64* [[TMP4]] to <2 x i64>*
+; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i64, i64* [[TMP4]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast i64* [[VEC_GEP]] to <2 x i64>*
+; CHECK-NEXT:    [[COL_LOAD2:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST1]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = shl i64 [[COLS_IV]], 2
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP5]], [[INNER_IV]]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr <8 x i64>, <8 x i64>* [[B:%.*]], i64 0, i64 [[TMP6]]
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast i64* [[TMP7]] to <2 x i64>*
+; CHECK-NEXT:    [[COL_LOAD5:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST4]], align 8
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr i64, i64* [[TMP7]], i64 4
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast i64* [[VEC_GEP6]] to <2 x i64>*
+; CHECK-NEXT:    [[COL_LOAD8:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST7]], align 8
+; CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <2 x i64> [[COL_LOAD5]], <2 x i64> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP8:%.*]] = mul <2 x i64> [[COL_LOAD]], [[SPLAT_SPLAT]]
+; CHECK-NEXT:    [[TMP9:%.*]] = add <2 x i64> [[TMP0]], [[TMP8]]
+; CHECK-NEXT:    [[SPLAT_SPLAT12:%.*]] = shufflevector <2 x i64> [[COL_LOAD5]], <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP10:%.*]] = mul <2 x i64> [[COL_LOAD2]], [[SPLAT_SPLAT12]]
+; CHECK-NEXT:    [[TMP11]] = add <2 x i64> [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[SPLAT_SPLAT16:%.*]] = shufflevector <2 x i64> [[COL_LOAD8]], <2 x i64> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP12:%.*]] = mul <2 x i64> [[COL_LOAD]], [[SPLAT_SPLAT16]]
+; CHECK-NEXT:    [[TMP13:%.*]] = add <2 x i64> [[TMP1]], [[TMP12]]
+; CHECK-NEXT:    [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x i64> [[COL_LOAD8]], <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP14:%.*]] = mul <2 x i64> [[COL_LOAD2]], [[SPLAT_SPLAT19]]
+; CHECK-NEXT:    [[TMP15]] = add <2 x i64> [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    br label [[INNER_LATCH]]
+; CHECK:       inner.latch:
+; CHECK-NEXT:    [[INNER_STEP]] = add i64 [[INNER_IV]], 2
+; CHECK-NEXT:    [[INNER_COND_NOT:%.*]] = icmp eq i64 [[INNER_STEP]], 4
+; CHECK-NEXT:    br i1 [[INNER_COND_NOT]], label [[ROWS_LATCH]], label [[INNER_HEADER]], !llvm.loop !2
+; CHECK:       rows.latch:
+; CHECK-NEXT:    [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2
+; CHECK-NEXT:    [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_IV]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = shl i64 [[COLS_IV]], 1
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP16]], [[ROWS_IV]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr <4 x i64>, <4 x i64>* [[C:%.*]], i64 0, i64 [[TMP17]]
+; CHECK-NEXT:    [[VEC_CAST21:%.*]] = bitcast i64* [[TMP18]] to <2 x i64>*
+; CHECK-NEXT:    store <2 x i64> [[TMP11]], <2 x i64>* [[VEC_CAST21]], align 8
+; CHECK-NEXT:    [[VEC_GEP22:%.*]] = getelementptr i64, i64* [[TMP18]], i64 2
+; CHECK-NEXT:    [[VEC_CAST23:%.*]] = bitcast i64* 
[[VEC_GEP22]] to <2 x i64>* +; CHECK-NEXT: store <2 x i64> [[TMP15]], <2 x i64>* [[VEC_CAST23]], align 8 +; CHECK-NEXT: br i1 [[ROWS_COND_NOT]], label [[COLS_LATCH]], label [[ROWS_HEADER]] +; CHECK: cols.latch: +; CHECK-NEXT: [[COLS_STEP]] = add i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[COLS_COND_NOT:%.*]] = icmp eq i64 [[COLS_IV]], 0 +; CHECK-NEXT: br i1 [[COLS_COND_NOT]], label [[CONTINUE:%.*]], label [[COLS_HEADER]] +; CHECK: continue: +; CHECK-NEXT: ret void +; + +; In the inner loop, compute +; Result += Load(A, ROWS_IV, INNER_IV) * Load(B, INNER_IV, COLS_IV) + + +; Store the current 2x2 tile. + +entry: + %a = load <8 x i64>, <8 x i64>* %A, align 8 + %b = load <8 x i64>, <8 x i64>* %B, align 8 + + %c = call <4 x i64> @llvm.matrix.multiply.v4i64.v8i64.v8i64(<8 x i64> %a, <8 x i64> %b, i32 2, i32 4, i32 2) + + store <4 x i64> %c, <4 x i64>* %C, align 8 + ret void +} + + +declare <4 x i64> @llvm.matrix.multiply.v4i64.v8i64.v8i64(<8 x i64>, <8 x i64>, i32, i32, i32) + +define void @multiply_noalias_4x2_2x8(<8 x i64>* noalias %A, <16 x i64>* noalias %B, <32 x i64>* noalias %C) { +; CHECK-LABEL: @multiply_noalias_4x2_2x8( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[COLS_HEADER:%.*]] +; CHECK: cols.header: +; CHECK-NEXT: [[COLS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[COLS_BODY:%.*]] +; CHECK: cols.body: +; CHECK-NEXT: br label [[ROWS_HEADER:%.*]] +; CHECK: rows.header: +; CHECK-NEXT: [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[ROWS_BODY:%.*]] +; CHECK: rows.body: +; CHECK-NEXT: br label [[INNER_HEADER:%.*]] +; CHECK: inner.header: +; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i64> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP11:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x i64> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP15:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: br label [[INNER_BODY:%.*]] +; CHECK: inner.body: +; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INNER_IV]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], [[ROWS_IV]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr <8 x i64>, <8 x i64>* [[A:%.*]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast i64* [[TMP4]] to <2 x i64>* +; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST]], align 8 +; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr i64, i64* [[TMP4]], i64 4 +; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast i64* [[VEC_GEP]] to <2 x i64>* +; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST1]], align 8 +; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[COLS_IV]], 1 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], [[INNER_IV]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr <16 x i64>, <16 x i64>* [[B:%.*]], i64 0, i64 [[TMP6]] +; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast i64* [[TMP7]] to <2 x i64>* +; CHECK-NEXT: [[COL_LOAD5:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST4]], align 8 +; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr i64, i64* [[TMP7]], i64 2 +; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast i64* [[VEC_GEP6]] to <2 x i64>* +; CHECK-NEXT: [[COL_LOAD8:%.*]] = load <2 x i64>, <2 x i64>* [[VEC_CAST7]], align 8 +; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x i64> [[COL_LOAD5]], <2 x i64> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP8:%.*]] = mul <2 x i64> [[COL_LOAD]], [[SPLAT_SPLAT]] +; CHECK-NEXT: [[TMP9:%.*]] = add <2 x i64> [[TMP0]], [[TMP8]] +; 
CHECK-NEXT:    [[SPLAT_SPLAT12:%.*]] = shufflevector <2 x i64> [[COL_LOAD5]], <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP10:%.*]] = mul <2 x i64> [[COL_LOAD2]], [[SPLAT_SPLAT12]]
+; CHECK-NEXT:    [[TMP11]] = add <2 x i64> [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[SPLAT_SPLAT16:%.*]] = shufflevector <2 x i64> [[COL_LOAD8]], <2 x i64> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP12:%.*]] = mul <2 x i64> [[COL_LOAD]], [[SPLAT_SPLAT16]]
+; CHECK-NEXT:    [[TMP13:%.*]] = add <2 x i64> [[TMP1]], [[TMP12]]
+; CHECK-NEXT:    [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x i64> [[COL_LOAD8]], <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP14:%.*]] = mul <2 x i64> [[COL_LOAD2]], [[SPLAT_SPLAT19]]
+; CHECK-NEXT:    [[TMP15]] = add <2 x i64> [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    br label [[INNER_LATCH]]
+; CHECK:       inner.latch:
+; CHECK-NEXT:    [[INNER_STEP]] = add i64 [[INNER_IV]], 2
+; CHECK-NEXT:    [[INNER_COND_NOT:%.*]] = icmp eq i64 [[INNER_IV]], 0
+; CHECK-NEXT:    br i1 [[INNER_COND_NOT]], label [[ROWS_LATCH]], label [[INNER_HEADER]], !llvm.loop !3
+; CHECK:       rows.latch:
+; CHECK-NEXT:    [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2
+; CHECK-NEXT:    [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_STEP]], 4
+; CHECK-NEXT:    [[TMP16:%.*]] = shl i64 [[COLS_IV]], 2
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP16]], [[ROWS_IV]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr <32 x i64>, <32 x i64>* [[C:%.*]], i64 0, i64 [[TMP17]]
+; CHECK-NEXT:    [[VEC_CAST21:%.*]] = bitcast i64* [[TMP18]] to <2 x i64>*
+; CHECK-NEXT:    store <2 x i64> [[TMP11]], <2 x i64>* [[VEC_CAST21]], align 8
+; CHECK-NEXT:    [[VEC_GEP22:%.*]] = getelementptr i64, i64* [[TMP18]], i64 4
+; CHECK-NEXT:    [[VEC_CAST23:%.*]] = bitcast i64* [[VEC_GEP22]] to <2 x i64>*
+; CHECK-NEXT:    store <2 x i64> [[TMP15]], <2 x i64>* [[VEC_CAST23]], align 8
+; CHECK-NEXT:    br i1 [[ROWS_COND_NOT]], label [[COLS_LATCH]], label [[ROWS_HEADER]]
+; CHECK:       cols.latch:
+; CHECK-NEXT:    [[COLS_STEP]] = add i64 [[COLS_IV]], 2
+; CHECK-NEXT:    [[COLS_COND_NOT:%.*]] = icmp eq i64 [[COLS_STEP]], 8
+; CHECK-NEXT:    br i1 [[COLS_COND_NOT]], label [[CONTINUE:%.*]], label [[COLS_HEADER]]
+; CHECK:       continue:
+; CHECK-NEXT:    ret void
+;
+
+; In the inner loop, compute
+;   Result += Load(A, ROWS_IV, INNER_IV) * Load(B, INNER_IV, COLS_IV)
+
+
+; Store the current 2x2 tile.
+
+entry:
+  %a = load <8 x i64>, <8 x i64>* %A, align 8
+  %b = load <16 x i64>, <16 x i64>* %B, align 8
+
+  %c = call <32 x i64> @llvm.matrix.multiply.v32i64.v8i64.v16i64(<8 x i64> %a, <16 x i64> %b, i32 4, i32 2, i32 8)
+
+  store <32 x i64> %c, <32 x i64>* %C, align 8
+  ret void
+}
+
+declare <32 x i64> @llvm.matrix.multiply.v32i64.v8i64.v16i64(<8 x i64>, <16 x i64>, i32, i32, i32)
+
+
+; Check the runtime aliasing checks.
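+; Without noalias the operands may overlap the result, so the lowered code
+; first compares the store range of %C against the load ranges of %A and %B;
+; on overlap the operand is copied into a fresh alloca before the loop nest
+; runs, so the tiled loads cannot observe the partial stores.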
+define void @multiply_alias_2x2(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) { +; CHECK-LABEL: @multiply_alias_2x2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[STORE_BEGIN:%.*]] = ptrtoint <4 x float>* [[C:%.*]] to i64 +; CHECK-NEXT: [[STORE_END:%.*]] = add nuw nsw i64 [[STORE_BEGIN]], 16 +; CHECK-NEXT: [[LOAD_BEGIN:%.*]] = ptrtoint <4 x float>* [[A:%.*]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i64 [[STORE_END]], [[LOAD_BEGIN]] +; CHECK-NEXT: br i1 [[TMP0]], label [[ALIAS_CONT:%.*]], label [[NO_ALIAS:%.*]] +; CHECK: alias_cont: +; CHECK-NEXT: [[LOAD_END:%.*]] = add nuw nsw i64 [[LOAD_BEGIN]], 16 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[LOAD_END]], [[STORE_BEGIN]] +; CHECK-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]] +; CHECK: copy: +; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x float>, align 16 +; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x float>* [[TMP2]] to i8* +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x float>* [[A]] to i8* +; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 16 dereferenceable(16) [[TMP3]], i8* nonnull align 8 dereferenceable(16) [[TMP4]], i64 16, i1 false) +; CHECK-NEXT: br label [[NO_ALIAS]] +; CHECK: no_alias: +; CHECK-NEXT: [[TMP5:%.*]] = phi <4 x float>* [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ] +; CHECK-NEXT: [[STORE_BEGIN4:%.*]] = ptrtoint <4 x float>* [[C]] to i64 +; CHECK-NEXT: [[STORE_END5:%.*]] = add nuw nsw i64 [[STORE_BEGIN4]], 16 +; CHECK-NEXT: [[LOAD_BEGIN6:%.*]] = ptrtoint <4 x float>* [[B:%.*]] to i64 +; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[STORE_END5]], [[LOAD_BEGIN6]] +; CHECK-NEXT: br i1 [[TMP6]], label [[ALIAS_CONT1:%.*]], label [[NO_ALIAS3:%.*]] +; CHECK: alias_cont1: +; CHECK-NEXT: [[LOAD_END7:%.*]] = add nuw nsw i64 [[LOAD_BEGIN6]], 16 +; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[LOAD_END7]], [[STORE_BEGIN4]] +; CHECK-NEXT: br i1 [[TMP7]], label [[COPY2:%.*]], label [[NO_ALIAS3]] +; CHECK: copy2: +; CHECK-NEXT: [[TMP8:%.*]] = alloca <4 x float>, align 16 +; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x float>* [[TMP8]] to i8* +; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x float>* [[B]] to i8* +; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 16 dereferenceable(16) [[TMP9]], i8* nonnull align 8 dereferenceable(16) [[TMP10]], i64 16, i1 false) +; CHECK-NEXT: br label [[NO_ALIAS3]] +; CHECK: no_alias3: +; CHECK-NEXT: [[TMP11:%.*]] = phi <4 x float>* [ [[B]], [[NO_ALIAS]] ], [ [[B]], [[ALIAS_CONT1]] ], [ [[TMP8]], [[COPY2]] ] +; CHECK-NEXT: br label [[COLS_HEADER:%.*]] +; CHECK: cols.header: +; CHECK-NEXT: [[COLS_IV:%.*]] = phi i64 [ 0, [[NO_ALIAS3]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[COLS_BODY:%.*]] +; CHECK: cols.body: +; CHECK-NEXT: br label [[ROWS_HEADER:%.*]] +; CHECK: rows.header: +; CHECK-NEXT: [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[ROWS_BODY:%.*]] +; CHECK: rows.body: +; CHECK-NEXT: br label [[INNER_HEADER:%.*]] +; CHECK: inner.header: +; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = phi <2 x float> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP21:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x float> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP23:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: br label [[INNER_BODY:%.*]] +; CHECK: inner.body: +; CHECK-NEXT: [[TMP14:%.*]] = shl i64 [[INNER_IV]], 1 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], 
[[ROWS_IV]]
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr <4 x float>, <4 x float>* [[TMP5]], i64 0, i64 [[TMP15]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast float* [[TMP16]] to <2 x float>*
+; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <2 x float>, <2 x float>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr float, float* [[TMP16]], i64 2
+; CHECK-NEXT:    [[VEC_CAST8:%.*]] = bitcast float* [[VEC_GEP]] to <2 x float>*
+; CHECK-NEXT:    [[COL_LOAD9:%.*]] = load <2 x float>, <2 x float>* [[VEC_CAST8]], align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = shl i64 [[COLS_IV]], 1
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], [[INNER_IV]]
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr <4 x float>, <4 x float>* [[TMP11]], i64 0, i64 [[TMP18]]
+; CHECK-NEXT:    [[VEC_CAST11:%.*]] = bitcast float* [[TMP19]] to <2 x float>*
+; CHECK-NEXT:    [[COL_LOAD12:%.*]] = load <2 x float>, <2 x float>* [[VEC_CAST11]], align 4
+; CHECK-NEXT:    [[VEC_GEP13:%.*]] = getelementptr float, float* [[TMP19]], i64 2
+; CHECK-NEXT:    [[VEC_CAST14:%.*]] = bitcast float* [[VEC_GEP13]] to <2 x float>*
+; CHECK-NEXT:    [[COL_LOAD15:%.*]] = load <2 x float>, <2 x float>* [[VEC_CAST14]], align 4
+; CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <2 x float> [[COL_LOAD12]], <2 x float> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP20:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[COL_LOAD]], <2 x float> [[SPLAT_SPLAT]], <2 x float> [[TMP12]])
+; CHECK-NEXT:    [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x float> [[COL_LOAD12]], <2 x float> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP21]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[COL_LOAD9]], <2 x float> [[SPLAT_SPLAT19]], <2 x float> [[TMP20]])
+; CHECK-NEXT:    [[SPLAT_SPLAT23:%.*]] = shufflevector <2 x float> [[COL_LOAD15]], <2 x float> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP22:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[COL_LOAD]], <2 x float> [[SPLAT_SPLAT23]], <2 x float> [[TMP13]])
+; CHECK-NEXT:    [[SPLAT_SPLAT26:%.*]] = shufflevector <2 x float> [[COL_LOAD15]], <2 x float> undef, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP23]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[COL_LOAD9]], <2 x float> [[SPLAT_SPLAT26]], <2 x float> [[TMP22]])
+; CHECK-NEXT:    br label [[INNER_LATCH]]
+; CHECK:       inner.latch:
+; CHECK-NEXT:    [[INNER_STEP]] = add i64 [[INNER_IV]], 2
+; CHECK-NEXT:    [[INNER_COND_NOT:%.*]] = icmp eq i64 [[INNER_IV]], 0
+; CHECK-NEXT:    br i1 [[INNER_COND_NOT]], label [[ROWS_LATCH]], label [[INNER_HEADER]], !llvm.loop !5
+; CHECK:       rows.latch:
+; CHECK-NEXT:    [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2
+; CHECK-NEXT:    [[ROWS_COND_NOT:%.*]] = icmp eq i64 [[ROWS_IV]], 0
+; CHECK-NEXT:    [[TMP24:%.*]] = shl i64 [[COLS_IV]], 1
+; CHECK-NEXT:    [[TMP25:%.*]] = add i64 [[TMP24]], [[ROWS_IV]]
+; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr <4 x float>, <4 x float>* [[C]], i64 0, i64 [[TMP25]]
+; CHECK-NEXT:    [[VEC_CAST28:%.*]] = bitcast float* [[TMP26]] to <2 x float>*
+; CHECK-NEXT:    store <2 x float> [[TMP21]], <2 x float>* [[VEC_CAST28]], align 8
+; CHECK-NEXT:    [[VEC_GEP29:%.*]] = getelementptr float, float* [[TMP26]], i64 2
+; CHECK-NEXT:    [[VEC_CAST30:%.*]] = bitcast float* [[VEC_GEP29]] to <2 x float>*
+; CHECK-NEXT:    store <2 x float> [[TMP23]], <2 x float>* [[VEC_CAST30]], align 8
+; CHECK-NEXT:    br i1 [[ROWS_COND_NOT]], label [[COLS_LATCH]], label [[ROWS_HEADER]]
+; CHECK:       cols.latch:
+; CHECK-NEXT:    [[COLS_STEP]] = add i64 [[COLS_IV]], 2
+; CHECK-NEXT:    [[COLS_COND_NOT:%.*]] = icmp eq i64 [[COLS_IV]], 0
+; CHECK-NEXT:    br i1 [[COLS_COND_NOT]], label [[CONTINUE:%.*]], 
label [[COLS_HEADER]] +; CHECK: continue: +; CHECK-NEXT: ret void +; + +; First, check for aliasing at runtime, create non-aliasing copies if required. +entry: + %a = load <4 x float>, <4 x float>* %A, align 8 + %b = load <4 x float>, <4 x float>* %B, align 8 + + %c = call <4 x float> @llvm.matrix.multiply.v4f32.v4f32.v4f32(<4 x float> %a, <4 x float> %b, i32 2, i32 2, i32 2) + + store <4 x float> %c, <4 x float>* %C, align 8 + ret void +} + +declare <4 x float> @llvm.matrix.multiply.v4f32.v4f32.v4f32(<4 x float>, <4 x float>, i32, i32, i32) + +; CHECK: !0 = distinct !{!0, !1} +; CHECK-NEXT: !1 = !{!"llvm.loop.unroll.count", i32 2} +; CHECK-NEXT: !2 = distinct !{!2, !1} +; CHECK-NEXT: !3 = distinct !{!3, !4} +; CHECK-NEXT: !4 = !{!"llvm.loop.unroll.count", i32 1} +; CHECK-NEXT: !5 = distinct !{!5, !4} diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll index 21365202bf19..3eb1bd40387c 100644 --- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll +++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -lower-matrix-intrinsics -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -verify-dom-info %s -S | FileCheck %s +; RUN: opt -lower-matrix-intrinsics -fuse-matrix-use-loops -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -verify-dom-info %s -S | FileCheck %s ; REQUIRES: aarch64-registered-target @@ -9,37 +9,100 @@ target triple = "aarch64-apple-ios" define void @multiply_all_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) { ; CHECK-LABEL: @multiply_all_volatile( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0 -; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>* -; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* -; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST]], align 8 -; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2 +; CHECK-NEXT: br label [[COLS_HEADER:%.*]] +; CHECK: cols.header: +; CHECK-NEXT: [[COLS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[COLS_BODY:%.*]] +; CHECK: cols.body: +; CHECK-NEXT: br label [[ROWS_HEADER:%.*]] +; CHECK: rows.header: +; CHECK-NEXT: [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[ROWS_BODY:%.*]] +; CHECK: rows.body: +; CHECK-NEXT: br label [[INNER_HEADER:%.*]] +; CHECK: inner.header: +; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP17:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP23:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: br label [[INNER_BODY:%.*]] +; CHECK: inner.body: +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INNER_IV]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], [[ROWS_IV]] +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, double* 
[[TMP4]], i64 [[TMP3]] +; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP5]] to <4 x double>* +; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* +; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP6]] to <2 x double>* +; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST]], align 8 +; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP6]], i64 2 ; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0 -; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>* -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* -; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 -; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2 +; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[INNER_IV]] +; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr double, double* [[TMP9]], i64 [[TMP8]] +; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP10]] to <4 x double>* +; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* +; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP11]] to <2 x double>* +; CHECK-NEXT: [[COL_LOAD5:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 +; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP11]], i64 2 ; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 - -; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0 -; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>* -; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double* -; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>* -; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8 -; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2 -; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>* -; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8 +; CHECK-NEXT: [[COL_LOAD8:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 +; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 0 +; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x double> undef, double [[TMP12]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK9]], <2 x double> [[SPLAT_SPLAT]], <2 x double> [[BLOCK]]) +; CHECK-NEXT: 
[[BLOCK10:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 1 +; CHECK-NEXT: [[SPLAT_SPLATINSERT11:%.*]] = insertelement <2 x double> undef, double [[TMP14]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT12:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT11]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP15:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK10]], <2 x double> [[SPLAT_SPLAT12]], <2 x double> [[TMP13]]) +; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <2 x double> [[TMP15]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP17]] = shufflevector <2 x double> [[TMP0]], <2 x double> [[TMP16]], <2 x i32> +; CHECK-NEXT: [[BLOCK13:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[BLOCK14:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 0 +; CHECK-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <2 x double> undef, double [[TMP18]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT15]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK14]], <2 x double> [[SPLAT_SPLAT16]], <2 x double> [[BLOCK13]]) +; CHECK-NEXT: [[BLOCK17:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 1 +; CHECK-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <2 x double> undef, double [[TMP20]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT18]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK17]], <2 x double> [[SPLAT_SPLAT19]], <2 x double> [[TMP19]]) +; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <2 x double> [[TMP21]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP23]] = shufflevector <2 x double> [[TMP1]], <2 x double> [[TMP22]], <2 x i32> +; CHECK-NEXT: br label [[INNER_LATCH]] +; CHECK: inner.latch: +; CHECK-NEXT: [[INNER_STEP]] = add i64 [[INNER_IV]], 2 +; CHECK-NEXT: [[INNER_COND:%.*]] = icmp ne i64 [[INNER_STEP]], 2 +; CHECK-NEXT: br i1 [[INNER_COND]], label [[INNER_HEADER]], label [[ROWS_LATCH]], !llvm.loop !0 +; CHECK: rows.latch: +; CHECK-NEXT: [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2 +; CHECK-NEXT: [[ROWS_COND:%.*]] = icmp ne i64 [[ROWS_STEP]], 2 +; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[TMP24]], [[ROWS_IV]] +; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, double* [[TMP26]], i64 [[TMP25]] +; CHECK-NEXT: [[COL_CAST20:%.*]] = bitcast double* [[TMP27]] to <4 x double>* +; CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x double>* [[COL_CAST20]] to double* +; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[TMP28]] to <2 x double>* +; CHECK-NEXT: store volatile <2 x double> [[TMP17]], <2 x double>* [[VEC_CAST21]], align 8 +; CHECK-NEXT: [[VEC_GEP22:%.*]] = getelementptr double, double* [[TMP28]], i64 2 +; CHECK-NEXT: [[VEC_CAST23:%.*]] = bitcast double* [[VEC_GEP22]] to <2 x double>* +; CHECK-NEXT: store volatile <2 x double> [[TMP23]], <2 x double>* [[VEC_CAST23]], align 8 +; CHECK-NEXT: br i1 [[ROWS_COND]], label [[ROWS_HEADER]], 
label [[COLS_LATCH]] +; CHECK: cols.latch: +; CHECK-NEXT: [[COLS_STEP]] = add i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[COLS_COND:%.*]] = icmp ne i64 [[COLS_STEP]], 2 +; CHECK-NEXT: br i1 [[COLS_COND]], label [[COLS_HEADER]], label [[CONTINUE:%.*]] +; CHECK: continue: ; CHECK-NEXT: ret void ; + entry: %a = load volatile <4 x double>, <4 x double>* %A, align 8 %b = load volatile <4 x double>, <4 x double>* %B, align 8 @@ -54,37 +117,100 @@ entry: define void @multiply_load0_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) { ; CHECK-LABEL: @multiply_load0_volatile( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0 -; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>* -; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* -; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST]], align 8 -; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2 +; CHECK-NEXT: br label [[COLS_HEADER:%.*]] +; CHECK: cols.header: +; CHECK-NEXT: [[COLS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[COLS_BODY:%.*]] +; CHECK: cols.body: +; CHECK-NEXT: br label [[ROWS_HEADER:%.*]] +; CHECK: rows.header: +; CHECK-NEXT: [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[ROWS_BODY:%.*]] +; CHECK: rows.body: +; CHECK-NEXT: br label [[INNER_HEADER:%.*]] +; CHECK: inner.header: +; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP17:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP23:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: br label [[INNER_BODY:%.*]] +; CHECK: inner.body: +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INNER_IV]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], [[ROWS_IV]] +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP3]] +; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP5]] to <4 x double>* +; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* +; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP6]] to <2 x double>* +; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST]], align 8 +; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP6]], i64 2 ; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0 -; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>* -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* -; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>* -; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 -; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2 +; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[INNER_IV]] +; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr double, double* [[TMP9]], i64 [[TMP8]] +; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP10]] to <4 x double>* +; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* +; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP11]] to <2 x double>* +; CHECK-NEXT: [[COL_LOAD5:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 +; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP11]], i64 2 ; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>* -; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 - -; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0 -; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>* -; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double* -; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>* -; CHECK-NEXT: store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8 -; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2 -; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>* -; CHECK-NEXT: store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8 +; CHECK-NEXT: [[COL_LOAD8:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 +; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 0 +; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x double> undef, double [[TMP12]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK9]], <2 x double> [[SPLAT_SPLAT]], <2 x double> [[BLOCK]]) +; CHECK-NEXT: [[BLOCK10:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 1 +; CHECK-NEXT: [[SPLAT_SPLATINSERT11:%.*]] = insertelement <2 x double> undef, double [[TMP14]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT12:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT11]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP15:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK10]], <2 x double> [[SPLAT_SPLAT12]], <2 x double> [[TMP13]]) +; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <2 x double> [[TMP15]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP17]] = shufflevector <2 x double> [[TMP0]], <2 x double> [[TMP16]], <2 x i32> +; CHECK-NEXT: [[BLOCK13:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[BLOCK14:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 0 +; CHECK-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <2 x double> undef, double [[TMP18]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT15]], <2 x 
double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK14]], <2 x double> [[SPLAT_SPLAT16]], <2 x double> [[BLOCK13]]) +; CHECK-NEXT: [[BLOCK17:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 1 +; CHECK-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <2 x double> undef, double [[TMP20]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT18]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK17]], <2 x double> [[SPLAT_SPLAT19]], <2 x double> [[TMP19]]) +; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <2 x double> [[TMP21]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP23]] = shufflevector <2 x double> [[TMP1]], <2 x double> [[TMP22]], <2 x i32> +; CHECK-NEXT: br label [[INNER_LATCH]] +; CHECK: inner.latch: +; CHECK-NEXT: [[INNER_STEP]] = add i64 [[INNER_IV]], 2 +; CHECK-NEXT: [[INNER_COND:%.*]] = icmp ne i64 [[INNER_STEP]], 2 +; CHECK-NEXT: br i1 [[INNER_COND]], label [[INNER_HEADER]], label [[ROWS_LATCH]], !llvm.loop !2 +; CHECK: rows.latch: +; CHECK-NEXT: [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2 +; CHECK-NEXT: [[ROWS_COND:%.*]] = icmp ne i64 [[ROWS_STEP]], 2 +; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[TMP24]], [[ROWS_IV]] +; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, double* [[TMP26]], i64 [[TMP25]] +; CHECK-NEXT: [[COL_CAST20:%.*]] = bitcast double* [[TMP27]] to <4 x double>* +; CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x double>* [[COL_CAST20]] to double* +; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[TMP28]] to <2 x double>* +; CHECK-NEXT: store <2 x double> [[TMP17]], <2 x double>* [[VEC_CAST21]], align 8 +; CHECK-NEXT: [[VEC_GEP22:%.*]] = getelementptr double, double* [[TMP28]], i64 2 +; CHECK-NEXT: [[VEC_CAST23:%.*]] = bitcast double* [[VEC_GEP22]] to <2 x double>* +; CHECK-NEXT: store <2 x double> [[TMP23]], <2 x double>* [[VEC_CAST23]], align 8 +; CHECK-NEXT: br i1 [[ROWS_COND]], label [[ROWS_HEADER]], label [[COLS_LATCH]] +; CHECK: cols.latch: +; CHECK-NEXT: [[COLS_STEP]] = add i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[COLS_COND:%.*]] = icmp ne i64 [[COLS_STEP]], 2 +; CHECK-NEXT: br i1 [[COLS_COND]], label [[COLS_HEADER]], label [[CONTINUE:%.*]] +; CHECK: continue: ; CHECK-NEXT: ret void ; + entry: %a = load volatile <4 x double>, <4 x double>* %A, align 8 %b = load <4 x double>, <4 x double>* %B, align 8 @@ -98,37 +224,100 @@ entry: define void @multiply_load1_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) { ; CHECK-LABEL: @multiply_load1_volatile( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0 -; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>* -; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* -; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>* -; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST]], align 8 -; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2 +; CHECK-NEXT: br label [[COLS_HEADER:%.*]] +; CHECK: cols.header: +; CHECK-NEXT: [[COLS_IV:%.*]] = phi i64 [ 0, 
[[ENTRY:%.*]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[COLS_BODY:%.*]] +; CHECK: cols.body: +; CHECK-NEXT: br label [[ROWS_HEADER:%.*]] +; CHECK: rows.header: +; CHECK-NEXT: [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ] +; CHECK-NEXT: br label [[ROWS_BODY:%.*]] +; CHECK: rows.body: +; CHECK-NEXT: br label [[INNER_HEADER:%.*]] +; CHECK: inner.header: +; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP17:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP23:%.*]], [[INNER_LATCH]] ] +; CHECK-NEXT: br label [[INNER_BODY:%.*]] +; CHECK: inner.body: +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INNER_IV]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], [[ROWS_IV]] +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP3]] +; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP5]] to <4 x double>* +; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* +; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP6]] to <2 x double>* +; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST]], align 8 +; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP6]], i64 2 ; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>* -; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0 -; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>* -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* -; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 -; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2 +; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[COLS_IV]], 2 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[INNER_IV]] +; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr double, double* [[TMP9]], i64 [[TMP8]] +; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP10]] to <4 x double>* +; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* +; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP11]] to <2 x double>* +; CHECK-NEXT: [[COL_LOAD5:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 +; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP11]], i64 2 ; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>* -; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 - -; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0 -; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>* -; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double* -; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>* -; CHECK-NEXT: store <2 x double> {{.*}}, <2 x 
double>* [[VEC_CAST19]], align 8 -; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2 -; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>* -; CHECK-NEXT: store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8 +; CHECK-NEXT: [[COL_LOAD8:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 +; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 0 +; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x double> undef, double [[TMP12]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK9]], <2 x double> [[SPLAT_SPLAT]], <2 x double> [[BLOCK]]) +; CHECK-NEXT: [[BLOCK10:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 1 +; CHECK-NEXT: [[SPLAT_SPLATINSERT11:%.*]] = insertelement <2 x double> undef, double [[TMP14]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT12:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT11]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP15:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK10]], <2 x double> [[SPLAT_SPLAT12]], <2 x double> [[TMP13]]) +; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <2 x double> [[TMP15]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP17]] = shufflevector <2 x double> [[TMP0]], <2 x double> [[TMP16]], <2 x i32> +; CHECK-NEXT: [[BLOCK13:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[BLOCK14:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 0 +; CHECK-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <2 x double> undef, double [[TMP18]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT15]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK14]], <2 x double> [[SPLAT_SPLAT16]], <2 x double> [[BLOCK13]]) +; CHECK-NEXT: [[BLOCK17:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 1 +; CHECK-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <2 x double> undef, double [[TMP20]], i32 0 +; CHECK-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT18]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK17]], <2 x double> [[SPLAT_SPLAT19]], <2 x double> [[TMP19]]) +; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <2 x double> [[TMP21]], <2 x double> undef, <2 x i32> +; CHECK-NEXT: [[TMP23]] = shufflevector <2 x double> [[TMP1]], <2 x double> [[TMP22]], <2 x i32> +; CHECK-NEXT: br label [[INNER_LATCH]] +; CHECK: inner.latch: +; CHECK-NEXT: [[INNER_STEP]] = add i64 [[INNER_IV]], 2 +; CHECK-NEXT: [[INNER_COND:%.*]] = icmp ne i64 [[INNER_STEP]], 2 +; CHECK-NEXT: br i1 [[INNER_COND]], label [[INNER_HEADER]], label [[ROWS_LATCH]], !llvm.loop !3 +; CHECK: 
+; CHECK-NEXT: [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2
+; CHECK-NEXT: [[ROWS_COND:%.*]] = icmp ne i64 [[ROWS_STEP]], 2
+; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[COLS_IV]], 2
+; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[TMP24]], [[ROWS_IV]]
+; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x double>* [[C:%.*]] to double*
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, double* [[TMP26]], i64 [[TMP25]]
+; CHECK-NEXT: [[COL_CAST20:%.*]] = bitcast double* [[TMP27]] to <4 x double>*
+; CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x double>* [[COL_CAST20]] to double*
+; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[TMP28]] to <2 x double>*
+; CHECK-NEXT: store <2 x double> [[TMP17]], <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT: [[VEC_GEP22:%.*]] = getelementptr double, double* [[TMP28]], i64 2
+; CHECK-NEXT: [[VEC_CAST23:%.*]] = bitcast double* [[VEC_GEP22]] to <2 x double>*
+; CHECK-NEXT: store <2 x double> [[TMP23]], <2 x double>* [[VEC_CAST23]], align 8
+; CHECK-NEXT: br i1 [[ROWS_COND]], label [[ROWS_HEADER]], label [[COLS_LATCH]]
+; CHECK: cols.latch:
+; CHECK-NEXT: [[COLS_STEP]] = add i64 [[COLS_IV]], 2
+; CHECK-NEXT: [[COLS_COND:%.*]] = icmp ne i64 [[COLS_STEP]], 2
+; CHECK-NEXT: br i1 [[COLS_COND]], label [[COLS_HEADER]], label [[CONTINUE:%.*]]
+; CHECK: continue:
; CHECK-NEXT: ret void
;
+
entry:
%a = load <4 x double>, <4 x double>* %A, align 8
%b = load volatile <4 x double>, <4 x double>* %B, align 8
@@ -142,36 +331,99 @@ entry:
define void @multiply_store_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) {
; CHECK-LABEL: @multiply_store_volatile(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double*
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0
-; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>*
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double*
-; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>*
-; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
-; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2
+; CHECK-NEXT: br label [[COLS_HEADER:%.*]]
+; CHECK: cols.header:
+; CHECK-NEXT: [[COLS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[COLS_STEP:%.*]], [[COLS_LATCH:%.*]] ]
+; CHECK-NEXT: br label [[COLS_BODY:%.*]]
+; CHECK: cols.body:
+; CHECK-NEXT: br label [[ROWS_HEADER:%.*]]
+; CHECK: rows.header:
+; CHECK-NEXT: [[ROWS_IV:%.*]] = phi i64 [ 0, [[COLS_BODY]] ], [ [[ROWS_STEP:%.*]], [[ROWS_LATCH:%.*]] ]
+; CHECK-NEXT: br label [[ROWS_BODY:%.*]]
+; CHECK: rows.body:
+; CHECK-NEXT: br label [[INNER_HEADER:%.*]]
+; CHECK: inner.header:
+; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[ROWS_BODY]] ], [ [[INNER_STEP:%.*]], [[INNER_LATCH:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP17:%.*]], [[INNER_LATCH]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x double> [ zeroinitializer, [[ROWS_BODY]] ], [ [[TMP23:%.*]], [[INNER_LATCH]] ]
+; CHECK-NEXT: br label [[INNER_BODY:%.*]]
+; CHECK: inner.body:
+; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INNER_IV]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], [[ROWS_IV]]
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double>* [[A:%.*]] to double*
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP3]]
+; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP5]] to <4 x double>*
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double*
+; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP6]] to <2 x double>*
+; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP6]], i64 2
; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
-; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double*
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0
-; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>*
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double*
-; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
-; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8
-; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2
+; CHECK-NEXT: [[COL_LOAD2:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[COLS_IV]], 2
+; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[INNER_IV]]
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x double>* [[B:%.*]] to double*
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr double, double* [[TMP9]], i64 [[TMP8]]
+; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP10]] to <4 x double>*
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double*
+; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP11]] to <2 x double>*
+; CHECK-NEXT: [[COL_LOAD5:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8
+; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP11]], i64 2
; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>*
-; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8
-
-; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double*
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0
-; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>*
-; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double*
-; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
-; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8
-; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2
-; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>*
-; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT: [[COL_LOAD8:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8
+; CHECK-NEXT: [[BLOCK:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[BLOCK9:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x double> undef, double [[TMP12]], i32 0
+; CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK9]], <2 x double> [[SPLAT_SPLAT]], <2 x double> [[BLOCK]])
+; CHECK-NEXT: [[BLOCK10:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x double> [[COL_LOAD5]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT11:%.*]] = insertelement <2 x double> undef, double [[TMP14]], i32 0
+; CHECK-NEXT: [[SPLAT_SPLAT12:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT11]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK10]], <2 x double> [[SPLAT_SPLAT12]], <2 x double> [[TMP13]])
+; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <2 x double> [[TMP15]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP17]] = shufflevector <2 x double> [[TMP0]], <2 x double> [[TMP16]], <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[BLOCK13:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[BLOCK14:%.*]] = shufflevector <2 x double> [[COL_LOAD]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 0
+; CHECK-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <2 x double> undef, double [[TMP18]], i32 0
+; CHECK-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT15]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK14]], <2 x double> [[SPLAT_SPLAT16]], <2 x double> [[BLOCK13]])
+; CHECK-NEXT: [[BLOCK17:%.*]] = shufflevector <2 x double> [[COL_LOAD2]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[COL_LOAD8]], i64 1
+; CHECK-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <2 x double> undef, double [[TMP20]], i32 0
+; CHECK-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <2 x double> [[SPLAT_SPLATINSERT18]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[BLOCK17]], <2 x double> [[SPLAT_SPLAT19]], <2 x double> [[TMP19]])
+; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <2 x double> [[TMP21]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP23]] = shufflevector <2 x double> [[TMP1]], <2 x double> [[TMP22]], <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: br label [[INNER_LATCH]]
+; CHECK: inner.latch:
+; CHECK-NEXT: [[INNER_STEP]] = add i64 [[INNER_IV]], 2
+; CHECK-NEXT: [[INNER_COND:%.*]] = icmp ne i64 [[INNER_STEP]], 2
+; CHECK-NEXT: br i1 [[INNER_COND]], label [[INNER_HEADER]], label [[ROWS_LATCH]], !llvm.loop !4
+; CHECK: rows.latch:
+; CHECK-NEXT: [[ROWS_STEP]] = add i64 [[ROWS_IV]], 2
+; CHECK-NEXT: [[ROWS_COND:%.*]] = icmp ne i64 [[ROWS_STEP]], 2
+; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[COLS_IV]], 2
+; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[TMP24]], [[ROWS_IV]]
+; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x double>* [[C:%.*]] to double*
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, double* [[TMP26]], i64 [[TMP25]]
+; CHECK-NEXT: [[COL_CAST20:%.*]] = bitcast double* [[TMP27]] to <4 x double>*
+; CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x double>* [[COL_CAST20]] to double*
+; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[TMP28]] to <2 x double>*
+; CHECK-NEXT: store volatile <2 x double> [[TMP17]], <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT: [[VEC_GEP22:%.*]] = getelementptr double, double* [[TMP28]], i64 2
+; CHECK-NEXT: [[VEC_CAST23:%.*]] = bitcast double* [[VEC_GEP22]] to <2 x double>*
+; CHECK-NEXT: store volatile <2 x double> [[TMP23]], <2 x double>* [[VEC_CAST23]], align 8
+; CHECK-NEXT: br i1 [[ROWS_COND]], label [[ROWS_HEADER]], label [[COLS_LATCH]]
+; CHECK: cols.latch:
+; CHECK-NEXT: [[COLS_STEP]] = add i64 [[COLS_IV]], 2
+; CHECK-NEXT: [[COLS_COND:%.*]] = icmp ne i64 [[COLS_STEP]], 2
+; CHECK-NEXT: br i1 [[COLS_COND]], label [[COLS_HEADER]], label [[CONTINUE:%.*]]
+; CHECK: continue:
; CHECK-NEXT: ret void
;
+
entry:
%a = load <4 x double>, <4 x double>* %A, align 8
%b = load <4 x double>, <4 x double>* %B, align 8
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
index 3ec7c4285e75..7300a5ff7703 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
@@ -1,10 +1,12 @@
-; RUN: opt -lower-matrix-intrinsics -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -instcombine -verify-dom-info %s -S | FileCheck %s
+; RUN: opt -lower-matrix-intrinsics -fuse-matrix-use-loops=false -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -instcombine -verify-dom-info %s -S | FileCheck %s
; REQUIRES: aarch64-registered-target

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "aarch64-apple-ios"

+; Test tiling without generating explicit loops.
+
define void @multiply(<16 x double> * %A, <16 x double> * %B, <16 x double>* %C) {
; CHECK-LABEL: @multiply(
; CHECK-NEXT: entry: