Loop invariant code motion - remove reliance on getForwardSlice. Add more tests.

--

PiperOrigin-RevId: 250950703
Author: Amit Sabne, 2019-05-31 13:56:47 -07:00 (committed by Mehdi Amini)
Parent: 05bb27fac2
Commit: 7a43da6060
2 changed files with 509 additions and 40 deletions
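For orientation, here is a minimal sketch of the transformation this pass performs (hypothetical IR written in the style of the tests below, not taken from the commit): an op whose operands are all defined outside the loop is moved to just before it.

// Before LICM (%m, %cf7, %cf8 defined in the enclosing function):
affine.for %i0 = 0 to 10 {
  %v0 = addf %cf7, %cf8 : f32
  store %v0, %m[%i0] : memref<10xf32>
}

// After LICM: the addf does not depend on %i0, so it is hoisted; the
// store uses the induction variable and stays.
%v0 = addf %cf7, %cf8 : f32
affine.for %i0 = 0 to 10 {
  store %v0, %m[%i0] : memref<10xf32>
}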


@@ -35,15 +35,13 @@
#include "mlir/Transforms/Utils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "licm"
-using llvm::SetVector;
using namespace mlir;
namespace {
@@ -57,45 +55,177 @@ struct LoopInvariantCodeMotion : public FunctionPass<LoopInvariantCodeMotion> {
};
} // end anonymous namespace
static bool
checkInvarianceOfNestedIfOps(Operation *op, Value *indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist);
static bool isOpLoopInvariant(Operation &op, Value *indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist);
static bool
areAllOpsInTheBlockListInvariant(Region &blockList, Value *indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist);
static bool isMemRefDereferencingOp(Operation &op) {
// TODO(asabne): Support DMA Ops.
if (isa<LoadOp>(op) || isa<StoreOp>(op)) {
return true;
}
return false;
}
FunctionPassBase *mlir::createLoopInvariantCodeMotionPass() {
return new LoopInvariantCodeMotion();
}
-void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
-  auto *loopBody = forOp.getBody();
// Returns true if the individual op is loop invariant.
bool isOpLoopInvariant(Operation &op, Value *indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist) {
LLVM_DEBUG(llvm::dbgs() << "iterating on op: " << op;);
-  // This is the place where hoisted instructions would reside.
-  FuncBuilder b(forOp.getOperation());
if (isa<AffineIfOp>(op)) {
if (!checkInvarianceOfNestedIfOps(&op, indVar, definedOps, opsToHoist)) {
return false;
}
} else if (isa<AffineForOp>(op)) {
// If the body of a predicated region has a for loop, we don't hoist the
// 'affine.if'.
return false;
} else if (isa<DmaStartOp>(op) || isa<DmaWaitOp>(op)) {
// TODO(asabne): Support DMA ops.
return false;
} else if (!isa<ConstantOp>(op)) {
if (isMemRefDereferencingOp(op)) {
Value *memref = isa<LoadOp>(op) ? cast<LoadOp>(op).getMemRef()
: cast<StoreOp>(op).getMemRef();
for (auto *user : memref->getUsers()) {
// If this memref has a user that is a DMA, give up because these
// operations write to this memref.
if (isa<DmaStartOp>(op) || isa<DmaWaitOp>(op)) {
return false;
}
// If the memref accessed by the load/store is written by a store elsewhere
// in the loop nest, we do not hoist. Similarly, a store is not hoisted if
// the memref it writes is read by a load elsewhere in the nest.
if (isa<StoreOp>(user) || (isa<LoadOp>(user) && isa<StoreOp>(op))) {
if (&op != user) {
SmallVector<AffineForOp, 8> userIVs;
getLoopIVs(*user, &userIVs);
// Check that userIVs don't contain the for loop around the op.
if (llvm::is_contained(userIVs, getForInductionVarOwner(indVar))) {
return false;
}
}
}
}
}
-  // This vector is used to place loop invariant operations.
-  SmallVector<Operation *, 8> opsToMove;
// Insert this op in the defined ops list.
definedOps.insert(&op);
SetVector<Operation *> loopDefinedOps;
// Generate forward slice which contains ops that fall under the transitive
// definition closure following the loop induction variable.
getForwardSlice(forOp, &loopDefinedOps);
if (op.getNumOperands() == 0 && !isa<AffineTerminatorOp>(op)) {
LLVM_DEBUG(llvm::dbgs() << "\nNon-constant op with 0 operands\n");
return false;
}
for (unsigned int i = 0; i < op.getNumOperands(); ++i) {
auto *operandSrc = op.getOperand(i)->getDefiningOp();
-  LLVM_DEBUG(for (auto i : loopDefinedOps) {
-    i->print(llvm::dbgs() << "\nLoop-dependent op\n");
-  });
LLVM_DEBUG(
op.getOperand(i)->print(llvm::dbgs() << "\nIterating on operand\n"));
for (auto &op : *loopBody) {
// If the operation is loop invariant, insert it into opsToMove.
if (!isa<AffineForOp>(op) && !isa<AffineTerminatorOp>(op) &&
loopDefinedOps.count(&op) != 1) {
LLVM_DEBUG(op.print(llvm::dbgs() << "\nLICM'ing op\n"));
opsToMove.push_back(&op);
// If the loop IV is the operand, this op isn't loop invariant.
if (indVar == op.getOperand(i)) {
LLVM_DEBUG(llvm::dbgs() << "\nLoop IV is the operand\n");
return false;
}
if (operandSrc != nullptr) {
LLVM_DEBUG(llvm::dbgs()
<< *operandSrc << "\nIterating on operand src\n");
// If the value was defined in the loop (outside of the
// if/else region), and that operation itself wasn't meant to
// be hoisted, then mark this operation loop dependent.
if (definedOps.count(operandSrc) && opsToHoist.count(operandSrc) == 0) {
return false;
}
}
}
}
-  // For all instructions that we found to be invariant, place them sequentially
// If no operand was loop variant, mark this op for motion.
opsToHoist.insert(&op);
return true;
}
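To make the operand check above concrete, consider a hypothetical loop body (illustrative names, not from the commit): an op is marked hoistable when every operand is either defined outside the loop or produced by an op already in opsToHoist, while any direct use of the induction variable keeps the op in place.

affine.for %i0 = 0 to 10 {
  %a = addf %cf7, %cf8 : f32           // operands defined above the loop: hoistable
  %b = mulf %a, %a : f32               // defining op of %a is in opsToHoist: hoistable
  store %b, %m[%i0] : memref<10xf32>   // uses the IV %i0: stays in the loop
}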
// Checks if all ops in a region (i.e. list of blocks) are loop invariant.
bool areAllOpsInTheBlockListInvariant(
Region &blockList, Value *indVar, SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist) {
for (auto &b : blockList) {
for (auto &op : b) {
if (!isOpLoopInvariant(op, indVar, definedOps, opsToHoist)) {
return false;
}
}
}
return true;
}
// Returns true if the affine.if op can be hoisted.
bool checkInvarianceOfNestedIfOps(Operation *op, Value *indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist) {
assert(isa<AffineIfOp>(op));
auto ifOp = cast<AffineIfOp>(op);
if (!areAllOpsInTheBlockListInvariant(ifOp.getThenBlocks(), indVar,
definedOps, opsToHoist)) {
return false;
}
if (!areAllOpsInTheBlockListInvariant(ifOp.getElseBlocks(), indVar,
definedOps, opsToHoist)) {
return false;
}
return true;
}
void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
auto *loopBody = forOp.getBody();
auto *indVar = forOp.getInductionVar();
SmallPtrSet<Operation *, 8> definedOps;
// This is the place where hoisted instructions would reside.
FuncBuilder b(forOp.getOperation());
SmallPtrSet<Operation *, 8> opsToHoist;
SmallVector<Operation *, 8> opsToMove;
for (auto &op : *loopBody) {
// We don't hoist for loops.
if (!isa<AffineForOp>(op)) {
if (!isa<AffineTerminatorOp>(op)) {
if (isOpLoopInvariant(op, indVar, definedOps, opsToHoist)) {
opsToMove.push_back(&op);
}
}
}
}
// For all instructions that we found to be invariant, place them sequentially
// right before the for loop.
for (auto *op : opsToMove) {
op->moveBefore(forOp);
}
-  LLVM_DEBUG(forOp.getOperation()->print(llvm::dbgs() << "\nModified loop\n"));
LLVM_DEBUG(forOp.getOperation()->print(llvm::dbgs() << "Modified loop\n"));
// If the for loop body has a single operation (the terminator), erase it.
if (forOp.getBody()->getOperations().size() == 1) {
@@ -105,7 +235,6 @@ void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
}
void LoopInvariantCodeMotion::runOnFunction() {
// Walk through all loops in a function in innermost-loop-first order. This
// way, we first apply LICM to the inner loop and place the hoisted ops in
// the outer loop, which in turn can be further LICM'ed.
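As a sketch of why the innermost-first order matters (again hypothetical IR, assuming %m, %cf7 and %cf8 come from the enclosing function, in the style of the test file that follows): the invariant addf below is first hoisted out of %i1 into the body of %i0, and the subsequent pass over %i0 then hoists it out of the nest entirely.

affine.for %i0 = 0 to 10 {
  affine.for %i1 = 0 to 10 {
    %v0 = addf %cf7, %cf8 : f32   // invariant in %i1, then invariant in %i0
    store %v0, %m[%i1] : memref<10xf32>
  }
}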


@@ -113,7 +113,7 @@ func @invariant_code_inside_affine_if() {
}
-func @nested_loops_with_common_and_uncommon_invariant_code() {
func @dependent_stores() {
%m = alloc() : memref<10xf32>
%cf7 = constant 7.0 : f32
%cf8 = constant 8.0 : f32
@@ -122,7 +122,7 @@ func @nested_loops_with_common_and_uncommon_invariant_code() {
%v0 = addf %cf7, %cf8 : f32
affine.for %i1 = 0 to 10 {
%v1 = addf %cf7, %cf7 : f32
-      store %v0, %m[%i1] : memref<10xf32>
store %v1, %m[%i1] : memref<10xf32>
store %v0, %m[%i0] : memref<10xf32>
}
}
@@ -133,9 +133,97 @@ func @nested_loops_with_common_and_uncommon_invariant_code() {
// CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
// CHECK-NEXT: %2 = addf %cst, %cst : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: store %2, %0[%i1] : memref<10xf32>
// CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @independent_stores() {
%m = alloc() : memref<10xf32>
%cf7 = constant 7.0 : f32
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
%v0 = addf %cf7, %cf8 : f32
affine.for %i1 = 0 to 10 {
%v1 = addf %cf7, %cf7 : f32
store %v0, %m[%i0] : memref<10xf32>
store %v1, %m[%i1] : memref<10xf32>
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 7.000000e+00 : f32
// CHECK-NEXT: %cst_0 = constant 8.000000e+00 : f32
// CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
// CHECK-NEXT: %2 = addf %cst, %cst : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
// CHECK-NEXT: store %2, %0[%i1] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @load_dependent_store() {
%m = alloc() : memref<10xf32>
%cf7 = constant 7.0 : f32
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
%v0 = addf %cf7, %cf8 : f32
affine.for %i1 = 0 to 10 {
%v1 = addf %cf7, %cf7 : f32
store %v0, %m[%i1] : memref<10xf32>
%v2 = load %m[%i0] : memref<10xf32>
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 7.000000e+00 : f32
// CHECK-NEXT: %cst_0 = constant 8.000000e+00 : f32
// CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
// CHECK-NEXT: %2 = addf %cst, %cst : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
// CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @load_after_load() {
%m = alloc() : memref<10xf32>
%cf7 = constant 7.0 : f32
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
%v0 = addf %cf7, %cf8 : f32
affine.for %i1 = 0 to 10 {
%v1 = addf %cf7, %cf7 : f32
%v3 = load %m[%i1] : memref<10xf32>
%v2 = load %m[%i0] : memref<10xf32>
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 7.000000e+00 : f32
// CHECK-NEXT: %cst_0 = constant 8.000000e+00 : f32
// CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
// CHECK-NEXT: %2 = addf %cst, %cst : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: %4 = load %0[%i1] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
@@ -168,22 +256,274 @@ func @invariant_affine_if() {
return
}
-func @invariant_constant_and_load() {
-  %m = alloc() : memref<100xf32>
-  affine.for %i0 = 0 to 5 {
-    %c0 = constant 0 : index
-    %v = load %m[%c0] : memref<100xf32>
-    store %v, %m[%i0] : memref<100xf32>
func @invariant_affine_if2() {
%m = alloc() : memref<10xf32>
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
%cf9 = addf %cf8, %cf8 : f32
store %cf9, %m[%i1] : memref<10xf32>
}
}
}
// CHECK: %0 = alloc() : memref<100xf32>
// CHECK-NEXT: %c0 = constant 0 : index
// CHECK-NEXT: %1 = load %0[%c0] : memref<100xf32>
// CHECK-NEXT: affine.for %i0 = 0 to 5 {
// CHECK-NEXT: store %1, %0[%i0] : memref<100xf32>
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: %1 = addf %cst, %cst : f32
// CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @invariant_affine_nested_if() {
%m = alloc() : memref<10xf32>
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
%cf9 = addf %cf8, %cf8 : f32
store %cf9, %m[%i0] : memref<10xf32>
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
store %cf9, %m[%i1] : memref<10xf32>
}
}
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: %1 = addf %cst, %cst : f32
// CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @invariant_affine_nested_if_else() {
%m = alloc() : memref<10xf32>
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
%cf9 = addf %cf8, %cf8 : f32
store %cf9, %m[%i0] : memref<10xf32>
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
store %cf9, %m[%i0] : memref<10xf32>
} else {
store %cf9, %m[%i1] : memref<10xf32>
}
}
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: %1 = addf %cst, %cst : f32
// CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
// CHECK-NEXT: } else {
// CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @invariant_affine_nested_if_else2() {
%m = alloc() : memref<10xf32>
%m2 = alloc() : memref<10xf32>
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
%cf9 = addf %cf8, %cf8 : f32
%tload1 = load %m[%i0] : memref<10xf32>
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
store %cf9, %m2[%i0] : memref<10xf32>
} else {
%tload2 = load %m[%i0] : memref<10xf32>
}
}
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %1 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: %2 = addf %cst, %cst : f32
// CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: store %2, %1[%i0] : memref<10xf32>
// CHECK-NEXT: } else {
// CHECK-NEXT: %4 = load %0[%i0] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @invariant_affine_nested_if2() {
%m = alloc() : memref<10xf32>
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
%cf9 = addf %cf8, %cf8 : f32
%v1 = load %m[%i0] : memref<10xf32>
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
%v2 = load %m[%i0] : memref<10xf32>
}
}
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: %1 = addf %cst, %cst : f32
// CHECK-NEXT: %2 = load %0[%i0] : memref<10xf32>
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @invariant_affine_for_inside_affine_if() {
%m = alloc() : memref<10xf32>
%cf8 = constant 8.0 : f32
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
%cf9 = addf %cf8, %cf8 : f32
store %cf9, %m[%i0] : memref<10xf32>
affine.for %i2 = 0 to 10 {
store %cf9, %m[%i2] : memref<10xf32>
}
}
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: affine.if #set0(%i0, %i0) {
// CHECK-NEXT: %1 = addf %cst, %cst : f32
// CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
// CHECK-NEXT: affine.for %i2 = 0 to 10 {
// CHECK-NEXT: store %1, %0[%i2] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @invariant_constant_and_load() {
%m = alloc() : memref<100xf32>
%m2 = alloc() : memref<100xf32>
affine.for %i0 = 0 to 5 {
%c0 = constant 0 : index
%v = load %m2[%c0] : memref<100xf32>
store %v, %m[%i0] : memref<100xf32>
}
// CHECK: %0 = alloc() : memref<100xf32>
// CHECK-NEXT: %1 = alloc() : memref<100xf32>
// CHECK-NEXT: %c0 = constant 0 : index
// CHECK-NEXT: %2 = load %1[%c0] : memref<100xf32>
// CHECK-NEXT: affine.for %i0 = 0 to 5 {
// CHECK-NEXT: store %2, %0[%i0] : memref<100xf32>
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @nested_load_store_same_memref() {
%m = alloc() : memref<10xf32>
%cst = constant 8.0 : f32
%c0 = constant 0 : index
affine.for %i0 = 0 to 10 {
%v0 = load %m[%c0] : memref<10xf32>
affine.for %i1 = 0 to 10 {
store %cst, %m[%i1] : memref<10xf32>
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: %c0 = constant 0 : index
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: %1 = load %0[%c0] : memref<10xf32>
// CHECK-NEXT: affine.for %i1 = 0 to 10 {
// CHECK-NEXT: store %cst, %0[%i1] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}
func @nested_load_store_same_memref2() {
%m = alloc() : memref<10xf32>
%cst = constant 8.0 : f32
%c0 = constant 0 : index
affine.for %i0 = 0 to 10 {
store %cst, %m[%c0] : memref<10xf32>
affine.for %i1 = 0 to 10 {
%v0 = load %m[%i0] : memref<10xf32>
}
}
// CHECK: %0 = alloc() : memref<10xf32>
// CHECK-NEXT: %cst = constant 8.000000e+00 : f32
// CHECK-NEXT: %c0 = constant 0 : index
// CHECK-NEXT: affine.for %i0 = 0 to 10 {
// CHECK-NEXT: store %cst, %0[%c0] : memref<10xf32>
// CHECK-NEXT: %1 = load %0[%i0] : memref<10xf32>
// CHECK-NEXT: }
// CHECK-NEXT: return
return
}