[mlir][VectorToSCF] 128 byte alignment of alloc ops

Added 128-byte alignment to the alloc ops created in the VectorToSCF pass.
128-byte alignment had already been introduced to this pass, but not to all
alloc ops. This commit adds the same alignment to the remaining ones.
Specifying the alignment explicitly prevents possible misaligned-access
errors on weakly tested architectures.
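
For context (an illustration, not part of the commit): an alloc carrying
{alignment = 128 : i64} ultimately corresponds to a buffer whose address is a
multiple of 128, along the lines of this minimal C++ sketch. The element
count mirrors the memref<5x4x3xf32> from the tests below; everything else is
made up for illustration.

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    int main() {
      // 5x4x3 f32 elements, padded up to a multiple of the alignment,
      // because std::aligned_alloc requires size % alignment == 0.
      constexpr std::size_t kAlign = 128;
      std::size_t bytes = 5 * 4 * 3 * sizeof(float);
      bytes = (bytes + kAlign - 1) / kAlign * kAlign;

      void *buf = std::aligned_alloc(kAlign, bytes);
      assert(buf && reinterpret_cast<std::uintptr_t>(buf) % kAlign == 0);
      // Aligned vector loads/stores into buf cannot fault on alignment.
      std::free(buf);
    }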

Differential Revision: https://reviews.llvm.org/D86454
Jakub Lichman 2020-08-26 16:41:04 +00:00
parent 626c3738cd
commit f5ed22f09d
2 changed files with 10 additions and 6 deletions


@@ -35,6 +35,8 @@
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/Passes.h"
 
+#define ALIGNMENT_SIZE 128
+
 using namespace mlir;
 using namespace mlir::edsc;
 using namespace mlir::edsc::intrinsics;
@@ -232,8 +234,8 @@ static Value setAllocAtFunctionEntry(MemRefType memRefMinorVectorType,
       op->getParentWithTrait<OpTrait::AutomaticAllocationScope>();
   assert(scope && "Expected op to be inside automatic allocation scope");
   b.setInsertionPointToStart(&scope->getRegion(0).front());
-  Value res =
-      std_alloca(memRefMinorVectorType, ValueRange{}, b.getI64IntegerAttr(128));
+  Value res = std_alloca(memRefMinorVectorType, ValueRange{},
+                         b.getI64IntegerAttr(ALIGNMENT_SIZE));
   return res;
 }
 
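For readers unfamiliar with the EDSC intrinsics used above: std_alloca
forwards its arguments to the AllocaOp builder, so the aligned call is
roughly equivalent to the following plain-builder form (a sketch inferred
from this diff, not from the commit; `b` and `loc` are assumed in scope):

    // Sketch: the EDSC call above without the sugar.
    Value res =
        b.create<AllocaOp>(loc, memRefMinorVectorType,
                           /*dynamicSizes=*/ValueRange{},
                           /*alignment=*/b.getI64IntegerAttr(ALIGNMENT_SIZE));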
@@ -575,7 +577,8 @@ LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
     steps.push_back(std_constant_index(step));
 
   // 2. Emit alloc-copy-load-dealloc.
-  Value tmp = std_alloc(tmpMemRefType(transfer));
+  Value tmp = std_alloc(tmpMemRefType(transfer), ValueRange{},
+                        rewriter.getI64IntegerAttr(ALIGNMENT_SIZE));
   StdIndexedValue local(tmp);
   Value vec = vector_type_cast(tmp);
   loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
@@ -648,7 +651,8 @@ LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
     steps.push_back(std_constant_index(step));
 
   // 2. Emit alloc-store-copy-dealloc.
-  Value tmp = std_alloc(tmpMemRefType(transfer));
+  Value tmp = std_alloc(tmpMemRefType(transfer), ValueRange{},
+                        rewriter.getI64IntegerAttr(ALIGNMENT_SIZE));
   StdIndexedValue local(tmp);
   Value vec = vector_type_cast(tmp);
   std_store(vectorValue, vec);
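
Both rewrite patterns now repeat the same aligned-alloc idiom. A small
helper could keep the alignment policy in one place; a sketch in the file's
existing EDSC style (createAlignedTmp is an invented name, not in the
commit):

    // Hypothetical helper: allocate the temporary copy buffer with the
    // pass-wide alignment.
    static Value createAlignedTmp(PatternRewriter &rewriter, MemRefType type) {
      return std_alloc(type, ValueRange{},
                       rewriter.getI64IntegerAttr(ALIGNMENT_SIZE));
    }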


@@ -68,7 +68,7 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
 // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} {
 // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
 // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
-// CHECK: %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
+// CHECK: %[[ALLOC:.*]] = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>
 // CHECK-NEXT: scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {
 // CHECK-NEXT: scf.for %[[I5:.*]] = %[[C0]] to %[[C4]] step %[[C1]] {
 // CHECK-NEXT: scf.for %[[I6:.*]] = %[[C0]] to %[[C5]] step %[[C1]] {
@@ -145,7 +145,7 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
 // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} step 4 {
 // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
 // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
-// CHECK: %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
+// CHECK: %[[ALLOC:.*]] = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>
 // CHECK-NEXT: %[[VECTOR_VIEW:.*]] = vector.type_cast {{.*}} : memref<5x4x3xf32>
 // CHECK: store %{{.*}}, {{.*}} : memref<vector<5x4x3xf32>>
 // CHECK-NEXT: scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {