[mlir][OpenMP] Change the syntax of omp.atomic.read op

This patch changes the syntax of omp.atomic.read to take the address of the
destination instead of producing the read value as a result. This will allow
using the omp.atomic.read operation within an omp.atomic.capture operation,
making its implementation less complex.

Reviewed By: peixin

Differential Revision: https://reviews.llvm.org/D116396
commit a8586b573e
parent 8e773f4491
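For illustration, the change to the textual form, taken from the tests updated in this patch:

// Before this patch: the read value was returned as an SSA result.
%val = omp.atomic.read %x : memref<i32> -> i32

// After this patch: the value read from %x is stored to the address %v.
omp.atomic.read %v = %x : memref<i32>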
@@ -582,8 +582,8 @@ def AtomicReadOp : OpenMP_Op<"atomic.read"> {
   let description = [{
     This operation performs an atomic read.
 
-    The operand `address` is the address from where the value is atomically
-    read.
+    The operand `x` is the address from where the value is atomically read.
+    The operand `v` is the address where the value is stored after reading.
 
     `hint` is the value of hint (as specified in the hint clause). It is a
     compile time constant. As the name suggests, this is just a hint for
@@ -593,10 +593,10 @@ def AtomicReadOp : OpenMP_Op<"atomic.read"> {
     can be one of `seq_cst`, `acq_rel`, `release`, `acquire` or `relaxed`.
   }];
 
-  let arguments = (ins OpenMP_PointerLikeType:$address,
+  let arguments = (ins OpenMP_PointerLikeType:$x,
+                       OpenMP_PointerLikeType:$v,
                        DefaultValuedAttr<I64Attr, "0">:$hint,
                        OptionalAttr<MemoryOrderKind>:$memory_order);
-  let results = (outs AnyType);
   let parser = [{ return parseAtomicReadOp(parser, result); }];
   let printer = [{ return printAtomicReadOp(p, *this); }];
   let verifier = [{ return verifyAtomicReadOp(*this); }];
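As a sketch of how the operands and clauses described above compose under the new syntax (this form mirrors the ops.mlir round-trip test updated further below):

omp.atomic.read %v = %x hint(speculative, contended) memory_order(seq_cst) : memref<i32>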
@@ -1333,32 +1333,30 @@ static LogicalResult verifyOrderedRegionOp(OrderedRegionOp op) {
 /// address ::= operand `:` type
 static ParseResult parseAtomicReadOp(OpAsmParser &parser,
                                      OperationState &result) {
-  OpAsmParser::OperandType address;
+  OpAsmParser::OperandType x, v;
   Type addressType;
   SmallVector<ClauseType> clauses = {memoryOrderClause, hintClause};
   SmallVector<int> segments;
 
-  if (parser.parseOperand(address) ||
+  if (parser.parseOperand(v) || parser.parseEqual() || parser.parseOperand(x) ||
       parseClauses(parser, result, clauses, segments) ||
       parser.parseColonType(addressType) ||
-      parser.resolveOperand(address, addressType, result.operands))
+      parser.resolveOperand(x, addressType, result.operands) ||
+      parser.resolveOperand(v, addressType, result.operands))
     return failure();
 
-  SmallVector<Type> resultType;
-  if (parser.parseArrowTypeList(resultType))
-    return failure();
-  result.addTypes(resultType);
   return success();
 }
 
 /// Printer for AtomicReadOp
 static void printAtomicReadOp(OpAsmPrinter &p, AtomicReadOp op) {
-  p << " " << op.address() << " ";
+  p << " " << op.v() << " = " << op.x() << " ";
   if (op.memory_order())
     p << "memory_order(" << op.memory_order().getValue() << ") ";
   if (op.hintAttr())
     printSynchronizationHint(p << " ", op, op.hintAttr());
-  p << ": " << op.address().getType() << " -> " << op.getType();
+  p << ": " << op.x().getType();
   return;
 }
 
 /// Verifier for AtomicReadOp
@@ -1369,6 +1367,9 @@ static LogicalResult verifyAtomicReadOp(AtomicReadOp op) {
     return op.emitError(
         "memory-order must not be acq_rel or release for atomic reads");
   }
+  if (op.x() == op.v())
+    return op.emitError(
+        "read and write must not be to the same location for atomic reads");
   return verifySynchronizationHint(op, op.hint());
 }
@@ -889,23 +889,12 @@ convertOmpAtomicRead(Operation &opInst, llvm::IRBuilderBase &builder,
       moduleTranslation.translateLoc(opInst.getLoc(), subprogram);
   llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder.saveIP(),
                                                     llvm::DebugLoc(diLoc));
-  llvm::AtomicOrdering ao = convertAtomicOrdering(readOp.memory_order());
-  llvm::Value *address = moduleTranslation.lookupValue(readOp.address());
-  llvm::OpenMPIRBuilder::InsertPointTy currentIP = builder.saveIP();
-
-  // Insert alloca for result.
-  llvm::OpenMPIRBuilder::InsertPointTy allocaIP =
-      findAllocaInsertPoint(builder, moduleTranslation);
-  builder.restoreIP(allocaIP);
-  llvm::Value *v = builder.CreateAlloca(
-      moduleTranslation.convertType(readOp.getResult().getType()));
-  moduleTranslation.mapValue(readOp.getResult(), v);
-
-  // Restore the IP and insert Atomic Read.
-  builder.restoreIP(currentIP);
-  llvm::OpenMPIRBuilder::AtomicOpValue atomicV = {v, false, false};
-  llvm::OpenMPIRBuilder::AtomicOpValue x = {address, false, false};
-  builder.restoreIP(ompBuilder->createAtomicRead(ompLoc, x, atomicV, ao));
+  llvm::AtomicOrdering AO = convertAtomicOrdering(readOp.memory_order());
+  llvm::Value *x = moduleTranslation.lookupValue(readOp.x());
+  llvm::Value *v = moduleTranslation.lookupValue(readOp.v());
+  llvm::OpenMPIRBuilder::AtomicOpValue V = {v, false, false};
+  llvm::OpenMPIRBuilder::AtomicOpValue X = {x, false, false};
+  builder.restoreIP(ompBuilder->createAtomicRead(ompLoc, X, V, AO));
   return success();
 }
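With this change the op lowers directly to an atomic load from `x` followed by an ordinary store to `v`, with no intermediate alloca. A minimal sketch, with the lowered LLVM IR shown in comments (pointer names are illustrative; the exact output is checked by the LLVM IR test at the end of this diff):

// Lowers to:
//   %0 = load atomic i32, i32* %arg0 monotonic, align 4
//   store i32 %0, i32* %arg1, align 4
omp.atomic.read %arg1 = %arg0 : !llvm.ptr<i32>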
@@ -505,49 +505,57 @@ func @omp_ordered5(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64, %vec1 : i
 
 // -----
 
-func @omp_atomic_read1(%addr : memref<i32>) {
+func @omp_atomic_read1(%x: memref<i32>, %v: memref<i32>) {
   // expected-error @below {{the hints omp_sync_hint_nonspeculative and omp_sync_hint_speculative cannot be combined.}}
-  %1 = omp.atomic.read %addr hint(speculative, nonspeculative) : memref<i32> -> i32
+  omp.atomic.read %v = %x hint(speculative, nonspeculative) : memref<i32>
   return
 }
 
 // -----
 
-func @omp_atomic_read2(%addr : memref<i32>) {
+func @omp_atomic_read2(%x: memref<i32>, %v: memref<i32>) {
   // expected-error @below {{attribute 'memory_order' failed to satisfy constraint: MemoryOrderKind Clause}}
-  %1 = omp.atomic.read %addr memory_order(xyz) : memref<i32> -> i32
+  omp.atomic.read %v = %x memory_order(xyz) : memref<i32>
   return
 }
 
 // -----
 
-func @omp_atomic_read3(%addr : memref<i32>) {
+func @omp_atomic_read3(%x: memref<i32>, %v: memref<i32>) {
   // expected-error @below {{memory-order must not be acq_rel or release for atomic reads}}
-  %1 = omp.atomic.read %addr memory_order(acq_rel) : memref<i32> -> i32
+  omp.atomic.read %v = %x memory_order(acq_rel) : memref<i32>
   return
 }
 
 // -----
 
-func @omp_atomic_read4(%addr : memref<i32>) {
+func @omp_atomic_read4(%x: memref<i32>, %v: memref<i32>) {
   // expected-error @below {{memory-order must not be acq_rel or release for atomic reads}}
-  %1 = omp.atomic.read %addr memory_order(release) : memref<i32> -> i32
+  omp.atomic.read %v = %x memory_order(release) : memref<i32>
   return
 }
 
 // -----
 
-func @omp_atomic_read5(%addr : memref<i32>) {
+func @omp_atomic_read5(%x: memref<i32>, %v: memref<i32>) {
   // expected-error @below {{at most one memory_order clause can appear on the omp.atomic.read operation}}
-  %1 = omp.atomic.read %addr memory_order(acquire) memory_order(relaxed) : memref<i32> -> i32
+  omp.atomic.read %v = %x memory_order(acquire) memory_order(relaxed) : memref<i32>
   return
 }
 
 // -----
 
-func @omp_atomic_read6(%addr : memref<i32>) {
+func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
   // expected-error @below {{at most one hint clause can appear on the omp.atomic.read operation}}
-  %1 = omp.atomic.read %addr hint(speculative) hint(contended) : memref<i32> -> i32
+  omp.atomic.read %v = %x hint(speculative) hint(contended) : memref<i32>
   return
 }
+
+// -----
+
+func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
+  // expected-error @below {{read and write must not be to the same location for atomic reads}}
+  omp.atomic.read %x = %x hint(speculative) : memref<i32>
+  return
+}
@@ -491,20 +491,20 @@ func @omp_ordered(%arg1 : i32, %arg2 : i32, %arg3 : i32,
 }
 
 // CHECK-LABEL: omp_atomic_read
-// CHECK-SAME: (%[[ADDR:.*]]: memref<i32>)
-func @omp_atomic_read(%addr : memref<i32>) {
-  // CHECK: %{{.*}} = omp.atomic.read %[[ADDR]] : memref<i32> -> i32
-  %1 = omp.atomic.read %addr : memref<i32> -> i32
-  // CHECK: %{{.*}} = omp.atomic.read %[[ADDR]] memory_order(seq_cst) : memref<i32> -> i32
-  %2 = omp.atomic.read %addr memory_order(seq_cst) : memref<i32> -> i32
-  // CHECK: %{{.*}} = omp.atomic.read %[[ADDR]] memory_order(acquire) : memref<i32> -> i32
-  %5 = omp.atomic.read %addr memory_order(acquire) : memref<i32> -> i32
-  // CHECK: %{{.*}} = omp.atomic.read %[[ADDR]] memory_order(relaxed) : memref<i32> -> i32
-  %6 = omp.atomic.read %addr memory_order(relaxed) : memref<i32> -> i32
-  // CHECK: %{{.*}} = omp.atomic.read %[[ADDR]] hint(contended, nonspeculative) : memref<i32> -> i32
-  %7 = omp.atomic.read %addr hint(nonspeculative, contended) : memref<i32> -> i32
-  // CHECK: %{{.*}} = omp.atomic.read %[[ADDR]] memory_order(seq_cst) hint(contended, speculative) : memref<i32> -> i32
-  %8 = omp.atomic.read %addr hint(speculative, contended) memory_order(seq_cst) : memref<i32> -> i32
+// CHECK-SAME: (%[[v:.*]]: memref<i32>, %[[x:.*]]: memref<i32>)
+func @omp_atomic_read(%v: memref<i32>, %x: memref<i32>) {
+  // CHECK: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
+  omp.atomic.read %v = %x : memref<i32>
+  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(seq_cst) : memref<i32>
+  omp.atomic.read %v = %x memory_order(seq_cst) : memref<i32>
+  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(acquire) : memref<i32>
+  omp.atomic.read %v = %x memory_order(acquire) : memref<i32>
+  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(relaxed) : memref<i32>
+  omp.atomic.read %v = %x memory_order(relaxed) : memref<i32>
+  // CHECK: omp.atomic.read %[[v]] = %[[x]] hint(contended, nonspeculative) : memref<i32>
+  omp.atomic.read %v = %x hint(nonspeculative, contended) : memref<i32>
+  // CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(seq_cst) hint(contended, speculative) : memref<i32>
+  omp.atomic.read %v = %x hint(speculative, contended) memory_order(seq_cst) : memref<i32>
   return
 }
@@ -710,30 +710,26 @@ llvm.func @omp_ordered(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i64,
 // -----
 
 // CHECK-LABEL: @omp_atomic_read
-// CHECK-SAME: (i32* %[[ARG0:.*]])
-llvm.func @omp_atomic_read(%arg0 : !llvm.ptr<i32>) -> () {
-  // CHECK: %{{.*}} = alloca i32, align 4
-  // CHECK: %{{.*}} = alloca i32, align 4
-  // CHECK: %{{.*}} = alloca i32, align 4
-  // CHECK: %{{.*}} = alloca i32, align 4
+// CHECK-SAME: (i32* %[[ARG0:.*]], i32* %[[ARG1:.*]])
+llvm.func @omp_atomic_read(%arg0 : !llvm.ptr<i32>, %arg1 : !llvm.ptr<i32>) -> () {
 
   // CHECK: %[[X1:.*]] = load atomic i32, i32* %[[ARG0]] monotonic, align 4
-  // CHECK: store i32 %[[X1]], i32* %{{.*}}, align 4
-  %x1 = omp.atomic.read %arg0 : !llvm.ptr<i32> -> i32
+  // CHECK: store i32 %[[X1]], i32* %[[ARG1]], align 4
+  omp.atomic.read %arg1 = %arg0 : !llvm.ptr<i32>
 
   // CHECK: %[[X2:.*]] = load atomic i32, i32* %[[ARG0]] seq_cst, align 4
   // CHECK: call void @__kmpc_flush(%{{.*}})
-  // CHECK: store i32 %[[X2]], i32* %{{.*}}, align 4
-  %x2 = omp.atomic.read %arg0 memory_order(seq_cst) : !llvm.ptr<i32> -> i32
+  // CHECK: store i32 %[[X2]], i32* %[[ARG1]], align 4
+  omp.atomic.read %arg1 = %arg0 memory_order(seq_cst) : !llvm.ptr<i32>
 
   // CHECK: %[[X3:.*]] = load atomic i32, i32* %[[ARG0]] acquire, align 4
   // CHECK: call void @__kmpc_flush(%{{.*}})
-  // CHECK: store i32 %[[X3]], i32* %{{.*}}, align 4
-  %x3 = omp.atomic.read %arg0 memory_order(acquire) : !llvm.ptr<i32> -> i32
+  // CHECK: store i32 %[[X3]], i32* %[[ARG1]], align 4
+  omp.atomic.read %arg1 = %arg0 memory_order(acquire) : !llvm.ptr<i32>
 
   // CHECK: %[[X4:.*]] = load atomic i32, i32* %[[ARG0]] monotonic, align 4
-  // CHECK: store i32 %[[X4]], i32* %{{.*}}, align 4
-  %x4 = omp.atomic.read %arg0 memory_order(relaxed) : !llvm.ptr<i32> -> i32
+  // CHECK: store i32 %[[X4]], i32* %[[ARG1]], align 4
+  omp.atomic.read %arg1 = %arg0 memory_order(relaxed) : !llvm.ptr<i32>
   llvm.return
 }