forked from OSchip/llvm-project
[mlir][OpenMP] Add memory_order clause tests
This patch adds tests for the memory_order clause for atomic update and capture operations. This patch also adds a check for making sure that the operations inside an omp.atomic.capture region do not specify the memory_order clause. Reviewed By: kiranchandramohan, peixin Differential Revision: https://reviews.llvm.org/D126195
This commit is contained in:
parent
ad742cf85d
commit
f5d29c15bf
|
@ -956,6 +956,11 @@ LogicalResult AtomicCaptureOp::verifyRegions() {
|
|||
if (getFirstOp()->getAttr("hint_val") || getSecondOp()->getAttr("hint_val"))
|
||||
return emitOpError(
|
||||
"operations inside capture region must not have hint clause");
|
||||
|
||||
if (getFirstOp()->getAttr("memory_order_val") ||
|
||||
getSecondOp()->getAttr("memory_order_val"))
|
||||
return emitOpError(
|
||||
"operations inside capture region must not have memory_order clause");
|
||||
return success();
|
||||
}
|
||||
|
||||
|
|
|
@ -952,6 +952,36 @@ func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
|
|||
|
||||
// -----
|
||||
|
||||
// Negative test: the ops nested inside an omp.atomic.capture region must not
// carry their own memory_order clause; the clause belongs on the capture op.
// Here the inner omp.atomic.update specifies memory_order(seq_cst), so the
// verifier must reject it.
func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
  // expected-error @below {{operations inside capture region must not have memory_order clause}}
  omp.atomic.capture {
    omp.atomic.update memory_order(seq_cst) %x : memref<i32> {
    ^bb0(%xval: i32):
      %newval = llvm.add %xval, %expr : i32
      omp.yield(%newval : i32)
    }
    omp.atomic.read %v = %x : memref<i32>
  }
  return
}
|
||||
|
||||
// -----
|
||||
|
||||
// Negative test: same rule as above, but with the memory_order clause on the
// inner omp.atomic.read instead of the update op. The capture region verifier
// must reject any nested atomic op that specifies memory_order.
func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
  // expected-error @below {{operations inside capture region must not have memory_order clause}}
  omp.atomic.capture {
    omp.atomic.update %x : memref<i32> {
    ^bb0(%xval: i32):
      %newval = llvm.add %xval, %expr : i32
      omp.yield(%newval : i32)
    }
    omp.atomic.read %v = %x memory_order(seq_cst) : memref<i32>
  }
  return
}
|
||||
|
||||
// -----
|
||||
|
||||
func.func @omp_sections(%data_var : memref<i32>) -> () {
|
||||
// expected-error @below {{expected equal sizes for allocate and allocator variables}}
|
||||
"omp.sections" (%data_var) ({
|
||||
|
|
|
@ -839,6 +839,46 @@ func.func @omp_atomic_update(%x : memref<i32>, %expr : i32, %xBool : memref<i1>,
|
|||
omp.yield(%newval : i32)
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.update memory_order(seq_cst) %[[X]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[XVAL:.*]]: i32):
|
||||
// CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32)
|
||||
omp.atomic.update memory_order(seq_cst) %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.update memory_order(release) %[[X]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[XVAL:.*]]: i32):
|
||||
// CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32)
|
||||
omp.atomic.update memory_order(release) %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.update memory_order(relaxed) %[[X]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[XVAL:.*]]: i32):
|
||||
// CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32)
|
||||
omp.atomic.update memory_order(relaxed) %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.update memory_order(seq_cst) hint(uncontended, speculative) %[[X]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[XVAL:.*]]: i32):
|
||||
// CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32)
|
||||
omp.atomic.update memory_order(seq_cst) hint(uncontended, speculative) %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1038,6 +1078,109 @@ func.func @omp_atomic_capture(%v: memref<i32>, %x: memref<i32>, %expr: i32) {
|
|||
}
|
||||
omp.atomic.read %v = %x : memref<i32>
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.capture memory_order(seq_cst) {
|
||||
// CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[xval:.*]]: i32):
|
||||
// CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[newval]] : i32)
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: }
|
||||
omp.atomic.capture memory_order(seq_cst) {
|
||||
omp.atomic.update %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
omp.atomic.read %v = %x : memref<i32>
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.capture memory_order(acq_rel) {
|
||||
// CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[xval:.*]]: i32):
|
||||
// CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[newval]] : i32)
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: }
|
||||
omp.atomic.capture memory_order(acq_rel) {
|
||||
omp.atomic.update %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
omp.atomic.read %v = %x : memref<i32>
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.capture memory_order(acquire) {
|
||||
// CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[xval:.*]]: i32):
|
||||
// CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[newval]] : i32)
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: }
|
||||
omp.atomic.capture memory_order(acquire) {
|
||||
omp.atomic.update %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
omp.atomic.read %v = %x : memref<i32>
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.capture memory_order(release) {
|
||||
// CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[xval:.*]]: i32):
|
||||
// CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[newval]] : i32)
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: }
|
||||
omp.atomic.capture memory_order(release) {
|
||||
omp.atomic.update %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
omp.atomic.read %v = %x : memref<i32>
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.capture memory_order(relaxed) {
|
||||
// CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[xval:.*]]: i32):
|
||||
// CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[newval]] : i32)
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: }
|
||||
omp.atomic.capture memory_order(relaxed) {
|
||||
omp.atomic.update %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
omp.atomic.read %v = %x : memref<i32>
|
||||
}
|
||||
|
||||
// CHECK: omp.atomic.capture memory_order(seq_cst) hint(contended, speculative) {
|
||||
// CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: (%[[xval:.*]]: i32):
|
||||
// CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32
|
||||
// CHECK-NEXT: omp.yield(%[[newval]] : i32)
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
|
||||
// CHECK-NEXT: }
|
||||
omp.atomic.capture hint(contended, speculative) memory_order(seq_cst) {
|
||||
omp.atomic.update %x : memref<i32> {
|
||||
^bb0(%xval: i32):
|
||||
%newval = llvm.add %xval, %expr : i32
|
||||
omp.yield(%newval : i32)
|
||||
}
|
||||
omp.atomic.read %v = %x : memref<i32>
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue