From f5d29c15bf8f70bcab6a697e7c1cbc04607bee95 Mon Sep 17 00:00:00 2001 From: Shraiysh Vaishay <shraiysh.vaishay@amd.com> Date: Fri, 3 Jun 2022 13:01:07 +0530 Subject: [PATCH] [mlir][OpenMP] Add memory_order clause tests This patch adds tests for memory_order clause for atomic update and capture operations. This patch also adds a check for making sure that the operations inside an omp.atomic.capture region do not specify the memory_order clause. Reviewed By: kiranchandramohan, peixin Differential Revision: https://reviews.llvm.org/D126195 --- mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 5 + mlir/test/Dialect/OpenMP/invalid.mlir | 30 ++++ mlir/test/Dialect/OpenMP/ops.mlir | 143 +++++++++++++++++++ 3 files changed, 178 insertions(+) diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index 19615c06b08d..5f05d71d0c77 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -956,6 +956,11 @@ LogicalResult AtomicCaptureOp::verifyRegions() { if (getFirstOp()->getAttr("hint_val") || getSecondOp()->getAttr("hint_val")) return emitOpError( "operations inside capture region must not have hint clause"); + + if (getFirstOp()->getAttr("memory_order_val") || + getSecondOp()->getAttr("memory_order_val")) + return emitOpError( + "operations inside capture region must not have memory_order clause"); return success(); } diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir index ed5bc00cd7cb..c08b80e5b79e 100644 --- a/mlir/test/Dialect/OpenMP/invalid.mlir +++ b/mlir/test/Dialect/OpenMP/invalid.mlir @@ -952,6 +952,36 @@ func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) { // ----- +func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) { + // expected-error @below {{operations inside capture region must not have memory_order clause}} + omp.atomic.capture { + omp.atomic.update memory_order(seq_cst) %x : memref<i32> { + ^bb0(%xval: i32): + %newval 
= llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x : memref<i32> + } + return +} + +// ----- + +func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) { + // expected-error @below {{operations inside capture region must not have memory_order clause}} + omp.atomic.capture { + omp.atomic.update %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x memory_order(seq_cst) : memref<i32> + } + return +} + +// ----- + func.func @omp_sections(%data_var : memref<i32>) -> () { // expected-error @below {{expected equal sizes for allocate and allocator variables}} "omp.sections" (%data_var) ({ diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir index 4821d6cea9a6..fe85130a8a10 100644 --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -839,6 +839,46 @@ func.func @omp_atomic_update(%x : memref<i32>, %expr : i32, %xBool : memref<i1>, omp.yield(%newval : i32) } + // CHECK: omp.atomic.update memory_order(seq_cst) %[[X]] : memref<i32> + // CHECK-NEXT: (%[[XVAL:.*]]: i32): + // CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32 + // CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32) + omp.atomic.update memory_order(seq_cst) %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + + // CHECK: omp.atomic.update memory_order(release) %[[X]] : memref<i32> + // CHECK-NEXT: (%[[XVAL:.*]]: i32): + // CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32 + // CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32) + omp.atomic.update memory_order(release) %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + + // CHECK: omp.atomic.update memory_order(relaxed) %[[X]] : memref<i32> + // CHECK-NEXT: (%[[XVAL:.*]]: i32): + // CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32 + // CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32) + 
omp.atomic.update memory_order(relaxed) %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + + // CHECK: omp.atomic.update memory_order(seq_cst) hint(uncontended, speculative) %[[X]] : memref<i32> + // CHECK-NEXT: (%[[XVAL:.*]]: i32): + // CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32 + // CHECK-NEXT: omp.yield(%[[NEWVAL]] : i32) + omp.atomic.update memory_order(seq_cst) hint(uncontended, speculative) %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + return } @@ -1038,6 +1078,109 @@ func.func @omp_atomic_capture(%v: memref<i32>, %x: memref<i32>, %expr: i32) { } omp.atomic.read %v = %x : memref<i32> } + + // CHECK: omp.atomic.capture memory_order(seq_cst) { + // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32> + // CHECK-NEXT: (%[[xval:.*]]: i32): + // CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32 + // CHECK-NEXT: omp.yield(%[[newval]] : i32) + // CHECK-NEXT: } + // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32> + // CHECK-NEXT: } + omp.atomic.capture memory_order(seq_cst) { + omp.atomic.update %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x : memref<i32> + } + + // CHECK: omp.atomic.capture memory_order(acq_rel) { + // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32> + // CHECK-NEXT: (%[[xval:.*]]: i32): + // CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32 + // CHECK-NEXT: omp.yield(%[[newval]] : i32) + // CHECK-NEXT: } + // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32> + // CHECK-NEXT: } + omp.atomic.capture memory_order(acq_rel) { + omp.atomic.update %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x : memref<i32> + } + + // CHECK: omp.atomic.capture memory_order(acquire) { + // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32> + // CHECK-NEXT: (%[[xval:.*]]: i32): + 
// CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32 + // CHECK-NEXT: omp.yield(%[[newval]] : i32) + // CHECK-NEXT: } + // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32> + // CHECK-NEXT: } + omp.atomic.capture memory_order(acquire) { + omp.atomic.update %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x : memref<i32> + } + + // CHECK: omp.atomic.capture memory_order(release) { + // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32> + // CHECK-NEXT: (%[[xval:.*]]: i32): + // CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32 + // CHECK-NEXT: omp.yield(%[[newval]] : i32) + // CHECK-NEXT: } + // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32> + // CHECK-NEXT: } + omp.atomic.capture memory_order(release) { + omp.atomic.update %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x : memref<i32> + } + + // CHECK: omp.atomic.capture memory_order(relaxed) { + // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32> + // CHECK-NEXT: (%[[xval:.*]]: i32): + // CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32 + // CHECK-NEXT: omp.yield(%[[newval]] : i32) + // CHECK-NEXT: } + // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32> + // CHECK-NEXT: } + omp.atomic.capture memory_order(relaxed) { + omp.atomic.update %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x : memref<i32> + } + + // CHECK: omp.atomic.capture memory_order(seq_cst) hint(contended, speculative) { + // CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32> + // CHECK-NEXT: (%[[xval:.*]]: i32): + // CHECK-NEXT: %[[newval:.*]] = llvm.add %[[xval]], %[[expr]] : i32 + // CHECK-NEXT: omp.yield(%[[newval]] : i32) + // CHECK-NEXT: } + // CHECK-NEXT: omp.atomic.read %[[v]] = %[[x]] : memref<i32> + // CHECK-NEXT: } + omp.atomic.capture hint(contended, speculative) 
memory_order(seq_cst) { + omp.atomic.update %x : memref<i32> { + ^bb0(%xval: i32): + %newval = llvm.add %xval, %expr : i32 + omp.yield(%newval : i32) + } + omp.atomic.read %v = %x : memref<i32> + } + return }