diff --git a/llvm/test/CodeGen/R600/rotr.ll b/llvm/test/CodeGen/R600/rotr.ll
index 8bb5eaec8208..ff4da412d6b6 100644
--- a/llvm/test/CodeGen/R600/rotr.ll
+++ b/llvm/test/CodeGen/R600/rotr.ll
@@ -1,11 +1,9 @@
-; RUN: llc < %s -debug-only=isel -march=r600 -mcpu=redwood -o - 2>&1 | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -debug-only=isel -march=r600 -mcpu=SI -o - 2>&1 | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood -o - | FileCheck --check-prefix=R600-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=SI -o - | FileCheck --check-prefix=SI-CHECK %s
 
-; R600-CHECK: rotr
 ; R600-CHECK: @rotr
 ; R600-CHECK: BIT_ALIGN_INT
 
-; SI-CHECK: rotr
 ; SI-CHECK: @rotr
 ; SI-CHECK: V_ALIGNBIT_B32
 define void @rotr(i32 addrspace(1)* %in, i32 %x, i32 %y) {
@@ -18,13 +16,11 @@ entry:
   ret void
 }
 
-; R600-CHECK: rotr
 ; R600-CHECK: @rotl
 ; R600-CHECK: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
 ; R600-CHECK-NEXT: 32
 ; R600-CHECK: BIT_ALIGN_INT {{\** T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PV.[xyzw]}}
 
-; SI-CHECK: rotr
 ; SI-CHECK: @rotl
 ; SI-CHECK: V_SUB_I32_e32 [[DST:VGPR[0-9]+]], 32, {{VGPR[0-9]+}}
 ; SI-CHECK: V_ALIGNBIT_B32 {{VGPR[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}, [[DST]]