llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
#
# Test that we can produce a tbz when we have a sgt compare against -1.
#
# The bit tested should be the size of the test register minus 1.
#
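# For example, in the 64-bit case below, a branch on
#   %cmp:gpr(s32) = G_ICMP intpred(sgt), %copy(s64), %negative_one
# (with %negative_one defined as G_CONSTANT i64 -1) is expected to select to a
# single test of the sign bit:
#   TBZX %copy, 63, %bb.1
# The 32-bit case is expected to test bit 31 with TBZW instead.
#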
...
---
name: tbzx_sgt
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: tbzx_sgt
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK:   %copy:gpr64 = COPY $x0
  ; CHECK:   TBZX %copy, 63, %bb.1
  ; CHECK:   B %bb.0
  ; CHECK: bb.1:
  ; CHECK:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0
    %negative_one:gpr(s64) = G_CONSTANT i64 -1
    %cmp:gpr(s32) = G_ICMP intpred(sgt), %copy(s64), %negative_one
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: tbzw_sgt
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: tbzw_sgt
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK:   %copy:gpr32 = COPY $w0
  ; CHECK:   TBZW %copy, 31, %bb.1
  ; CHECK:   B %bb.0
  ; CHECK: bb.1:
  ; CHECK:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s32) = COPY $w0
    %negative_one:gpr(s32) = G_CONSTANT i32 -1
    %cmp:gpr(s32) = G_ICMP intpred(sgt), %copy(s32), %negative_one
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
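# Negative test: the compare is against 1 rather than -1, so the sign-bit fold
# does not apply and a SUBS + Bcc sequence is expected instead.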
---
name: no_tbz_not_negative_one
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: no_tbz_not_negative_one
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK:   %copy:gpr32sp = COPY $w0
  ; CHECK:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %copy, 1, 0, implicit-def $nzcv
  ; CHECK:   Bcc 12, %bb.1, implicit $nzcv
  ; CHECK:   B %bb.0
  ; CHECK: bb.1:
  ; CHECK:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s32) = COPY $w0
    %one:gpr(s32) = G_CONSTANT i32 1
    %cmp:gpr(s32) = G_ICMP intpred(sgt), %copy(s32), %one
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
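# Negative test: the compare operand is a G_AND rather than a plain copy. The
# AND is expected to remain, with the compare selected as an ADDS immediate
# (CMN) followed by a conditional branch.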
---
name: dont_fold_and
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: dont_fold_and
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK:   %copy:gpr64 = COPY $x0
  ; CHECK:   %and:gpr64sp = ANDXri %copy, 8000
  ; CHECK:   [[ADDSXri:%[0-9]+]]:gpr64 = ADDSXri %and, 1, 0, implicit-def $nzcv
  ; CHECK:   Bcc 12, %bb.1, implicit $nzcv
  ; CHECK:   B %bb.0
  ; CHECK: bb.1:
  ; CHECK:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %negative_one:gpr(s64) = G_CONSTANT i64 -1
    %c:gpr(s64) = G_CONSTANT i64 8
    %and:gpr(s64) = G_AND %copy, %bit
    %cmp:gpr(s32) = G_ICMP intpred(sgt), %and(s64), %negative_one
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
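# Negative test: -1 is the LHS of the sgt compare rather than the RHS, so the
# pattern does not match and a SUBS + Bcc sequence is expected instead.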
---
name: dont_commute
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: dont_commute
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK:   %copy:gpr64 = COPY $x0
  ; CHECK:   %negative_one:gpr64 = MOVi64imm -1
  ; CHECK:   [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %negative_one, %copy, implicit-def $nzcv
  ; CHECK:   Bcc 12, %bb.1, implicit $nzcv
  ; CHECK:   B %bb.0
  ; CHECK: bb.1:
  ; CHECK:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0
    %negative_one:gpr(s64) = G_CONSTANT i64 -1
    %cmp:gpr(s32) = G_ICMP intpred(sgt), %negative_one, %copy(s64)
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...