# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -debugify-and-strip-all-safe -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="opt_brcond_by_inverting_cond" -global-isel -verify-machineinstrs %s -o - | FileCheck %s

# Need asserts for the only-enable-rule to work.
# REQUIRES: asserts

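# The opt_brcond_by_inverting_cond combine inverts the branch condition
# (materialized in the checks below as a G_XOR of the G_ICMP result with
# i1 true) and swaps the G_BRCOND/G_BR targets, so the unconditional branch
# points at the layout successor and can later be removed as a fallthrough.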
--- |
  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
  target triple = "arm64-apple-ios5.0.0"

  define i32 @foo(i32 %a, i32 %b) {
  entry:
    %cmp = icmp sgt i32 %a, 0
    br i1 %cmp, label %if.then, label %if.end

  if.then:
    %add = add nsw i32 %b, %a
    %add1 = add nsw i32 %a, %b
    br label %return

  if.end:
    %mul = mul nsw i32 %b, %b
    %add2 = add nuw nsw i32 %mul, 2
    br label %return

  return:
    %retval.0 = phi i32 [ %add1, %if.then ], [ %add2, %if.end ]
    ret i32 %retval.0
  }

...
---
name: foo
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: foo
  ; CHECK: bb.0.entry:
  ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK: liveins: $w0, $w1
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
  ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
  ; CHECK: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; CHECK: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C2]]
  ; CHECK: G_BRCOND [[XOR]](s1), %bb.2
  ; CHECK: G_BR %bb.1
  ; CHECK: bb.1.if.then:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY1]], [[COPY]]
  ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = nsw G_ADD [[ADD]], [[COPY1]]
  ; CHECK: G_BR %bb.3
  ; CHECK: bb.2.if.end:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[COPY1]], [[COPY1]]
  ; CHECK: [[ADD2:%[0-9]+]]:_(s32) = nuw nsw G_ADD [[MUL]], [[C1]]
  ; CHECK: bb.3.return:
  ; CHECK: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[ADD1]](s32), %bb.1, [[ADD2]](s32), %bb.2
  ; CHECK: $w0 = COPY [[PHI]](s32)
  ; CHECK: RET_ReallyLR implicit $w0
  bb.1.entry:
    liveins: $w0, $w1

    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_CONSTANT i32 0
    %5:_(s32) = G_CONSTANT i32 2
    %3:_(s1) = G_ICMP intpred(sgt), %0(s32), %2
    G_BRCOND %3(s1), %bb.2
    G_BR %bb.3

  bb.2.if.then:
    %7:_(s32) = nsw G_ADD %1, %0
    %8:_(s32) = nsw G_ADD %7, %1
    G_BR %bb.4

  bb.3.if.end:
    %4:_(s32) = nsw G_MUL %1, %1
    %6:_(s32) = nuw nsw G_ADD %4, %5

  bb.4.return:
    %10:_(s32) = G_PHI %8(s32), %bb.2, %6(s32), %bb.3
    $w0 = COPY %10(s32)
    RET_ReallyLR implicit $w0

...