# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
---
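# The s8 G_MUL below is narrower than any legal AArch64 scalar type, so the
# legalizer is expected to widen it to an s32 G_MUL, as the CHECK lines verify.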
name: test_scalar_mul_small
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_scalar_mul_small
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[MUL]](s32)
    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s8) = G_TRUNC %0(s64)
    %3:_(s8) = G_TRUNC %1(s64)
    %4:_(s8) = G_MUL %2, %3
    %5:_(s64) = G_ANYEXT %4(s8)
    $x0 = COPY %5(s64)

...
---
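# G_SMULO is lowered to G_MUL + G_SMULH: signed overflow occurred iff the high
# half differs from the sign bits of the low half (G_ASHR by 63).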
name: test_smul_overflow
body: |
  bb.0:
    ; CHECK-LABEL: name: test_smul_overflow
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[SMULH:%[0-9]+]]:_(s64) = G_SMULH [[COPY]], [[COPY1]]
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]](s64)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s64), [[ASHR]]
    ; CHECK: $x0 = COPY [[MUL]](s64)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: $w0 = COPY [[COPY2]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64), %3:_(s1) = G_SMULO %0, %1
    $x0 = COPY %2(s64)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)

...
---
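# G_UMULO is lowered the same way with G_UMULH: unsigned overflow occurred iff
# the high half of the full product is nonzero.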
name: test_umul_overflow
body: |
  bb.0:
    ; CHECK-LABEL: name: test_umul_overflow
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
    ; CHECK: $x0 = COPY [[MUL]](s64)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: $w0 = COPY [[COPY2]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64), %3:_(s1) = G_UMULO %0, %1
    $x0 = COPY %2(s64)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)

...
---
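# A <2 x s64> G_MUL has no single legal instruction, so it is scalarized: both
# vectors are unmerged, the s64 elements multiplied, and the result rebuilt
# with G_BUILD_VECTOR.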
name: vector_mul_scalarize
liveins:
  - { reg: '$q0' }
  - { reg: '$q1' }
body: |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: vector_mul_scalarize
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[UV]], [[UV2]]
    ; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[UV1]], [[UV3]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL1]](s64)
    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = G_MUL %0, %1
    $q0 = COPY %2(<2 x s64>)
    RET_ReallyLR implicit $q0
...
---
name: test_umulo_overflow_no_invalid_mir
alignment: 4
tracksRegLiveness: true
liveins:
  - { reg: '$x0' }
  - { reg: '$x1' }
  - { reg: '$x2' }
frameInfo:
  maxAlignment: 16
stack:
  - { id: 0, size: 8, alignment: 8 }
  - { id: 1, size: 8, alignment: 8 }
  - { id: 2, size: 16, alignment: 16 }
  - { id: 3, size: 16, alignment: 8 }
machineFunctionInfo: {}
body: |
  bb.1:
    liveins: $x0, $x1, $x2
    ; Check that the overflow result doesn't generate incorrect MIR by using a G_CONSTANT 0
    ; before it's been defined.
    ; CHECK-LABEL: name: test_umulo_overflow_no_invalid_mir
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
    ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
    ; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3
    ; CHECK: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store 8)
    ; CHECK: G_STORE [[COPY1]](s64), [[FRAME_INDEX1]](p0) :: (store 8)
    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load 8)
    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[LOAD1]]
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[LOAD1]]
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
    ; CHECK: G_STORE [[C]](s64), [[FRAME_INDEX2]](p0) :: (store 8, align 1)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
    ; CHECK: $x0 = COPY [[MUL]](s64)
    ; CHECK: $x1 = COPY [[AND]](s64)
    ; CHECK: RET_ReallyLR implicit $x0
    %0:_(p0) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64) = COPY $x2
    %25:_(s32) = G_CONSTANT i32 0
    %3:_(p0) = G_FRAME_INDEX %stack.0
    %4:_(p0) = G_FRAME_INDEX %stack.1
    %6:_(p0) = G_FRAME_INDEX %stack.3
    G_STORE %2(s64), %3(p0) :: (store 8)
    G_STORE %1(s64), %4(p0) :: (store 8)
    %7:_(s64) = G_LOAD %3(p0) :: (dereferenceable load 8)
    %8:_(s64) = G_LOAD %4(p0) :: (dereferenceable load 8)
    %9:_(s64), %10:_(s1) = G_UMULO %7, %8
    %31:_(s64) = G_CONSTANT i64 0
    G_STORE %31(s64), %6(p0) :: (store 8, align 1)
    %16:_(s64) = G_ZEXT %10(s1)
    $x0 = COPY %9(s64)
    $x1 = COPY %16(s64)
    RET_ReallyLR implicit $x0

...