Revert "[AArch64][GlobalISel] Make G_USUBO legal and select it."

This reverts commit 3dedad475d.

Broke UBSan on Android:
http://lab.llvm.org:8011/#/builders/77/builds/3082

More details at: https://reviews.llvm.org/D95032
Mitch Phillips 2021-01-22 11:50:35 -08:00
parent e3a7532cc9
commit 19ec559c66
8 changed files with 4 additions and 636 deletions
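For context, G_USUBO is GlobalISel's generic unsigned subtract-with-overflow operation, the counterpart of the llvm.usub.with.overflow intrinsic: it produces the difference plus an s1 flag meant to be set when the subtraction borrows. A minimal C++ reference for that semantics (usubo32 is an illustrative name, not an LLVM API):

#include <cstdint>
#include <utility>

// Reference semantics of a 32-bit G_USUBO: {difference, overflow}, where the
// overflow (borrow) bit is set exactly when the unsigned subtraction wraps,
// i.e. when a < b.
std::pair<uint32_t, bool> usubo32(uint32_t a, uint32_t b) {
  return {a - b, a < b};
}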


@@ -2745,8 +2745,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
   }
   case TargetOpcode::G_SADDO:
   case TargetOpcode::G_UADDO:
-  case TargetOpcode::G_SSUBO:
-  case TargetOpcode::G_USUBO: {
+  case TargetOpcode::G_SSUBO: {
     // Emit the operation and get the correct condition code.
     MachineIRBuilder MIRBuilder(I);
     auto OpAndCC = emitOverflowOp(Opcode, I.getOperand(0).getReg(),
@@ -4377,8 +4376,6 @@ AArch64InstructionSelector::emitOverflowOp(unsigned Opcode, Register Dst,
     return std::make_pair(emitADDS(Dst, LHS, RHS, MIRBuilder), AArch64CC::HS);
   case TargetOpcode::G_SSUBO:
     return std::make_pair(emitSUBS(Dst, LHS, RHS, MIRBuilder), AArch64CC::VS);
-  case TargetOpcode::G_USUBO:
-    return std::make_pair(emitSUBS(Dst, LHS, RHS, MIRBuilder), AArch64CC::HS);
   }
 }
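The condition codes above follow the AArch64 flag conventions: ADDS/SUBS write NZCV, and the select() case then materializes the chosen condition into the s1 overflow result (the CSINCWr sequences visible in the deleted tests further down). A small stand-alone model of what a 32-bit SUBS leaves in the flags; the NZCV struct and subs32 helper are illustrative only, not LLVM code:

#include <cstdint>

// Illustrative model of the NZCV flags produced by a 32-bit SUBS (not LLVM
// code). HS/LO test the carry flag C (set means "no borrow"); VS/VC test the
// signed-overflow flag V.
struct NZCV { bool N, Z, C, V; };

NZCV subs32(uint32_t lhs, uint32_t rhs, uint32_t &res) {
  res = lhs - rhs;
  NZCV f;
  f.N = static_cast<int32_t>(res) < 0;             // negative
  f.Z = res == 0;                                  // zero
  f.C = lhs >= rhs;                                // carry set: HS holds
  f.V = (((lhs ^ rhs) & (lhs ^ res)) >> 31) != 0;  // signed overflow: VS holds
  return f;
}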


@@ -165,8 +165,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});
-  getActionDefinitionsBuilder(
-      {G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
+  getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO})
       .legalFor({{s32, s1}, {s64, s1}})
       .minScalar(0, s32);
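Spelled out, the removed rule grouped G_USUBO with the other carry/overflow ops, declared the {result, carry} type pairs {s32, s1} and {s64, s1} directly legal, and widened narrower results to s32. The pre-revert form, reassembled from the hunk above with the intent of each call noted (nothing new here):

// Pre-revert form of the rule: type index 0 is the arithmetic result,
// type index 1 is the s1 carry/overflow output.
getActionDefinitionsBuilder(
    {G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
    .legalFor({{s32, s1}, {s64, s1}}) // directly selectable {result, carry} pairs
    .minScalar(0, s32);               // widen narrower results to at least s32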


@@ -73,44 +73,6 @@ body: |
%5:_(s64) = G_ANYEXT %4(s8)
$x0 = COPY %5(s64)
...
---
name: test_scalar_uaddo_32
body: |
bb.0.entry:
; CHECK-LABEL: name: test_scalar_uaddo_32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY1]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDO1]](s1)
; CHECK: $w0 = COPY [[UADDO]](s32)
; CHECK: $w1 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%2:_(s32), %3:_(s1) = G_UADDO %0, %1
%4:_(s32) = G_ANYEXT %3
$w0 = COPY %2(s32)
$w1 = COPY %4(s32)
...
---
name: test_scalar_saddo_32
body: |
bb.0.entry:
; CHECK-LABEL: name: test_scalar_saddo_32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[SADDO:%[0-9]+]]:_(s32), [[SADDO1:%[0-9]+]]:_(s1) = G_SADDO [[COPY]], [[COPY1]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SADDO1]](s1)
; CHECK: $w0 = COPY [[SADDO]](s32)
; CHECK: $w1 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%2:_(s32), %3:_(s1) = G_SADDO %0, %1
%4:_(s32) = G_ANYEXT %3
$w0 = COPY %2(s32)
$w1 = COPY %4(s32)
...
---
name: test_vector_add


@@ -1,59 +1,6 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=legalizer %s -o - | FileCheck %s
---
name: test_scalar_sub_big
body: |
bb.0.entry:
; CHECK-LABEL: name: test_scalar_sub_big
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
; CHECK: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY2]]
; CHECK: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY3]], [[USUBO1]]
; CHECK: $x0 = COPY [[USUBO]](s64)
; CHECK: $x1 = COPY [[USUBE]](s64)
%0:_(s64) = COPY $x0
%1:_(s64) = COPY $x1
%2:_(s64) = COPY $x2
%3:_(s64) = COPY $x3
%4:_(s128) = G_MERGE_VALUES %0(s64), %1(s64)
%5:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
%6:_(s128) = G_SUB %4, %5
%7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
$x0 = COPY %7(s64)
$x1 = COPY %8(s64)
...
---
name: test_scalar_sub_big_nonpow2
body: |
bb.0.entry:
; CHECK-LABEL: name: test_scalar_sub_big_nonpow2
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
; CHECK: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY1]]
; CHECK: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY2]], [[USUBO1]]
; CHECK: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[COPY2]], [[COPY3]], [[USUBE1]]
; CHECK: $x0 = COPY [[USUBO]](s64)
; CHECK: $x1 = COPY [[USUBE]](s64)
; CHECK: $x2 = COPY [[USUBE2]](s64)
%0:_(s64) = COPY $x0
%1:_(s64) = COPY $x1
%2:_(s64) = COPY $x2
%3:_(s64) = COPY $x3
%4:_(s192) = G_MERGE_VALUES %0(s64), %1(s64), %2(s64)
%5:_(s192) = G_MERGE_VALUES %1(s64), %2(s64), %3(s64)
%6:_(s192) = G_SUB %4, %5
%7:_(s64), %8:_(s64), %9:_(s64) = G_UNMERGE_VALUES %6(s192)
$x0 = COPY %7(s64)
$x1 = COPY %8(s64)
$x2 = COPY %9(s64)
...
---
name: test_scalar_sub_small
body: |
bb.0.entry:
@@ -74,41 +21,3 @@ body: |
$x0 = COPY %5(s64)
...
---
name: test_scalar_usubo_32
body: |
bb.0.entry:
; CHECK-LABEL: name: test_scalar_usubo_32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY1]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[USUBO1]](s1)
; CHECK: $w0 = COPY [[USUBO]](s32)
; CHECK: $w1 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%2:_(s32), %3:_(s1) = G_USUBO %0, %1
%4:_(s32) = G_ANYEXT %3
$w0 = COPY %2(s32)
$w1 = COPY %4(s32)
...
---
name: test_scalar_ssubo_32
body: |
bb.0.entry:
; CHECK-LABEL: name: test_scalar_ssubo_32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[SSUBO:%[0-9]+]]:_(s32), [[SSUBO1:%[0-9]+]]:_(s1) = G_SSUBO [[COPY]], [[COPY1]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SSUBO1]](s1)
; CHECK: $w0 = COPY [[SSUBO]](s32)
; CHECK: $w1 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%2:_(s32), %3:_(s1) = G_SSUBO %0, %1
%4:_(s32) = G_ANYEXT %3
$w0 = COPY %2(s32)
$w1 = COPY %4(s32)
...
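The deleted test_scalar_sub_big and test_scalar_sub_big_nonpow2 checks above document how a wide G_SUB is narrowed: the lowest limb is subtracted with G_USUBO and each higher limb with G_USUBE, threading the borrow between them. A plain C++ sketch of the same two-limb computation (U128 and sub128 are illustrative names):

#include <cstdint>

struct U128 { uint64_t lo, hi; };

// Two-limb subtraction with borrow propagation, mirroring the
// G_USUBO (low limb) + G_USUBE (high limb) expansion checked by the removed
// test_scalar_sub_big above.
U128 sub128(U128 a, U128 b) {
  uint64_t lo = a.lo - b.lo;
  uint64_t borrow = a.lo < b.lo ? 1 : 0;  // the s1 result of G_USUBO
  uint64_t hi = a.hi - b.hi - borrow;     // G_USUBE consumes the borrow
  return {lo, hi};
}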


@@ -300,9 +300,8 @@
 # DEBUG-NEXT: .. the first uncovered type index: 2, OK
 # DEBUG-NEXT: .. the first uncovered imm index: 0, OK
 # DEBUG-NEXT: G_USUBO (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
-# DEBUG-NEXT: .. the first uncovered type index: 2, OK
-# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: G_USUBE (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
 # DEBUG-NEXT: .. the first uncovered type index: 2, OK


@@ -1,166 +0,0 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel -run-pass=instruction-select %s -o - | FileCheck %s
...
---
name: saddo_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; CHECK-LABEL: name: saddo_s32
; CHECK: liveins: $w0, $w1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
; CHECK: $w0 = COPY [[UBFMWri1]]
; CHECK: RET_ReallyLR implicit $w0
%0:gpr(s32) = COPY $w0
%1:gpr(s32) = COPY $w1
%3:gpr(s32), %4:gpr(s1) = G_SADDO %0, %1
%5:gpr(s8) = G_ZEXT %4(s1)
%6:gpr(s32) = G_ZEXT %5(s8)
$w0 = COPY %6(s32)
RET_ReallyLR implicit $w0
...
---
name: saddo_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: saddo_s64
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
; CHECK: $w0 = COPY [[UBFMWri1]]
; CHECK: RET_ReallyLR implicit $w0
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = COPY $x1
%3:gpr(s64), %4:gpr(s1) = G_SADDO %0, %1
%5:gpr(s8) = G_ZEXT %4(s1)
%6:gpr(s32) = G_ZEXT %5(s8)
$w0 = COPY %6(s32)
RET_ReallyLR implicit $w0
...
---
name: saddo_s32_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get ADDSWri when we can fold in a constant.
;
; CHECK-LABEL: name: saddo_s32_imm
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy:gpr32sp = COPY $w0
; CHECK: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%constant:gpr(s32) = G_CONSTANT i32 16
%add:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy, %constant
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: saddo_s32_shifted
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get ADDSWrs when we can fold in a shift.
;
; CHECK-LABEL: name: saddo_s32_shifted
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy1:gpr32 = COPY $w0
; CHECK: %copy2:gpr32 = COPY $w1
; CHECK: %add:gpr32 = ADDSWrs %copy1, %copy2, 16, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy1:gpr(s32) = COPY $w0
%copy2:gpr(s32) = COPY $w1
%constant:gpr(s32) = G_CONSTANT i32 16
%shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
%add:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy1, %shift
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: saddo_s32_neg_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get SUBSWri when we can fold in a negative constant.
;
; CHECK-LABEL: name: saddo_s32_neg_imm
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy:gpr32sp = COPY $w0
; CHECK: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%constant:gpr(s32) = G_CONSTANT i32 -16
%add:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy, %constant
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: saddo_arith_extended
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $x0
; Check that we get ADDSXrx.
; CHECK-LABEL: name: saddo_arith_extended
; CHECK: liveins: $w0, $x0
; CHECK: %reg0:gpr64sp = COPY $x0
; CHECK: %reg1:gpr32 = COPY $w0
; CHECK: %add:gpr64 = ADDSXrx %reg0, %reg1, 18, implicit-def $nzcv
; CHECK: %flags:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $x0 = COPY %add
; CHECK: RET_ReallyLR implicit $x0
%reg0:gpr(s64) = COPY $x0
%reg1:gpr(s32) = COPY $w0
%ext:gpr(s64) = G_ZEXT %reg1(s32)
%cst:gpr(s64) = G_CONSTANT i64 2
%shift:gpr(s64) = G_SHL %ext, %cst(s64)
%add:gpr(s64), %flags:gpr(s1) = G_SADDO %reg0, %shift
$x0 = COPY %add(s64)
RET_ReallyLR implicit $x0
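The deleted checks above show the selection pattern for the signed case: the addition is emitted as a flag-setting ADDSWrr/ADDSXrr (or ADDSWri/ADDSWrs/ADDSXrx when an immediate, shift, or extend folds in), and the overflow bit is read back with CSINCWr $wzr, $wzr, 7. Condition 7 is VC, and CSINC of two zero registers yields 1 exactly when the condition fails, i.e. when V is set. Roughly the source-level shape of saddo_s32, as an illustration only (saddo32 is an invented name; __builtin_add_overflow is the Clang/GCC builtin):

#include <cstdint>

// Source-level shape of the saddo_s32 test above: an overflow-checked
// signed 32-bit add; the returned flag corresponds to the V bit that the
// selected CSINC materializes.
bool saddo32(int32_t a, int32_t b, int32_t &out) {
  return __builtin_add_overflow(a, b, &out);
}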


@@ -1,166 +0,0 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel -run-pass=instruction-select %s -o - | FileCheck %s
...
---
name: ssubo_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; CHECK-LABEL: name: ssubo_s32
; CHECK: liveins: $w0, $w1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
; CHECK: $w0 = COPY [[UBFMWri1]]
; CHECK: RET_ReallyLR implicit $w0
%0:gpr(s32) = COPY $w0
%1:gpr(s32) = COPY $w1
%3:gpr(s32), %4:gpr(s1) = G_SSUBO %0, %1
%5:gpr(s8) = G_ZEXT %4(s1)
%6:gpr(s32) = G_ZEXT %5(s8)
$w0 = COPY %6(s32)
RET_ReallyLR implicit $w0
...
---
name: ssubo_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ssubo_s64
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
; CHECK: $w0 = COPY [[UBFMWri1]]
; CHECK: RET_ReallyLR implicit $w0
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = COPY $x1
%3:gpr(s64), %4:gpr(s1) = G_SSUBO %0, %1
%5:gpr(s8) = G_ZEXT %4(s1)
%6:gpr(s32) = G_ZEXT %5(s8)
$w0 = COPY %6(s32)
RET_ReallyLR implicit $w0
...
---
name: ssubo_s32_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get ADDSWri when we can fold in a constant.
;
; CHECK-LABEL: name: ssubo_s32_imm
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy:gpr32sp = COPY $w0
; CHECK: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%constant:gpr(s32) = G_CONSTANT i32 16
%add:gpr(s32), %overflow:gpr(s1) = G_SSUBO %copy, %constant
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: ssubo_s32_shifted
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get ADDSWrs when we can fold in a shift.
;
; CHECK-LABEL: name: ssubo_s32_shifted
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy1:gpr32 = COPY $w0
; CHECK: %copy2:gpr32 = COPY $w1
; CHECK: %add:gpr32 = SUBSWrs %copy1, %copy2, 16, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy1:gpr(s32) = COPY $w0
%copy2:gpr(s32) = COPY $w1
%constant:gpr(s32) = G_CONSTANT i32 16
%shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
%add:gpr(s32), %overflow:gpr(s1) = G_SSUBO %copy1, %shift
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: ssubo_s32_neg_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get SUBSWri when we can fold in a negative constant.
;
; CHECK-LABEL: name: ssubo_s32_neg_imm
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy:gpr32sp = COPY $w0
; CHECK: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%constant:gpr(s32) = G_CONSTANT i32 -16
%add:gpr(s32), %overflow:gpr(s1) = G_SSUBO %copy, %constant
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: ssubo_arith_extended
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $x0
; Check that we get ADDSXrx.
; CHECK-LABEL: name: ssubo_arith_extended
; CHECK: liveins: $w0, $x0
; CHECK: %reg0:gpr64sp = COPY $x0
; CHECK: %reg1:gpr32 = COPY $w0
; CHECK: %add:gpr64 = SUBSXrx %reg0, %reg1, 18, implicit-def $nzcv
; CHECK: %flags:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
; CHECK: $x0 = COPY %add
; CHECK: RET_ReallyLR implicit $x0
%reg0:gpr(s64) = COPY $x0
%reg1:gpr(s32) = COPY $w0
%ext:gpr(s64) = G_ZEXT %reg1(s32)
%cst:gpr(s64) = G_CONSTANT i64 2
%shift:gpr(s64) = G_SHL %ext, %cst(s64)
%add:gpr(s64), %flags:gpr(s1) = G_SSUBO %reg0, %shift
$x0 = COPY %add(s64)
RET_ReallyLR implicit $x0


@@ -1,166 +0,0 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel -run-pass=instruction-select %s -o - | FileCheck %s
...
---
name: usubo_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; CHECK-LABEL: name: usubo_s32
; CHECK: liveins: $w0, $w1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
; CHECK: $w0 = COPY [[UBFMWri1]]
; CHECK: RET_ReallyLR implicit $w0
%0:gpr(s32) = COPY $w0
%1:gpr(s32) = COPY $w1
%3:gpr(s32), %4:gpr(s1) = G_USUBO %0, %1
%5:gpr(s8) = G_ZEXT %4(s1)
%6:gpr(s32) = G_ZEXT %5(s8)
$w0 = COPY %6(s32)
RET_ReallyLR implicit $w0
...
---
name: usubo_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: usubo_s64
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
; CHECK: $w0 = COPY [[UBFMWri1]]
; CHECK: RET_ReallyLR implicit $w0
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = COPY $x1
%3:gpr(s64), %4:gpr(s1) = G_USUBO %0, %1
%5:gpr(s8) = G_ZEXT %4(s1)
%6:gpr(s32) = G_ZEXT %5(s8)
$w0 = COPY %6(s32)
RET_ReallyLR implicit $w0
...
---
name: usubo_s32_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get ADDSWri when we can fold in a constant.
;
; CHECK-LABEL: name: usubo_s32_imm
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy:gpr32sp = COPY $w0
; CHECK: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%constant:gpr(s32) = G_CONSTANT i32 16
%add:gpr(s32), %overflow:gpr(s1) = G_USUBO %copy, %constant
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: usubo_s32_shifted
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get ADDSWrs when we can fold in a shift.
;
; CHECK-LABEL: name: usubo_s32_shifted
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy1:gpr32 = COPY $w0
; CHECK: %copy2:gpr32 = COPY $w1
; CHECK: %add:gpr32 = SUBSWrs %copy1, %copy2, 16, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy1:gpr(s32) = COPY $w0
%copy2:gpr(s32) = COPY $w1
%constant:gpr(s32) = G_CONSTANT i32 16
%shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
%add:gpr(s32), %overflow:gpr(s1) = G_USUBO %copy1, %shift
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: usubo_s32_neg_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $w1, $x2
; Check that we get SUBSWri when we can fold in a negative constant.
;
; CHECK-LABEL: name: usubo_s32_neg_imm
; CHECK: liveins: $w0, $w1, $x2
; CHECK: %copy:gpr32sp = COPY $w0
; CHECK: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
; CHECK: $w0 = COPY %add
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%constant:gpr(s32) = G_CONSTANT i32 -16
%add:gpr(s32), %overflow:gpr(s1) = G_USUBO %copy, %constant
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: usubo_arith_extended
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1.entry:
liveins: $w0, $x0
; Check that we get ADDSXrx.
; CHECK-LABEL: name: usubo_arith_extended
; CHECK: liveins: $w0, $x0
; CHECK: %reg0:gpr64sp = COPY $x0
; CHECK: %reg1:gpr32 = COPY $w0
; CHECK: %add:gpr64 = SUBSXrx %reg0, %reg1, 18, implicit-def $nzcv
; CHECK: %flags:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
; CHECK: $x0 = COPY %add
; CHECK: RET_ReallyLR implicit $x0
%reg0:gpr(s64) = COPY $x0
%reg1:gpr(s32) = COPY $w0
%ext:gpr(s64) = G_ZEXT %reg1(s32)
%cst:gpr(s64) = G_CONSTANT i64 2
%shift:gpr(s64) = G_SHL %ext, %cst(s64)
%add:gpr(s64), %flags:gpr(s1) = G_USUBO %reg0, %shift
$x0 = COPY %add(s64)
RET_ReallyLR implicit $x0
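As in the signed files, the deleted usubo tests exercise operand folding: an immediate RHS becomes SUBSWri (a negative immediate flips to ADDSWri), a shifted register becomes SUBSWrs, and a zero-extended, shifted 32-bit RHS becomes a single SUBSXrx with a UXTW #2 extended-register operand (usubo_arith_extended). Roughly the source-level shape of that last case, as an illustration only (usubo_extended is an invented name; __builtin_sub_overflow is the Clang/GCC builtin):

#include <cstdint>

// Source-level shape of usubo_arith_extended above: an overflow-checked
// unsigned subtract whose right-hand side is a 32-bit value zero-extended
// and scaled by 4, which the selector folds into one SUBSXrx (UXTW #2).
bool usubo_extended(uint64_t x, uint32_t y, uint64_t &out) {
  return __builtin_sub_overflow(x, static_cast<uint64_t>(y) << 2, &out);
}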