llvm-project/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
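# The CHECK lines below show the pass forming a low-overhead loop
# (t2LEUpdate) while leaving the MVE_VCTP16/VPST predication in place, i.e.
# tail predication is not applied. Judging by the test name, the suspect
# live-out is the shifted element count feeding the middle-block vctp.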
--- |
  define dso_local arm_aapcs_vfpcc signext i16 @wrong_liveout_shift(i8* nocapture readonly %b, i8* nocapture readonly %c, i32 %N) {
  entry:
    %cmp11 = icmp eq i32 %N, 0
    %0 = add i32 %N, 7
    %1 = lshr i32 %0, 3
    %2 = shl nuw i32 %1, 3
    %3 = add i32 %2, -8
    %4 = lshr i32 %3, 3
    %5 = add nuw nsw i32 %4, 1
    br i1 %cmp11, label %for.cond.cleanup, label %vector.ph

  vector.ph:                                        ; preds = %entry
    %start = call i32 @llvm.start.loop.iterations.i32(i32 %5)
    %6 = shl i32 %4, 3
    %7 = sub i32 %N, %6
    br label %vector.body

  vector.body:                                      ; preds = %vector.body, %vector.ph
    %lsr.iv20 = phi i8* [ %scevgep21, %vector.body ], [ %c, %vector.ph ]
    %lsr.iv = phi i8* [ %scevgep, %vector.body ], [ %b, %vector.ph ]
    %vec.phi = phi <8 x i16> [ <i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, %vector.ph ], [ %15, %vector.body ]
    %8 = phi i32 [ %start, %vector.ph ], [ %16, %vector.body ]
    %9 = phi i32 [ %N, %vector.ph ], [ %11, %vector.body ]
    %lsr.iv2022 = bitcast i8* %lsr.iv20 to <8 x i8>*
    %lsr.iv19 = bitcast i8* %lsr.iv to <8 x i8>*
    %10 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %9)
    %11 = sub i32 %9, 8
    %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv19, i32 1, <8 x i1> %10, <8 x i8> undef)
    %12 = zext <8 x i8> %wide.masked.load to <8 x i16>
    %wide.masked.load16 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv2022, i32 1, <8 x i1> %10, <8 x i8> undef)
    %13 = zext <8 x i8> %wide.masked.load16 to <8 x i16>
    %14 = mul nuw <8 x i16> %13, %12
    %15 = sub <8 x i16> %vec.phi, %14
    %scevgep = getelementptr i8, i8* %lsr.iv, i32 8
    %scevgep21 = getelementptr i8, i8* %lsr.iv20, i32 8
    %16 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %8, i32 1)
    %17 = icmp ne i32 %16, 0
    br i1 %17, label %vector.body, label %middle.block
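
  ; The scalar epilogue of the tail-folded reduction: a fresh vctp16 mask
  ; selects between the last two partial sums before the horizontal add.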
  middle.block:                                     ; preds = %vector.body
    %vec.phi.lcssa = phi <8 x i16> [ %vec.phi, %vector.body ]
    %.lcssa = phi <8 x i16> [ %15, %vector.body ]
    %18 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %7)
    %19 = select <8 x i1> %18, <8 x i16> %.lcssa, <8 x i16> %vec.phi.lcssa
    %20 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %19)
    br label %for.cond.cleanup

  for.cond.cleanup:                                 ; preds = %middle.block, %entry
    %a.0.lcssa = phi i16 [ 32767, %entry ], [ %20, %middle.block ]
    ret i16 %a.0.lcssa
  }

  declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
  declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
  declare i32 @llvm.start.loop.iterations.i32(i32)
  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
  declare <8 x i1> @llvm.arm.mve.vctp16(i32)
...
---
name: wrong_liveout_shift
alignment: 16
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
failedISel: false
tracksRegLiveness: true
hasWinCFI: false
registers: []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 8
  offsetAdjustment: 0
  maxAlignment: 4
  adjustsStack: false
  hasCalls: false
  stackProtector: ''
  maxCallFrameSize: 0
  cvBytesOfCalleeSavedRegisters: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
  localFrameSize: 0
  savePoint: ''
  restorePoint: ''
fixedStack: []
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites: []
constants:
  - id: 0
    value: '<8 x i16> <i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>'
    alignment: 16
    isTargetSpecific: false
machineFunctionInfo: {}
body: |
  ; CHECK-LABEL: name: wrong_liveout_shift
  ; CHECK: bb.0.entry:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
  ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK: t2IT 0, 2, implicit-def $itstate
  ; CHECK: renamable $r0 = t2MOVi16 32767, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
  ; CHECK: renamable $r0 = tSXTH killed renamable $r0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
  ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
  ; CHECK: bb.1.vector.ph:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $lr, $r0, $r1, $r2
  ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
  ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 8, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r12 = t2LSRri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
  ; CHECK: renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: bb.2.vector.body:
  ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3
  ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
  ; CHECK: $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q1
  ; CHECK: MVE_VPST 4, implicit $vpr
  ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv19, align 1)
  ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv2022, align 1)
  ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
  ; CHECK: renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK: renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK: bb.3.middle.block:
  ; CHECK: liveins: $q0, $q1, $r3
  ; CHECK: renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg, $noreg
  ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg
  ; CHECK: renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
  ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
  ; CHECK: renamable $r0 = tSXTH killed renamable $r0, 14 /* CC::al */, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
  ; CHECK: bb.4 (align 16):
  ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
  bb.0.entry:
    successors: %bb.1(0x80000000)
    liveins: $r0, $r1, $r2, $lr

    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
    t2IT 0, 2, implicit-def $itstate
    renamable $r0 = t2MOVi16 32767, 0, $cpsr, implicit killed $r0, implicit $itstate
    renamable $r0 = tSXTH killed renamable $r0, 0, $cpsr, implicit killed $r0, implicit $itstate
    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $lr

    frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    $r7 = frame-setup tMOVr $sp, 14, $noreg
    frame-setup CFI_INSTRUCTION def_cfa_register $r7
    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14, $noreg
    renamable $r3 = t2BICri killed renamable $r3, 7, 14, $noreg, $noreg
    renamable $r12 = t2SUBri killed renamable $r3, 8, 14, $noreg, $noreg
    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
    renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14, $noreg, $noreg
    renamable $r3 = tLEApcrel %const.0, 14, $noreg
    renamable $r12 = t2LSRri killed renamable $r12, 3, 14, $noreg, $noreg
    renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
    renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14, $noreg, $noreg
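    ; t2DoLoopStart both reads and defines $lr (D89881), so the loop setup is
    ; tied to the t2LoopDec/t2LoopEnd below through def-use chains rather than
    ; being left to the register allocator.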
    $lr = t2DoLoopStart renamable $lr
  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $lr, $q0, $r0, $r1, $r2, $r3

    renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
    $q1 = MVE_VORR killed $q0, $q0, 0, $noreg, $noreg, undef $q1
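    ; The VPT block opened here predicates the two loads below on the
    ; VCTP16-derived mask in $vpr.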
    MVE_VPST 4, implicit $vpr
    renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv19, align 1)
    renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv2022, align 1)
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14, $noreg
    renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    renamable $lr = t2LoopDec killed renamable $lr, 1
    renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14, $noreg
  bb.3.middle.block:
    liveins: $q0, $q1, $r3

    renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg, $noreg
    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg
    renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
    $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr
    renamable $r0 = tSXTH killed renamable $r0, 14, $noreg
    tBX_RET 14, $noreg, implicit killed $r0

  bb.4 (align 16):
    CONSTPOOL_ENTRY 0, %const.0, 16
...