; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumb-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE --check-prefix=ENABLE-V4T
; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumbv5-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE --check-prefix=ENABLE-V5T
; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumb-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE --check-prefix=DISABLE-V4T
; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumbv5-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE --check-prefix=DISABLE-V5T
;
; Note: Lots of tests use inline asm instead of regular calls.
; This allows to have a better control on what the allocation will do.
; Otherwise, we may have spill right in the entry block, defeating
; shrink-wrapping. Moreover, some of the inline asm statements (nop)
; are here to ensure that the related paths do not end up as critical
; edges.
; Also disable the late if-converter as it makes harder to reason on
; the diffs.
; Disable tail-duplication during placement, as v4t vs v5t get different
; results due to branches not being analyzable under v5
; Initial motivating example: Simple diamond with a call just on one side.
; CHECK-LABEL: foo:
;
; Compare the arguments and jump to exit.
; No prologue needed.
; ENABLE: cmp r0, r1
; ENABLE-NEXT: bge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; CHECK: push {r7, lr}
; CHECK: sub sp, #8
;
; Compare the arguments and jump to exit.
; After the prologue is set.
; DISABLE: cmp r0, r1
; DISABLE-NEXT: bge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.
; CHECK: str r0, [sp, #4]
; Set the alloca address in the second argument.
; Set the first argument to zero.
; CHECK: movs r0, #0
; CHECK-NEXT: add r1, sp, #4
; CHECK-NEXT: bl
;
; With shrink-wrapping, epilogue is just after the call.
; ENABLE-NEXT: add sp, #8
; ENABLE-V5T-NEXT: pop {r7, pc}
; ENABLE-V4T-NEXT: pop {r7}
; ENABLE-V4T-NEXT: pop {r1}
; ENABLE-V4T-NEXT: mov lr, r1
;
; CHECK: [[EXIT_LABEL]]:
;
; Without shrink-wrapping, epilogue is in the exit block.
; Epilogue code. (What we pop does not matter.)
; DISABLE: add sp, #8
; DISABLE-V5T-NEXT: pop {r7, pc}
; DISABLE-V4T-NEXT: pop {r7}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1
;
; ENABLE-NEXT: bx lr
define i32 @foo(i32 %a, i32 %b) {
  %tmp = alloca i32, align 4
  %tmp2 = icmp slt i32 %a, %b
  br i1 %tmp2, label %true, label %false

true:
  store i32 %a, i32* %tmp, align 4
  %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
  br label %false

false:
  %tmp.0 = phi i32 [ %tmp4, %true ], [ %a, %0 ]
  ret i32 %tmp.0
}
; Same, but the final BB is non-trivial, so we don't duplicate the return inst.
; CHECK-LABEL: bar:
;
; With shrink-wrapping, epilogue is just after the call.
; CHECK: bl
; ENABLE-NEXT: add sp, #8
; ENABLE-NEXT: pop {r7}
; ENABLE-NEXT: pop {r0}
; ENABLE-NEXT: mov lr, r0
;
; CHECK: movs r0, #42
;
; Without shrink-wrapping, epilogue is in the exit block.
; Epilogue code. (What we pop does not matter.)
; DISABLE: add sp, #8
; DISABLE-V5T-NEXT: pop {r7, pc}
; DISABLE-V4T-NEXT: pop {r7}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1
;
; ENABLE-NEXT: bx lr
define i32 @bar(i32 %a, i32 %b) {
  %tmp = alloca i32, align 4
  %tmp2 = icmp slt i32 %a, %b
  br i1 %tmp2, label %true, label %false

true:
  store i32 %a, i32* %tmp, align 4
  %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
  br label %false

false:
  ret i32 42
}
; Function Attrs: optsize
declare i32 @doSomething(i32, i32*)
; Check that we do not perform the restore inside the loop whereas the save
; is outside.
; CHECK-LABEL: freqSaveAndRestoreOutsideLoop:
;
; Shrink-wrapping allows to skip the prologue in the else case.
; ENABLE: cmp r0, #0
; ENABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Make sure we save the CSR used in the inline asm: r4.
; CHECK: push {r4, lr}
;
; DISABLE: cmp r0, #0
; DISABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; SUM is in r0 because it is coalesced with the second
; argument on the else path.
; CHECK: movs [[SUM:r0]], #0
; CHECK-NEXT: movs [[IV:r[0-9]+]], #10
;
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: @ %for.body
; CHECK: movs [[TMP:r[0-9]+]], #1
; CHECK: adds [[SUM]], [[TMP]], [[SUM]]
; CHECK-NEXT: subs [[IV]], [[IV]], #1
; CHECK-NEXT: bne [[LOOP]]
;
; Next BB.
; SUM << 3.
; CHECK: lsls [[SUM]], [[SUM]], #3
;
; Duplicated epilogue.
; DISABLE-V5T: pop {r4, pc}
; DISABLE-V4T: b [[END_LABEL:LBB[0-9_]+]]
;
; CHECK: [[ELSE_LABEL]]: @ %if.else
; Shift second argument by one and store into returned register.
; CHECK: lsls r0, r1, #1
; DISABLE-V5T-NEXT: pop {r4, pc}
; DISABLE-V4T-NEXT: [[END_LABEL]]: @ %if.end
; DISABLE-V4T-NEXT: pop {r4}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1
;
; ENABLE-V5T-NEXT: {{LBB[0-9_]+}}: @ %if.end
; ENABLE-NEXT: bx lr
define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
entry:
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader

for.preheader:
  tail call void asm "nop", ""()
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
  %call = tail call i32 asm sideeffect "movs $0, #1", "=r,~{r4}"()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %shl = shl i32 %add, 3
  br label %if.end

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1
  br label %if.end

if.end:                                           ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]
  ret i32 %sum.1
}
declare i32 @something(...)
; Check that we do not perform the shrink-wrapping inside the loop even
; though that would be legal. The cost model must prevent that.
; CHECK-LABEL: freqSaveAndRestoreOutsideLoop2:
; Prologue code.
; Make sure we save the CSR used in the inline asm: r4.
; CHECK: push {r4
; This is the nop.
; CHECK: mov r8, r8
; CHECK: movs [[SUM:r0]], #0
; CHECK-NEXT: movs [[IV:r[0-9]+]], #10
; Next BB.
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: @ %for.body
; CHECK: movs [[TMP:r[0-9]+]], #1
; CHECK: adds [[SUM]], [[TMP]], [[SUM]]
; CHECK-NEXT: subs [[IV]], [[IV]], #1
; CHECK-NEXT: bne [[LOOP_LABEL]]
; Next BB.
; CHECK: @ %for.exit
; This is the nop.
; CHECK: mov r8, r8
; CHECK: pop {r4
define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
entry:
  br label %for.preheader

for.preheader:
  tail call void asm "nop", ""()
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.04 = phi i32 [ 0, %for.preheader ], [ %inc, %for.body ]
  %sum.03 = phi i32 [ 0, %for.preheader ], [ %add, %for.body ]
  %call = tail call i32 asm sideeffect "movs $0, #1", "=r,~{r4}"()
  %add = add nsw i32 %call, %sum.03
  %inc = add nuw nsw i32 %i.04, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.exit, label %for.body

for.exit:
  tail call void asm "nop", ""()
  br label %for.end

for.end:                                          ; preds = %for.body
  ret i32 %add
}
; Check with a more complex case that we do not have save within the loop and
; restore outside.
; CHECK-LABEL: loopInfoSaveOutsideLoop:
;
; ENABLE: cmp r0, #0
; ENABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Make sure we save the CSR used in the inline asm: r4.
; CHECK: push {r4, lr}
;
; DISABLE: cmp r0, #0
; DISABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; SUM is in r0 because it is coalesced with the second
; argument on the else path.
; CHECK: movs [[SUM:r0]], #0
; CHECK-NEXT: movs [[IV:r[0-9]+]], #10
;
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: @ %for.body
; CHECK: movs [[TMP:r[0-9]+]], #1
; CHECK: adds [[SUM]], [[TMP]], [[SUM]]
; CHECK-NEXT: subs [[IV]], [[IV]], #1
; CHECK-NEXT: bne [[LOOP]]
;
; Next BB.
; SUM << 3.
; CHECK: lsls [[SUM]], [[SUM]], #3
; ENABLE-V5T-NEXT: pop {r4, pc}
; ENABLE-V4T-NEXT: pop {r4}
; ENABLE-V4T-NEXT: pop {r1}
; ENABLE-V4T-NEXT: bx r1
;
; Duplicated epilogue.
; DISABLE-V5T: pop {r4, pc}
; DISABLE-V4T: b [[END_LABEL:LBB[0-9_]+]]
;
; CHECK: [[ELSE_LABEL]]: @ %if.else
; Shift second argument by one and store into returned register.
; CHECK: lsls r0, r1, #1
; DISABLE-V5T-NEXT: pop {r4, pc}
; DISABLE-V4T-NEXT: [[END_LABEL]]: @ %if.end
; DISABLE-V4T-NEXT: pop {r4}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1
;
; ENABLE-V5T-NEXT: {{LBB[0-9_]+}}: @ %if.end
; ENABLE-NEXT: bx lr
define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
entry:
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader

for.preheader:
  tail call void asm "nop", ""()
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
  %call = tail call i32 asm sideeffect "movs $0, #1", "=r,~{r4}"()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  tail call void asm "nop", "~{r4}"()
  %shl = shl i32 %add, 3
  br label %if.end

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1
  br label %if.end

if.end:                                           ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]
  ret i32 %sum.1
}
declare void @somethingElse(...)
; Check with a more complex case that we do not have restore within the loop and
; save outside.
; CHECK-LABEL: loopInfoRestoreOutsideLoop:
;
; ENABLE: cmp r0, #0
; ENABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Make sure we save the CSR used in the inline asm: r4.
; CHECK: push {r4, lr}
;
; DISABLE-NEXT: cmp r0, #0
; DISABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; SUM is in r0 because it is coalesced with the second
; argument on the else path.
; CHECK: movs [[SUM:r0]], #0
; CHECK-NEXT: movs [[IV:r[0-9]+]], #10
;
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: @ %for.body
; CHECK: movs [[TMP:r[0-9]+]], #1
; CHECK: adds [[SUM]], [[TMP]], [[SUM]]
; CHECK-NEXT: subs [[IV]], [[IV]], #1
; CHECK-NEXT: bne [[LOOP]]
;
; Next BB.
; SUM << 3.
; CHECK: lsls [[SUM]], [[SUM]], #3
; ENABLE-V5T-NEXT: pop {r4, pc}
; ENABLE-V4T-NEXT: pop {r4}
; ENABLE-V4T-NEXT: pop {r1}
; ENABLE-V4T-NEXT: bx r1
;
; Duplicated epilogue.
; DISABLE-V5T: pop {r4, pc}
; DISABLE-V4T: b [[END_LABEL:LBB[0-9_]+]]
;
; CHECK: [[ELSE_LABEL]]: @ %if.else
; Shift second argument by one and store into returned register.
; CHECK: lsls r0, r1, #1
; DISABLE-V5T-NEXT: pop {r4, pc}
; DISABLE-V4T-NEXT: [[END_LABEL]]: @ %if.end
; DISABLE-V4T-NEXT: pop {r4}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1
;
; ENABLE-V5T-NEXT: {{LBB[0-9_]+}}: @ %if.end
; ENABLE-NEXT: bx lr
define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
entry:
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  tail call void asm "nop", "~{r4}"()
  br label %for.body

for.body:                                         ; preds = %for.body, %if.then
  %i.05 = phi i32 [ 0, %if.then ], [ %inc, %for.body ]
  %sum.04 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
  %call = tail call i32 asm sideeffect "movs $0, #1", "=r,~{r4}"()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %shl = shl i32 %add, 3
  br label %if.end

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1
  br label %if.end

if.end:                                           ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]
  ret i32 %sum.1
}
; Check that we handle function with no frame information correctly.
; CHECK-LABEL: emptyFrame:
; CHECK: @ %entry
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: bx lr
define i32 @emptyFrame() {
entry:
  ret i32 0
}
; Check that we handle inline asm correctly.
; CHECK-LABEL: inlineAsm:
;
; ENABLE: cmp r0, #0
; ENABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Make sure we save the CSR used in the inline asm: r4.
; CHECK: push {r4, lr}
;
; DISABLE: cmp r0, #0
; DISABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; CHECK: movs [[IV:r[0-9]+]], #10
;
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: @ %for.body
; CHECK: movs r4, #1
; CHECK: subs [[IV]], [[IV]], #1
; CHECK-NEXT: bne [[LOOP]]
;
; Next BB.
; CHECK: movs r0, #0
; ENABLE-V5T-NEXT: pop {r4, pc}
; ENABLE-V4T-NEXT: pop {r4}
; ENABLE-V4T-NEXT: pop {r1}
; ENABLE-V4T-NEXT: bx r1
;
; Duplicated epilogue.
; DISABLE-V5T-NEXT: pop {r4, pc}
; DISABLE-V4T-NEXT: b [[END_LABEL:LBB[0-9_]+]]
;
; CHECK: [[ELSE_LABEL]]: @ %if.else
; Shift second argument by one and store into returned register.
; CHECK: lsls r0, r1, #1
; DISABLE-V5T-NEXT: pop {r4, pc}
; DISABLE-V4T-NEXT: [[END_LABEL]]: @ %if.end
; DISABLE-V4T-NEXT: pop {r4}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1
;
; ENABLE-V5T-NEXT: {{LBB[0-9_]+}}: @ %if.end
; ENABLE-NEXT: bx lr
define i32 @inlineAsm(i32 %cond, i32 %N) {
entry:
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader

for.preheader:
  tail call void asm "nop", ""()
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.03 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  tail call void asm sideeffect "movs r4, #1", "~{r4}"()
  %inc = add nuw nsw i32 %i.03, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.exit, label %for.body

for.exit:
  tail call void asm "nop", ""()
  br label %if.end

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1
  br label %if.end

if.end:                                           ; preds = %for.body, %if.else
  %sum.0 = phi i32 [ %mul, %if.else ], [ 0, %for.exit ]
  ret i32 %sum.0
}
; Check that we handle calls to variadic functions correctly.
; CHECK-LABEL: callVariadicFunc:
;
; ENABLE: cmp r0, #0
; ENABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; CHECK: push {[[TMP:r[0-9]+]], lr}
; CHECK: sub sp, #16
;
; DISABLE: cmp r0, #0
; DISABLE-NEXT: beq [[ELSE_LABEL:LBB[0-9_]+]]
;
; Setup of the varags.
; CHECK: mov [[TMP_SP:r[0-9]+]], sp
; CHECK-NEXT: str r1, {{\[}}[[TMP_SP]]]
; CHECK-NEXT: str r1, {{\[}}[[TMP_SP]], #4]
; CHECK-NEXT: str r1, {{\[}}[[TMP_SP]], #8]
; CHECK: movs r0, r1
; CHECK-NEXT: movs r2, r1
; CHECK-NEXT: movs r3, r1
; CHECK-NEXT: bl
; CHECK-NEXT: lsls r0, r0, #3
;
; ENABLE-NEXT: add sp, #16
; ENABLE-V5T-NEXT: pop {[[TMP]], pc}
; ENABLE-V4T-NEXT: pop {[[TMP]]}
; ENABLE-V4T-NEXT: pop {r1}
; ENABLE-V4T-NEXT: bx r1
;
; Duplicated epilogue.
; DISABLE-V5T-NEXT: add sp, #16
; DISABLE-V5T-NEXT: pop {[[TMP]], pc}
; DISABLE-V4T-NEXT: b [[END_LABEL:LBB[0-9_]+]]
;
; CHECK: [[ELSE_LABEL]]: @ %if.else
; Shift second argument by one and store into returned register.
; CHECK: lsls r0, r1, #1
;
; Epilogue code.
; ENABLE-V5T-NEXT: {{LBB[0-9_]+}}: @ %if.end
; ENABLE-NEXT: bx lr
;
; DISABLE-V4T-NEXT: [[END_LABEL]]: @ %if.end
; DISABLE-NEXT: add sp, #16
; DISABLE-V5T-NEXT: pop {[[TMP]], pc}
; DISABLE-V4T-NEXT: pop {[[TMP]]}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1
define i32 @callVariadicFunc(i32 %cond, i32 %N) {
entry:
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  %call = tail call i32 (i32, ...) @someVariadicFunc(i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N)
  %shl = shl i32 %call, 3
  br label %if.end

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1
  br label %if.end

if.end:                                           ; preds = %if.else, %if.then
  %sum.0 = phi i32 [ %shl, %if.then ], [ %mul, %if.else ]
  ret i32 %sum.0
}
declare i32 @someVariadicFunc(i32, ...)
; Make sure we do not insert unreachable code after noreturn function.
; Although this is not incorrect to insert such code, it is useless
; and it hurts the binary size.
;
; CHECK-LABEL: noreturn:
; DISABLE: push
;
; CHECK: cmp r0, #0
; CHECK-NEXT: bne [[ABORT:LBB[0-9_]+]]
;
; CHECK: movs r0, #42
;
; ENABLE-NEXT: bx lr
;
; DISABLE-NEXT: pop
;;
; CHECK: [[ABORT]]: @ %if.abort
;
; ENABLE: push
;
; CHECK: bl
; ENABLE-NOT: pop
define i32 @noreturn(i8 signext %bad_thing) {
entry:
  %tobool = icmp eq i8 %bad_thing, 0
  br i1 %tobool, label %if.end, label %if.abort

if.abort:
  %call = tail call i32 asm sideeffect "movs $0, #1", "=r,~{r4}"()
  tail call void @abort() #0
  unreachable

if.end:
  ret i32 42
}
declare void @abort() #0
define i32 @b_to_bx(i32 %value) {
; CHECK-LABEL: b_to_bx:
; DISABLE: push {r7, lr}
; CHECK: cmp r0, #49
; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
; ENABLE: push {r7, lr}

; CHECK: bl
; DISABLE-V5-NEXT: pop {r7, pc}
; DISABLE-V4T-NEXT: b [[END_LABEL:LBB[0-9_]+]]

; ENABLE-V5-NEXT: pop {r7, pc}
; ENABLE-V4-NEXT: pop {r7}
; ENABLE-V4-NEXT: pop {r1}
; ENABLE-V4-NEXT: bx r1

; CHECK: [[ELSE_LABEL]]: @ %if.else
; CHECK-NEXT: lsls r0, r1, #1
; DISABLE-V5-NEXT: pop {r7, pc}
; DISABLE-V4T-NEXT: [[END_LABEL]]: @ %if.end
; DISABLE-V4T-NEXT: pop {r7}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1

; ENABLE-V5T-NEXT: {{LBB[0-9_]+}}: @ %if.end
; ENABLE-NEXT: bx lr

entry:
  %cmp = icmp slt i32 %value, 50
  br i1 %cmp, label %if.then, label %if.else

if.then:
  %div = sdiv i32 5000, %value
  br label %if.end

if.else:
  %mul = shl nsw i32 %value, 1
  br label %if.end

if.end:
  %value.addr.0 = phi i32 [ %div, %if.then ], [ %mul, %if.else ]
  ret i32 %value.addr.0
}
define i1 @beq_to_bx(i32* %y, i32 %head) {
; CHECK-LABEL: beq_to_bx:
; DISABLE: push {r4, lr}
; CHECK: cmp r2, #0
; CHECK-NEXT: beq [[EXIT_LABEL:LBB[0-9_]+]]
; ENABLE: push {r4, lr}

; CHECK: tst r3, r4
; ENABLE-NEXT: ldr [[POP:r[4567]]], [sp, #4]
; ENABLE-NEXT: mov lr, [[POP]]
; ENABLE-NEXT: pop {[[POP]]}
; ENABLE-NEXT: add sp, #4
; CHECK-NEXT: beq [[EXIT_LABEL]]

; CHECK: str r1, [r2]
; CHECK: str r3, [r2]
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: [[EXIT_LABEL]]: @ %cleanup
; ENABLE-NEXT: bx lr
; DISABLE-V5-NEXT: pop {r4, pc}
; DISABLE-V4T-NEXT: pop {r4}
; DISABLE-V4T-NEXT: pop {r1}
; DISABLE-V4T-NEXT: bx r1

entry:
  %cmp = icmp eq i32* %y, null
  br i1 %cmp, label %cleanup, label %if.end

if.end:
  %z = load i32, i32* %y, align 4
  %and = and i32 %z, 2
  %cmp2 = icmp eq i32 %and, 0
  br i1 %cmp2, label %cleanup, label %if.end4

if.end4:
  store i32 %head, i32* %y, align 4
  store volatile i32 %z, i32* %y, align 4
  br label %cleanup

cleanup:
  %retval.0 = phi i1 [ 0, %if.end4 ], [ 1, %entry ], [ 1, %if.end ]
  ret i1 %retval.0
}
attributes #0 = { noreturn nounwind }