X86 byval copies no longer always_inline. <rdar://problem/8706628>

llvm-svn: 127359
This commit is contained in:
Stuart Hastings 2011-03-09 21:10:30 +00:00
parent 6492ef1237
commit 9955e2f912
8 changed files with 22 additions and 14 deletions

View File

@@ -1564,7 +1564,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile*/false, /*AlwaysInline=*/true,
/*isVolatile*/false, /*AlwaysInline=*/false,
MachinePointerInfo(), MachinePointerInfo());
}

View File

@@ -0,0 +1,10 @@
; RUN: llc < %s -march=x86-64 | FileCheck %s
; RUN: llc < %s -march=x86 | FileCheck %s
; CHECK: _memcpy
define void @foo([40000 x i32] *%P) nounwind {
call void @bar([40000 x i32] * byval align 1 %P)
ret void
}
declare void @bar([40000 x i32] *%P )

View File

@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
; RUN: llc < %s -march=x86-64 | egrep {rep.movsq|memcpy} | count 2
; RUN: llc < %s -march=x86 | egrep {rep.movsl|memcpy} | count 2
%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
i64, i64, i64, i64, i64, i64, i64, i64,

View File

@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
; RUN: llc < %s -march=x86-64 | egrep {rep.movsq|memcpy} | count 2
; RUN: llc < %s -march=x86 | egrep {rep.movsl|memcpy} | count 2
%struct.s = type { i32, i32, i32, i32, i32, i32, i32, i32,
i32, i32, i32, i32, i32, i32, i32, i32,

View File

@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
; RUN: llc < %s -march=x86-64 | egrep {rep.movsq|memcpy} | count 2
; RUN: llc < %s -march=x86 | egrep {rep.movsl|memcpy} | count 2
%struct.s = type { i16, i16, i16, i16, i16, i16, i16, i16,
i16, i16, i16, i16, i16, i16, i16, i16,

View File

@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
; RUN: llc < %s -march=x86-64 | egrep {rep.movsq|memcpy} | count 2
; RUN: llc < %s -march=x86 | egrep {rep.movsl|memcpy} | count 2
%struct.s = type { i8, i8, i8, i8, i8, i8, i8, i8,
i8, i8, i8, i8, i8, i8, i8, i8,

View File

@@ -8,10 +8,8 @@ define i32 @main() nounwind {
entry:
; CHECK: main:
; CHECK: movl $1, (%esp)
; CHECK: leal 16(%esp), %edi
; CHECK: movl $36, %ecx
; CHECK: leal 160(%esp), %esi
; CHECK: rep;movsl
; CHECK: movl ${{36|144}},
; CHECK: {{rep;movsl|memcpy}}
%s = alloca %struct.S ; <%struct.S*> [#uses=2]
%tmp15 = getelementptr %struct.S* %s, i32 0, i32 0 ; <<2 x i64>*> [#uses=1]
store <2 x i64> < i64 8589934595, i64 1 >, <2 x i64>* %tmp15, align 16

View File

@@ -1,6 +1,6 @@
; RUN: llc < %s -march=x86-64 -tailcallopt | grep TAILCALL
; Expect 2 rep;movs because of tail call byval lowering.
; RUN: llc < %s -march=x86-64 -tailcallopt | grep rep | wc -l | grep 2
; RUN: llc < %s -march=x86-64 -tailcallopt | egrep {rep|memcpy} | wc -l | grep 2
; A sequence of copyto/copyfrom virtual registers is used to deal with byval
; lowering appearing after moving arguments to registers. The following two
; checks verify that the register allocator changes those sequences to direct