diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b5aabf7ffa65..c780c14abbc3 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -15340,7 +15340,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
   unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
 
   // In the XMM save block, save all the XMM argument registers.
-  for (int i = 3, e = MI->getNumOperands(); i != e; ++i) {
+  for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
     int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
     MachineMemOperand *MMO =
       F->getMachineMemOperand(
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 7d10b67bfe6d..5c8840823b16 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -72,7 +72,7 @@ def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
 
 
 // x86-64 va_start lowering magic.
-let usesCustomInserter = 1 in {
+let usesCustomInserter = 1, Defs = [EFLAGS] in {
 def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                               (outs),
                               (ins GR8:$al,
@@ -81,7 +81,8 @@ def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                               "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                               [(X86vastart_save_xmm_regs GR8:$al,
                                                          imm:$regsavefi,
-                                                         imm:$offset)]>;
+                                                         imm:$offset),
+                               (implicit EFLAGS)]>;
 
 // The VAARG_64 pseudo-instruction takes the address of the va_list,
 // and places the address of the next argument into a register.
diff --git a/llvm/test/CodeGen/X86/vaargs.ll b/llvm/test/CodeGen/X86/vaargs.ll
new file mode 100644
index 000000000000..fa31ec3b4171
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vaargs.ll
@@ -0,0 +1,67 @@
+; RUN: llc -mcpu=corei7-avx %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=NO-FLAGS
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%struct.__va_list_tag = type { i32, i32, i8*, i8* }
+
+; Check that vastart gets the right thing.
+define i32 @sum(i32 %count, ...) nounwind optsize ssp uwtable {
+; CHECK:      testb   %al, %al
+; CHECK-NEXT: je
+; CHECK-NEXT: ## BB#{{[0-9]+}}:
+; CHECK-NEXT: vmovaps %xmm0, 48(%rsp)
+; CHECK-NEXT: vmovaps %xmm1, 64(%rsp)
+; CHECK-NEXT: vmovaps %xmm2, 80(%rsp)
+; CHECK-NEXT: vmovaps %xmm3, 96(%rsp)
+; CHECK-NEXT: vmovaps %xmm4, 112(%rsp)
+; CHECK-NEXT: vmovaps %xmm5, 128(%rsp)
+; CHECK-NEXT: vmovaps %xmm6, 144(%rsp)
+; CHECK-NEXT: vmovaps %xmm7, 160(%rsp)
+
+; Check that [EFLAGS] hasn't been pulled in.
+; NO-FLAGS-NOT: %flags
+
+  %ap = alloca [1 x %struct.__va_list_tag], align 16
+  %1 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*
+  call void @llvm.va_start(i8* %1)
+  %2 = icmp sgt i32 %count, 0
+  br i1 %2, label %.lr.ph, label %._crit_edge
+
+.lr.ph:                                           ; preds = %0
+  %3 = getelementptr inbounds [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0
+  %4 = getelementptr inbounds [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2
+  %.pre = load i32* %3, align 16
+  br label %5
+
+;
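
A note on why both hunks are needed, with a sketch of the code the pseudo expands to. The custom inserter tests %al (which the x86-64 SysV ABI sets to the number of vector registers used by a varargs call) and branches around the XMM spills when it is zero, so the expansion clobbers EFLAGS. Roughly, using the frame offsets from the CHECK lines above (the LBB0_2 label name is illustrative):

	testb	%al, %al             ## emitted by the custom inserter: clobbers EFLAGS
	je	LBB0_2               ## no vector args were passed: skip the spills
	vmovaps	%xmm0, 48(%rsp)      ## spill the eight XMM argument registers
	...                          ## (one vmovaps per register, 16 bytes apart)
	vmovaps	%xmm7, 160(%rsp)
LBB0_2:

Without Defs = [EFLAGS] and the (implicit EFLAGS) pattern result, the scheduler would be free to place a compare before the pseudo and its dependent branch after it, and the expansion would silently corrupt the flags. The X86ISelLowering.cpp hunk is the matching fix: the implicit EFLAGS def now appears as a trailing operand on the MachineInstr, so the save loop must stop one operand early to iterate over only the XMM register operands.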