Revert "[X86] Allow more call sequences to use push instructions for argument passing"

It miscompiles some code and a reduced test case has been sent to the
author.

This reverts commit r240257.

llvm-svn: 242373
Reid Kleckner 2015-07-16 01:30:00 +00:00
parent ef9828fb47
commit 938bd6fc96
2 changed files with 66 additions and 144 deletions
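
For context, the reverted commit extended X86CallFrameOptimization, which rewrites stores into the outgoing-argument area as pushes when that is legal and profitable. A minimal before/after sketch of the basic transformation, mirroring the sequences the FileCheck lines below already verify for a plain four-argument call such as @good (this is illustrative, not the reduced miscompile case mentioned above):

    # without the transformation: explicit stack adjustment plus stores
    subl  $16, %esp
    movl  $4, 12(%esp)
    movl  $3, 8(%esp)
    movl  $2, 4(%esp)
    movl  $1, (%esp)
    calll _good
    addl  $16, %esp

    # with the transformation: pushes emitted in reverse order before the call
    pushl $4
    pushl $3
    pushl $2
    pushl $1
    calll _good
    addl  $16, %esp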

lib/Target/X86/X86CallFrameOptimization.cpp

@@ -78,7 +78,7 @@ private:
typedef DenseMap<MachineInstr *, CallContext> ContextMap;
bool isLegal(MachineFunction &MF);
bool isProfitable(MachineFunction &MF, ContextMap &CallSeqMap);
void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
@@ -90,13 +90,6 @@ private:
MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
unsigned Reg);
enum InstClassification { Convert, Skip, Exit };
InstClassification classifyInstruction(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const X86RegisterInfo &RegInfo,
DenseSet<unsigned int> &UsedRegs);
const char *getPassName() const override { return "X86 Optimize Call Frame"; }
const TargetInstrInfo *TII;
@@ -112,7 +105,7 @@ FunctionPass *llvm::createX86CallFrameOptimization() {
return new X86CallFrameOptimization();
}
// This checks whether the transformation is legal.
// Also returns false in cases where it's potentially legal, but
// we don't even want to try.
bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
@@ -177,8 +170,9 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
if (!OptForSize)
return false;
unsigned StackAlign = TFL->getStackAlignment();
int64_t Advantage = 0;
for (auto CC : CallSeqMap) {
// Call sites where no parameters are passed on the stack
@@ -211,6 +205,7 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
return (Advantage >= 0);
}
bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget().getInstrInfo();
TFL = MF.getSubtarget().getFrameLowering();
@@ -242,64 +237,6 @@ bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
return Changed;
}
X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
if (MI == MBB.end())
return Exit;
// The instructions we actually care about are movs onto the stack
int Opcode = MI->getOpcode();
if (Opcode == X86::MOV32mi || Opcode == X86::MOV32mr)
return Convert;
// Not all calling conventions have only stack MOVs between the stack
// adjust and the call.
// We want to tolerate other instructions, to cover more cases.
// In particular:
// a) PCrel calls, where we expect an additional COPY of the basereg.
// b) Passing frame-index addresses.
// c) Calling conventions that have inreg parameters. These generate
// both copies and movs into registers.
// To avoid creating lots of special cases, allow any instruction
// that does not write into memory, does not def or use the stack
// pointer, and does not def any register that was used by a preceding
// push.
// (Reading from memory is allowed, even if referenced through a
// frame index, since these will get adjusted properly in PEI)
// The reason for the last condition is that the pushes can't replace
// the movs in place, because the order must be reversed.
// So if we have a MOV32mr that uses EDX, then an instruction that defs
// EDX, and then the call, after the transformation the push will use
// the modified version of EDX, and not the original one.
// Since we are still in SSA form at this point, we only need to
// make sure we don't clobber any *physical* registers that were
// used by an earlier mov that will become a push.
if (MI->isCall() || MI->mayStore())
return Exit;
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg())
continue;
unsigned int Reg = MO.getReg();
if (!RegInfo.isPhysicalRegister(Reg))
continue;
if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
return Exit;
if (MO.isDef()) {
for (unsigned int U : UsedRegs)
if (RegInfo.regsOverlap(Reg, U))
return Exit;
}
}
return Skip;
}
void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
@@ -317,8 +254,8 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
// How much do we adjust the stack? This puts an upper bound on
// the number of parameters actually passed on it.
unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4;
// A zero adjustment means no stack parameters
if (!MaxAdjust) {
Context.NoStackParams = true;
@@ -347,17 +284,11 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
if (MaxAdjust > 4)
Context.MovVector.resize(MaxAdjust, nullptr);
InstClassification Classification;
DenseSet<unsigned int> UsedRegs;
do {
int Opcode = I->getOpcode();
if (Opcode != X86::MOV32mi && Opcode != X86::MOV32mr)
break;
while ((Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs)) !=
Exit) {
if (Classification == Skip) {
++I;
continue;
}
// We know the instruction is a MOV32mi/MOV32mr.
// We only want movs of the form:
// movl imm/r32, k(%esp)
// If we run into something else, bail.
@@ -392,20 +323,24 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
return;
Context.MovVector[StackDisp] = I;
for (const MachineOperand &MO : I->uses()) {
if (!MO.isReg())
continue;
unsigned int Reg = MO.getReg();
if (RegInfo.isPhysicalRegister(Reg))
UsedRegs.insert(Reg);
}
++I;
} while (I != MBB.end());
// We now expect the end of the sequence - a call and a stack adjust.
if (I == MBB.end())
return;
// For PCrel calls, we expect an additional COPY of the basereg.
// If we find one, skip it.
if (I->isCopy()) {
if (I->getOperand(1).getReg() ==
MF.getInfo<X86MachineFunctionInfo>()->getGlobalBaseReg())
++I;
else
return;
}
// We now expect the end of the sequence. If we stopped early,
// or reached the end of the block without finding a call, bail.
if (I == MBB.end() || !I->isCall())
if (!I->isCall())
return;
Context.Call = I;

test/CodeGen/X86/movtopush.ll

@@ -2,15 +2,11 @@
; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-windows -force-align-stack -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
%class.Class = type { i32 }
%struct.s = type { i64 }
declare void @good(i32 %a, i32 %b, i32 %c, i32 %d)
declare void @inreg(i32 %a, i32 inreg %b, i32 %c, i32 %d)
declare x86_thiscallcc void @thiscall(%class.Class* %class, i32 %a, i32 %b, i32 %c, i32 %d)
declare void @oneparam(i32 %a)
declare void @eightparams(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
declare void @struct(%struct.s* byval %a, i32 %b, i32 %c, i32 %d)
; Here, we should have a reserved frame, so we don't expect pushes
; NORMAL-LABEL: test1:
@@ -112,12 +108,13 @@ entry:
ret void
}
; We support weird calling conventions
; We don't support weird calling conventions
; NORMAL-LABEL: test4:
; NORMAL: movl $2, %eax
; NORMAL-NEXT: pushl $4
; NORMAL-NEXT: pushl $3
; NORMAL-NEXT: pushl $1
; NORMAL: subl $12, %esp
; NORMAL-NEXT: movl $4, 8(%esp)
; NORMAL-NEXT: movl $3, 4(%esp)
; NORMAL-NEXT: movl $1, (%esp)
; NORMAL-NEXT: movl $2, %eax
; NORMAL-NEXT: call
; NORMAL-NEXT: addl $12, %esp
define void @test4() optsize {
@@ -126,20 +123,6 @@ entry:
ret void
}
; NORMAL-LABEL: test4b:
; NORMAL: movl 4(%esp), %ecx
; NORMAL-NEXT: pushl $4
; NORMAL-NEXT: pushl $3
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: call
; NORMAL-NEXT: ret
define void @test4b(%class.Class* %f) optsize {
entry:
call x86_thiscallcc void @thiscall(%class.Class* %f, i32 1, i32 2, i32 3, i32 4)
ret void
}
; When there is no reserved call frame, check that additional alignment
; is added when the pushes don't add up to the required alignment.
; ALIGNED-LABEL: test5:
@@ -246,27 +229,20 @@ entry:
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: call
; NORMAL-NEXT: addl $16, %esp
; NORMAL-NEXT: subl $20, %esp
; NORMAL-NEXT: movl 20(%esp), [[E1:%e..]]
; NORMAL-NEXT: movl 24(%esp), [[E2:%e..]]
; NORMAL-NEXT: movl [[E2]], 4(%esp)
; NORMAL-NEXT: movl [[E1]], (%esp)
; NORMAL-NEXT: leal 32(%esp), [[E3:%e..]]
; NORMAL-NEXT: movl [[E3]], 16(%esp)
; NORMAL-NEXT: leal 28(%esp), [[E4:%e..]]
; NORMAL-NEXT: movl [[E4]], 12(%esp)
; NORMAL-NEXT: movl $6, 8(%esp)
; NORMAL-NEXT: subl $16, %esp
; NORMAL-NEXT: leal 16(%esp), [[EAX:%e..]]
; NORMAL-NEXT: movl [[EAX]], 12(%esp)
; NORMAL-NEXT: movl $7, 8(%esp)
; NORMAL-NEXT: movl $6, 4(%esp)
; NORMAL-NEXT: movl $5, (%esp)
; NORMAL-NEXT: call
; NORMAL-NEXT: addl $20, %esp
; NORMAL-NEXT: addl $16, %esp
define void @test9() optsize {
entry:
%p = alloca i32, align 4
%q = alloca i32, align 4
%s = alloca %struct.s, align 4
call void @good(i32 1, i32 2, i32 3, i32 4)
%pv = ptrtoint i32* %p to i32
%qv = ptrtoint i32* %q to i32
call void @struct(%struct.s* byval %s, i32 6, i32 %qv, i32 %pv)
%0 = ptrtoint i32* %p to i32
call void @good(i32 5, i32 6, i32 7, i32 %0)
ret void
}
@@ -315,17 +291,28 @@ define void @test11() optsize {
; Converting one mov into a push isn't worth it when
; doing so forces too much overhead for other calls.
; NORMAL-LABEL: test12:
; NORMAL: movl $8, 12(%esp)
; NORMAL: subl $16, %esp
; NORMAL-NEXT: movl $4, 8(%esp)
; NORMAL-NEXT: movl $3, 4(%esp)
; NORMAL-NEXT: movl $1, (%esp)
; NORMAL-NEXT: movl $2, %eax
; NORMAL-NEXT: calll _inreg
; NORMAL-NEXT: movl $8, 12(%esp)
; NORMAL-NEXT: movl $7, 8(%esp)
; NORMAL-NEXT: movl $6, 4(%esp)
; NORMAL-NEXT: movl $5, (%esp)
; NORMAL-NEXT: calll _good
; NORMAL-NEXT: movl $12, 8(%esp)
; NORMAL-NEXT: movl $11, 4(%esp)
; NORMAL-NEXT: movl $9, (%esp)
; NORMAL-NEXT: movl $10, %eax
; NORMAL-NEXT: calll _inreg
; NORMAL-NEXT: addl $16, %esp
define void @test12() optsize {
entry:
%s = alloca %struct.s, align 4
call void @struct(%struct.s* %s, i32 2, i32 3, i32 4)
call void @inreg(i32 1, i32 2, i32 3, i32 4)
call void @good(i32 5, i32 6, i32 7, i32 8)
call void @struct(%struct.s* %s, i32 10, i32 11, i32 12)
call void @inreg(i32 9, i32 10, i32 11, i32 12)
ret void
}
@@ -337,12 +324,13 @@ entry:
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: calll _good
; NORMAL-NEXT: addl $16, %esp
; NORMAL-NEXT: subl $20, %esp
; NORMAL: movl $8, 16(%esp)
; NORMAL-NEXT: movl $7, 12(%esp)
; NORMAL-NEXT: movl $6, 8(%esp)
; NORMAL-NEXT: calll _struct
; NORMAL-NEXT: addl $20, %esp
; NORMAL-NEXT: subl $12, %esp
; NORMAL-NEXT: movl $8, 8(%esp)
; NORMAL-NEXT: movl $7, 4(%esp)
; NORMAL-NEXT: movl $5, (%esp)
; NORMAL-NEXT: movl $6, %eax
; NORMAL-NEXT: calll _inreg
; NORMAL-NEXT: addl $12, %esp
; NORMAL-NEXT: pushl $12
; NORMAL-NEXT: pushl $11
; NORMAL-NEXT: pushl $10
@@ -351,9 +339,8 @@ entry:
; NORMAL-NEXT: addl $16, %esp
define void @test12b() optsize {
entry:
%s = alloca %struct.s, align 4
call void @good(i32 1, i32 2, i32 3, i32 4)
call void @struct(%struct.s* %s, i32 6, i32 7, i32 8)
call void @good(i32 1, i32 2, i32 3, i32 4)
call void @inreg(i32 5, i32 6, i32 7, i32 8)
call void @good(i32 9, i32 10, i32 11, i32 12)
ret void
}
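
The ordering hazard that the removed classifyInstruction guards against (its Exit classification) can be sketched with a hypothetical sequence; the callee name _callee is made up, and this is not the reduced test case from the commit message:

    movl  %edx, 4(%esp)   # stack-argument store; candidate to become pushl %edx
    movl  $0, %edx        # defs %edx after the store above has already read it
    calll _callee

Because the pushes cannot replace the movs in place and are instead emitted in reverse order just before the call, converting the first store into pushl %edx would move the read of %edx after its redefinition, so the push would capture the clobbered value. That is why the removed code returns Exit whenever an instruction defs a register already used by a preceding stack mov.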