[X86] Use fresh MemOps when emitting VAARG64
Previously the custom inserter copied over MachineMemOperands verbatim, which caused MOV32rm to have store flags set and MOV32mr to have load flags set. This fixes some assertions being thrown with EXPENSIVE_CHECKS on.

Committed on behalf of @luke (Luke Lau)

Differential Revision: https://reviews.llvm.org/D62726

llvm-svn: 363268
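The essence of the fix is to stop reusing the instruction's single load+store memoperand on every emitted MOV and instead hand each MOV a clone carrying only the applicable flag. Below is a minimal sketch of that split, assuming a pseudo that has exactly one memoperand covering both its loads and its stores; the free-standing helper and its name are illustrative, the patch itself performs the same steps inline in the custom inserter.

#include <cassert>

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

// Split one load+store memoperand into a load-only and a store-only clone.
static void splitMemOperand(MachineInstr &MI, MachineFunction &MF,
                            MachineMemOperand *&LoadOnlyMMO,
                            MachineMemOperand *&StoreOnlyMMO) {
  assert(MI.hasOneMemOperand() && "expected a single memoperand");
  MachineMemOperand *OldMMO = MI.memoperands().front();

  // Re-create the operand twice, each time masking off the flag that does
  // not apply: pure loads (MOV32rm) must not advertise MOStore, and pure
  // stores (MOV32mr) must not advertise MOLoad.
  LoadOnlyMMO = MF.getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
  StoreOnlyMMO = MF.getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
}

Because the clones are derived from the original operand, the pointer info, size, and alignment it describes carry over; only the load/store flags differ.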
@@ -28728,10 +28728,18 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
   unsigned ArgMode = MI.getOperand(7).getImm();
   unsigned Align = MI.getOperand(8).getImm();
 
+  MachineFunction *MF = MBB->getParent();
+
   // Memory Reference
   assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
-  SmallVector<MachineMemOperand *, 1> MMOs(MI.memoperands_begin(),
-                                           MI.memoperands_end());
+
+  MachineMemOperand *OldMMO = MI.memoperands().front();
+
+  // Clone the MMO into two separate MMOs for loading and storing
+  MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
+      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
+  MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
+      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
 
   // Machine Information
   const TargetInstrInfo *TII = Subtarget.getInstrInfo();

@@ -28796,7 +28804,6 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
     OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
 
     const BasicBlock *LLVM_BB = MBB->getBasicBlock();
-    MachineFunction *MF = MBB->getParent();
     overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
     offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
     endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

@@ -28829,7 +28836,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
         .add(Index)
         .addDisp(Disp, UseFPOffset ? 4 : 0)
         .add(Segment)
-        .setMemRefs(MMOs);
+        .setMemRefs(LoadOnlyMMO);
 
     // Check if there is enough room left to pull this argument.
     BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))

@@ -28854,7 +28861,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
         .add(Index)
         .addDisp(Disp, 16)
         .add(Segment)
-        .setMemRefs(MMOs);
+        .setMemRefs(LoadOnlyMMO);
 
     // Zero-extend the offset
     unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);

@@ -28882,7 +28889,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
         .addDisp(Disp, UseFPOffset ? 4 : 0)
         .add(Segment)
         .addReg(NextOffsetReg)
-        .setMemRefs(MMOs);
+        .setMemRefs(StoreOnlyMMO);
 
     // Jump to endMBB
     BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))

@@ -28901,7 +28908,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
       .add(Index)
       .addDisp(Disp, 8)
       .add(Segment)
-      .setMemRefs(MMOs);
+      .setMemRefs(LoadOnlyMMO);
 
   // If we need to align it, do so. Otherwise, just copy the address
   // to OverflowDestReg.

@@ -28938,7 +28945,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
       .addDisp(Disp, 8)
      .add(Segment)
      .addReg(NextAddrReg)
-      .setMemRefs(MMOs);
+      .setMemRefs(StoreOnlyMMO);
 
   // If we branched, emit the PHI to the front of endMBB.
   if (offsetMBB) {