[X86] Use fresh MemOps when emitting VAARG64
Previously it copied over the MachineMemOperands verbatim, which caused MOV32rm to have store flags set and MOV32mr to have load flags set. This fixes some assertions being thrown when EXPENSIVE_CHECKS is enabled.

Committed on behalf of @luke (Luke Lau)

Differential Revision: https://reviews.llvm.org/D62726

llvm-svn: 363268
commit 757a2f13fd
parent 1278a19282
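The gist of the change: instead of reattaching the instruction's combined load+store memoperand to both the emitted loads and the emitted stores, the expansion now clones it into a load-only and a store-only memoperand by masking off the opposite flag. A minimal standalone sketch of that pattern follows; the helper name splitLoadStoreMMO is illustrative only, and it assumes the getMachineMemOperand(MMO, Flags) overload that the patch itself calls.

    #include <utility>
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineMemOperand.h"

    using namespace llvm;

    // Derive per-direction memoperands from a combined load+store MMO, so the
    // expanded load no longer carries MOStore and the expanded store no longer
    // carries MOLoad (the mismatch that tripped EXPENSIVE_CHECKS).
    static std::pair<MachineMemOperand *, MachineMemOperand *>
    splitLoadStoreMMO(MachineFunction &MF, MachineMemOperand *OldMMO) {
      MachineMemOperand *LoadOnlyMMO = MF.getMachineMemOperand(
          OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
      MachineMemOperand *StoreOnlyMMO = MF.getMachineMemOperand(
          OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
      return {LoadOnlyMMO, StoreOnlyMMO};
    }

The load-only copy is then attached to the emitted loads (e.g. the MOV32rm the commit message mentions) and the store-only copy to the emitted stores (e.g. MOV32mr), as the diff below shows.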
@@ -28728,10 +28728,18 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
   unsigned ArgMode = MI.getOperand(7).getImm();
   unsigned Align = MI.getOperand(8).getImm();
 
+  MachineFunction *MF = MBB->getParent();
+
   // Memory Reference
   assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
-  SmallVector<MachineMemOperand *, 1> MMOs(MI.memoperands_begin(),
-                                           MI.memoperands_end());
+
+  MachineMemOperand *OldMMO = MI.memoperands().front();
+
+  // Clone the MMO into two separate MMOs for loading and storing
+  MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
+      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
+  MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
+      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
 
   // Machine Information
   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
@@ -28796,7 +28804,6 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
     OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
 
     const BasicBlock *LLVM_BB = MBB->getBasicBlock();
-    MachineFunction *MF = MBB->getParent();
     overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
     offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
     endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
@@ -28829,7 +28836,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
         .add(Index)
         .addDisp(Disp, UseFPOffset ? 4 : 0)
         .add(Segment)
-        .setMemRefs(MMOs);
+        .setMemRefs(LoadOnlyMMO);
 
     // Check if there is enough room left to pull this argument.
     BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
@@ -28854,7 +28861,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
         .add(Index)
         .addDisp(Disp, 16)
         .add(Segment)
-        .setMemRefs(MMOs);
+        .setMemRefs(LoadOnlyMMO);
 
     // Zero-extend the offset
     unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
@@ -28882,7 +28889,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
         .addDisp(Disp, UseFPOffset ? 4 : 0)
         .add(Segment)
         .addReg(NextOffsetReg)
-        .setMemRefs(MMOs);
+        .setMemRefs(StoreOnlyMMO);
 
     // Jump to endMBB
     BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
@@ -28901,7 +28908,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
       .add(Index)
       .addDisp(Disp, 8)
       .add(Segment)
-      .setMemRefs(MMOs);
+      .setMemRefs(LoadOnlyMMO);
 
   // If we need to align it, do so. Otherwise, just copy the address
   // to OverflowDestReg.
@@ -28938,7 +28945,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
       .addDisp(Disp, 8)
      .add(Segment)
      .addReg(NextAddrReg)
-      .setMemRefs(MMOs);
+      .setMemRefs(StoreOnlyMMO);
 
   // If we branched, emit the PHI to the front of endMBB.
   if (offsetMBB) {