2003-01-13 08:26:36 +08:00
|
|
|
//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
|
2005-04-22 06:55:34 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2005-04-22 06:55:34 +08:00
|
|
|
//
|
2003-10-21 03:43:21 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2002-10-29 07:55:33 +08:00
|
|
|
//
|
2005-01-19 14:53:34 +08:00
|
|
|
// This file implements the TargetInstrInfo class.
|
2002-10-29 07:55:33 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-11-08 09:01:31 +08:00
|
|
|
#include "llvm/CodeGen/TargetInstrInfo.h"
|
2020-05-02 21:34:53 +08:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2012-11-28 10:35:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2013-11-29 11:07:54 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2012-11-28 10:35:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2020-01-15 01:00:32 +08:00
|
|
|
#include "llvm/CodeGen/MachineScheduler.h"
|
2012-11-28 10:35:13 +08:00
|
|
|
#include "llvm/CodeGen/PseudoSourceValue.h"
|
|
|
|
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
|
2013-11-29 11:07:54 +08:00
|
|
|
#include "llvm/CodeGen/StackMaps.h"
|
2017-11-08 09:01:31 +08:00
|
|
|
#include "llvm/CodeGen/TargetFrameLowering.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetLowering.h"
|
|
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
2015-06-13 11:42:11 +08:00
|
|
|
#include "llvm/CodeGen/TargetSchedule.h"
|
2013-11-17 09:36:23 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2019-08-01 00:51:28 +08:00
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
2010-10-06 14:27:31 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2011-06-29 09:14:12 +08:00
|
|
|
#include "llvm/MC/MCInstrItineraries.h"
|
2012-11-28 10:35:13 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2009-08-02 12:58:19 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2012-11-28 10:35:13 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2010-12-20 04:43:38 +08:00
|
|
|
#include <cctype>
|
2016-02-03 02:20:45 +08:00
|
|
|
|
2005-01-19 14:53:34 +08:00
|
|
|
using namespace llvm;
|
2002-10-29 07:55:33 +08:00
|
|
|
|
2012-11-28 10:35:13 +08:00
|
|
|
// Command-line flag to turn off hazard detection in the pre-RA scheduler;
// hidden and off by default.
static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));
|
2009-08-02 13:20:37 +08:00
|
|
|
|
2006-12-09 02:45:48 +08:00
|
|
|
// Provide an out-of-line definition for the (empty) virtual destructor.
TargetInstrInfo::~TargetInstrInfo() {
}
|
|
|
|
|
2011-06-28 05:26:13 +08:00
|
|
|
/// Return the register class constraint for operand OpNum of the instruction
/// described by MCID, or nullptr when the operand has no fixed class.
const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  // Out-of-range operand index: nothing to report.
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  const short RCId = MCID.OpInfo[OpNum].RegClass;

  // Pointer-like operands defer to the target for the actual class.
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RCId);

  // A negative id means no fixed register class (e.g. INSERT_SUBREG).
  if (RCId < 0)
    return nullptr;

  // Plain register-class operand: look it up in the register info.
  return TRI->getRegClass(RCId);
}
|
|
|
|
|
2009-08-02 12:58:19 +08:00
|
|
|
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // Default implementation: targets that need noop insertion must override.
  llvm_unreachable("Target didn't implement insertNoop!");
}
|
|
|
|
|
2017-09-28 17:31:46 +08:00
|
|
|
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
|
|
|
|
return strncmp(Str, MAI.getCommentString().data(),
|
|
|
|
MAI.getCommentString().size()) == 0;
|
|
|
|
}
|
|
|
|
|
2009-08-02 13:20:37 +08:00
|
|
|
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
  const char *Str,
  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;   // True when the cursor is at a new instruction.
  unsigned Length = 0;       // Accumulated byte estimate.
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    // Newlines and the target separator string start a new instruction.
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    // First non-whitespace char of an instruction: count it.
    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      // Each instruction is conservatively assumed to be MaxInstLength bytes.
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        // Clamp a negative size to zero.
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        // Skip trailing whitespace up to (but not past) the newline.
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
|
2012-11-28 10:35:13 +08:00
|
|
|
|
|
|
|
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;   // Advance before erasing to keep the iterator valid.
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
/// Default implementation of operand commuting: swap the register operands
/// Idx1 and Idx2 of MI (in place, or in a clone when NewMI is true),
/// carrying all per-operand flags along and fixing up a tied def.
/// Returns the commuted instruction, or nullptr if it cannot be handled here.
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  // The (void) casts silence unused-variable warnings in release builds,
  // where the assert below compiles away.
  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  // Capture the registers, subregisters, and flags of both operands (and
  // the def, if any) before mutating anything.
  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If destination is tied to either of the commuted source register, then
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  // Write the swapped registers/subregs/flags back: Idx2 receives operand
  // 1's state and vice versa.
  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
/// Public entry point for commuting. Resolves any "don't care" operand
/// indices via findCommutedOpIndices() and then defers to
/// commuteInstructionImpl(). Returns nullptr when no commutable operand
/// pair can be found.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // When the caller leaves either index unspecified, ask
  // findCommutedOpIndices() to pick a commutable operand pair.
  const bool AnyIdxUnspecified =
      OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex;
  if (AnyIdxUnspecified && !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
|
|
|
|
|
|
|
|
/// Resolve ResultIdx1/ResultIdx2 against the commutable pair
/// (CommutableOpIdx1, CommutableOpIdx2): each result index may be a concrete
/// index or the CommuteAnyOperandIndex wildcard. Returns true iff the two
/// result indices can be made to name the commutable pair (in either order),
/// filling in wildcards along the way.
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  const bool Wild1 = ResultIdx1 == CommuteAnyOperandIndex;
  const bool Wild2 = ResultIdx2 == CommuteAnyOperandIndex;

  // Both unconstrained: just take the commutable pair as-is.
  if (Wild1 && Wild2) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
    return true;
  }

  // Only ResultIdx1 is a wildcard: it must become the partner of ResultIdx2.
  if (Wild1) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
    return true;
  }

  // Only ResultIdx2 is a wildcard: it must become the partner of ResultIdx1.
  if (Wild2) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
    return true;
  }

  // Both fixed: accept either pairing of the commutable operands.
  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
         (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
}
|
|
|
|
|
2019-09-25 22:55:57 +08:00
|
|
|
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
|
2012-11-28 10:35:13 +08:00
|
|
|
unsigned &SrcOpIdx1,
|
|
|
|
unsigned &SrcOpIdx2) const {
|
2016-06-30 08:01:54 +08:00
|
|
|
assert(!MI.isBundle() &&
|
2012-11-28 10:35:13 +08:00
|
|
|
"TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
const MCInstrDesc &MCID = MI.getDesc();
|
2012-11-28 10:35:13 +08:00
|
|
|
if (!MCID.isCommutable())
|
|
|
|
return false;
|
2015-09-29 04:33:22 +08:00
|
|
|
|
2012-11-28 10:35:13 +08:00
|
|
|
// This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
|
|
|
|
// is not true, then the target must implement this.
|
2015-09-29 04:33:22 +08:00
|
|
|
unsigned CommutableOpIdx1 = MCID.getNumDefs();
|
|
|
|
unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
|
|
|
|
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
|
|
|
|
CommutableOpIdx1, CommutableOpIdx2))
|
|
|
|
return false;
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
|
2012-11-28 10:35:13 +08:00
|
|
|
// No idea.
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
/// Return true if MI is a terminator that is not predicated: either a
/// conditional branch, a terminator that cannot be predicated at all, or a
/// predicable terminator that currently carries no predicate.
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  // Only terminators qualify.
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  // An instruction that cannot be predicated counts as unpredicated.
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}
|
|
|
|
|
2015-06-12 03:30:37 +08:00
|
|
|
bool TargetInstrInfo::PredicateInstruction(
|
2016-02-23 10:46:52 +08:00
|
|
|
MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
|
2012-11-28 10:35:13 +08:00
|
|
|
bool MadeChange = false;
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
assert(!MI.isBundle() &&
|
2012-11-28 10:35:13 +08:00
|
|
|
"TargetInstrInfo::PredicateInstruction() can't handle bundles");
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
const MCInstrDesc &MCID = MI.getDesc();
|
|
|
|
if (!MI.isPredicable())
|
2012-11-28 10:35:13 +08:00
|
|
|
return false;
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
|
2012-11-28 10:35:13 +08:00
|
|
|
if (MCID.OpInfo[i].isPredicate()) {
|
2016-02-23 10:46:52 +08:00
|
|
|
MachineOperand &MO = MI.getOperand(i);
|
2012-11-28 10:35:13 +08:00
|
|
|
if (MO.isReg()) {
|
|
|
|
MO.setReg(Pred[j].getReg());
|
|
|
|
MadeChange = true;
|
|
|
|
} else if (MO.isImm()) {
|
|
|
|
MO.setImm(Pred[j].getImm());
|
|
|
|
MadeChange = true;
|
|
|
|
} else if (MO.isMBB()) {
|
|
|
|
MO.setMBB(Pred[j].getMBB());
|
|
|
|
MadeChange = true;
|
|
|
|
}
|
|
|
|
++j;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return MadeChange;
|
|
|
|
}
|
|
|
|
|
2018-09-03 17:15:58 +08:00
|
|
|
bool TargetInstrInfo::hasLoadFromStackSlot(
|
2018-09-05 16:59:50 +08:00
|
|
|
const MachineInstr &MI,
|
|
|
|
SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
|
2018-09-03 17:15:58 +08:00
|
|
|
size_t StartSize = Accesses.size();
|
2016-06-30 08:01:54 +08:00
|
|
|
for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
|
|
|
|
oe = MI.memoperands_end();
|
|
|
|
o != oe; ++o) {
|
2018-09-05 16:59:50 +08:00
|
|
|
if ((*o)->isLoad() &&
|
|
|
|
dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
|
|
|
|
Accesses.push_back(*o);
|
2012-11-28 10:35:13 +08:00
|
|
|
}
|
2018-09-03 17:15:58 +08:00
|
|
|
return Accesses.size() != StartSize;
|
2012-11-28 10:35:13 +08:00
|
|
|
}
|
|
|
|
|
2018-09-03 17:15:58 +08:00
|
|
|
bool TargetInstrInfo::hasStoreToStackSlot(
|
2018-09-05 16:59:50 +08:00
|
|
|
const MachineInstr &MI,
|
|
|
|
SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
|
2018-09-03 17:15:58 +08:00
|
|
|
size_t StartSize = Accesses.size();
|
2016-06-30 08:01:54 +08:00
|
|
|
for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
|
|
|
|
oe = MI.memoperands_end();
|
|
|
|
o != oe; ++o) {
|
2018-09-05 16:59:50 +08:00
|
|
|
if ((*o)->isStore() &&
|
|
|
|
dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
|
|
|
|
Accesses.push_back(*o);
|
2012-11-28 10:35:13 +08:00
|
|
|
}
|
2018-09-03 17:15:58 +08:00
|
|
|
return Accesses.size() != StartSize;
|
2012-11-28 10:35:13 +08:00
|
|
|
}
|
|
|
|
|
2013-11-17 09:36:23 +08:00
|
|
|
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
|
|
|
|
unsigned SubIdx, unsigned &Size,
|
|
|
|
unsigned &Offset,
|
2015-03-20 07:06:21 +08:00
|
|
|
const MachineFunction &MF) const {
|
2017-04-25 02:55:33 +08:00
|
|
|
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
|
2013-11-17 09:36:23 +08:00
|
|
|
if (!SubIdx) {
|
2017-04-25 02:55:33 +08:00
|
|
|
Size = TRI->getSpillSize(*RC);
|
2013-11-17 09:36:23 +08:00
|
|
|
Offset = 0;
|
|
|
|
return true;
|
|
|
|
}
|
2015-03-20 07:06:21 +08:00
|
|
|
unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
|
2018-08-09 23:19:07 +08:00
|
|
|
// Convert bit size to byte size.
|
2013-11-17 09:36:23 +08:00
|
|
|
if (BitSize % 8)
|
|
|
|
return false;
|
|
|
|
|
2015-03-20 07:06:21 +08:00
|
|
|
int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
|
2013-11-17 09:36:23 +08:00
|
|
|
if (BitOffset < 0 || BitOffset % 8)
|
|
|
|
return false;
|
|
|
|
|
2019-09-23 19:36:24 +08:00
|
|
|
Size = BitSize / 8;
|
2013-11-17 09:36:23 +08:00
|
|
|
Offset = (unsigned)BitOffset / 8;
|
|
|
|
|
2017-04-25 02:55:33 +08:00
|
|
|
assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
|
2013-11-17 09:36:23 +08:00
|
|
|
|
2015-07-16 14:11:10 +08:00
|
|
|
if (!MF.getDataLayout().isLittleEndian()) {
|
2017-04-25 02:55:33 +08:00
|
|
|
Offset = TRI->getSpillSize(*RC) - (Offset + Size);
|
2013-11-17 09:36:23 +08:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-11-28 10:35:13 +08:00
|
|
|
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::iterator I,
|
2020-04-04 01:22:51 +08:00
|
|
|
Register DestReg, unsigned SubIdx,
|
2016-06-30 08:01:54 +08:00
|
|
|
const MachineInstr &Orig,
|
2012-11-28 10:35:13 +08:00
|
|
|
const TargetRegisterInfo &TRI) const {
|
2016-06-30 08:01:54 +08:00
|
|
|
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
|
2012-11-28 10:35:13 +08:00
|
|
|
MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
|
|
|
|
MBB.insert(I, MI);
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
/// Default check for whether two instructions produce the same value: they
/// must be identical, ignoring which virtual registers they define. MRI is
/// unused here but available to target overrides.
bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
|
|
|
|
|
2017-08-23 07:56:30 +08:00
|
|
|
/// Clone Orig (including any bundled instructions) into MBB ahead of
/// InsertBefore and return the clone.
MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  return MBB.getParent()->CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}
|
|
|
|
|
|
|
|
// If the COPY instruction in MI can be folded to a stack operation, return
|
|
|
|
// the register class to use.
|
2016-06-30 08:01:54 +08:00
|
|
|
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
|
2012-11-28 10:35:13 +08:00
|
|
|
unsigned FoldIdx) {
|
2016-06-30 08:01:54 +08:00
|
|
|
assert(MI.isCopy() && "MI must be a COPY instruction");
|
|
|
|
if (MI.getNumOperands() != 2)
|
2014-04-14 08:51:57 +08:00
|
|
|
return nullptr;
|
2012-11-28 10:35:13 +08:00
|
|
|
assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
|
|
|
|
const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
|
2012-11-28 10:35:13 +08:00
|
|
|
|
|
|
|
if (FoldOp.getSubReg() || LiveOp.getSubReg())
|
2014-04-14 08:51:57 +08:00
|
|
|
return nullptr;
|
2012-11-28 10:35:13 +08:00
|
|
|
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register FoldReg = FoldOp.getReg();
|
|
|
|
Register LiveReg = LiveOp.getReg();
|
2012-11-28 10:35:13 +08:00
|
|
|
|
2019-08-02 07:27:28 +08:00
|
|
|
assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");
|
2012-11-28 10:35:13 +08:00
|
|
|
|
2017-10-11 07:50:49 +08:00
|
|
|
const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
|
2012-11-28 10:35:13 +08:00
|
|
|
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
|
|
|
|
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isPhysicalRegister(LiveOp.getReg()))
|
2014-04-14 08:51:57 +08:00
|
|
|
return RC->contains(LiveOp.getReg()) ? RC : nullptr;
|
2012-11-28 10:35:13 +08:00
|
|
|
|
|
|
|
if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
|
|
|
|
return RC;
|
|
|
|
|
|
|
|
// FIXME: Allow folding when register classes are memory compatible.
|
2014-04-14 08:51:57 +08:00
|
|
|
return nullptr;
|
2012-11-28 10:35:13 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 05:48:41 +08:00
|
|
|
void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  // Default implementation: targets that need a noop encoding must override.
  llvm_unreachable("Not implemented");
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
|
2015-02-28 20:04:00 +08:00
|
|
|
ArrayRef<unsigned> Ops, int FrameIndex,
|
2013-11-29 11:07:54 +08:00
|
|
|
const TargetInstrInfo &TII) {
|
|
|
|
unsigned StartIdx = 0;
|
2020-06-10 21:01:19 +08:00
|
|
|
unsigned NumDefs = 0;
|
2016-06-30 08:01:54 +08:00
|
|
|
switch (MI.getOpcode()) {
|
2016-08-24 05:21:43 +08:00
|
|
|
case TargetOpcode::STACKMAP: {
|
|
|
|
// StackMapLiveValues are foldable
|
2016-08-30 09:38:59 +08:00
|
|
|
StartIdx = StackMapOpers(&MI).getVarIdx();
|
2013-11-29 11:07:54 +08:00
|
|
|
break;
|
2016-08-24 05:21:43 +08:00
|
|
|
}
|
2013-11-29 11:07:54 +08:00
|
|
|
case TargetOpcode::PATCHPOINT: {
|
2016-08-24 05:21:43 +08:00
|
|
|
// For PatchPoint, the call args are not foldable (even if reported in the
|
|
|
|
// stackmap e.g. via anyregcc).
|
2016-08-30 09:38:59 +08:00
|
|
|
StartIdx = PatchPointOpers(&MI).getVarIdx();
|
2013-11-29 11:07:54 +08:00
|
|
|
break;
|
|
|
|
}
|
[statepoints][experimental] Add support for live-in semantics of values in deopt bundles
This is a first step towards supporting deopt value lowering and reporting entirely with the register allocator. I hope to build on this in the near future to support live-on-return semantics, but I have a use case which allows me to test and investigate code quality with just the live-in semantics so I've chosen to start there. For those curious, my use cases is our implementation of the "__llvm_deoptimize" function we bind to @llvm.deoptimize. I'm choosing not to hard code that fact in the patch and instead make it configurable via function attributes.
The basic approach here is modelled on what is done for the "Live In" values on stackmaps and patchpoints. (A secondary goal here is to remove one of the last barriers to merging the pseudo instructions.) We start by adding the operands directly to the STATEPOINT SDNode. Once we've lowered to MI, we extend the remat logic used by the register allocator to fold virtual register uses into StackMap::Indirect entries as needed. This does rely on the fact that the register allocator rematerializes. If it didn't along some code path, we could end up with more vregs than physical registers and fail to allocate.
Today, we *only* fold in the register allocator. This can create some weird effects when combined with arguments passed on the stack because we don't fold them appropriately. I have an idea how to fix that, but it needs this patch in place to work on that effectively. (There's some weird interaction with the scheduler as well, more investigation needed.)
My near term plan is to land this patch off-by-default, experiment in my local tree to identify any correctness issues and then start fixing codegen problems one by one as I find them. Once I have the live-in lowering fully working (both correctness and code quality), I'm hoping to move on to the live-on-return semantics. Note: I don't have any *known* miscompiles with this patch enabled, but I'm pretty sure I'll find at least a couple. Thus, the "experimental" tag and the fact it's off by default.
Differential Revision: https://reviews.llvm.org/D24000
llvm-svn: 280250
2016-08-31 23:12:17 +08:00
|
|
|
case TargetOpcode::STATEPOINT: {
|
|
|
|
// For statepoints, fold deopt and gc arguments, but not call arguments.
|
|
|
|
StartIdx = StatepointOpers(&MI).getVarIdx();
|
2020-06-10 21:01:19 +08:00
|
|
|
NumDefs = MI.getNumDefs();
|
[statepoints][experimental] Add support for live-in semantics of values in deopt bundles
This is a first step towards supporting deopt value lowering and reporting entirely with the register allocator. I hope to build on this in the near future to support live-on-return semantics, but I have a use case which allows me to test and investigate code quality with just the live-in semantics so I've chosen to start there. For those curious, my use cases is our implementation of the "__llvm_deoptimize" function we bind to @llvm.deoptimize. I'm choosing not to hard code that fact in the patch and instead make it configurable via function attributes.
The basic approach here is modelled on what is done for the "Live In" values on stackmaps and patchpoints. (A secondary goal here is to remove one of the last barriers to merging the pseudo instructions.) We start by adding the operands directly to the STATEPOINT SDNode. Once we've lowered to MI, we extend the remat logic used by the register allocator to fold virtual register uses into StackMap::Indirect entries as needed. This does rely on the fact that the register allocator rematerializes. If it didn't along some code path, we could end up with more vregs than physical registers and fail to allocate.
Today, we *only* fold in the register allocator. This can create some weird effects when combined with arguments passed on the stack because we don't fold them appropriately. I have an idea how to fix that, but it needs this patch in place to work on that effectively. (There's some weird interaction with the scheduler as well, more investigation needed.)
My near term plan is to land this patch off-by-default, experiment in my local tree to identify any correctness issues and then start fixing codegen problems one by one as I find them. Once I have the live-in lowering fully working (both correctness and code quality), I'm hoping to move on to the live-on-return semantics. Note: I don't have any *known* miscompiles with this patch enabled, but I'm pretty sure I'll find at least a couple. Thus, the "experimental" tag and the fact it's off by default.
Differential Revision: https://reviews.llvm.org/D24000
llvm-svn: 280250
2016-08-31 23:12:17 +08:00
|
|
|
break;
|
|
|
|
}
|
2013-11-29 11:07:54 +08:00
|
|
|
default:
|
|
|
|
llvm_unreachable("unexpected stackmap opcode");
|
|
|
|
}
|
|
|
|
|
2020-06-10 21:01:19 +08:00
|
|
|
unsigned DefToFoldIdx = MI.getNumOperands();
|
|
|
|
|
2013-11-29 11:07:54 +08:00
|
|
|
// Return false if any operands requested for folding are not foldable (not
|
|
|
|
// part of the stackmap's live values).
|
2015-02-28 20:04:00 +08:00
|
|
|
for (unsigned Op : Ops) {
|
2020-06-10 21:01:19 +08:00
|
|
|
if (Op < NumDefs) {
|
|
|
|
assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
|
|
|
|
DefToFoldIdx = Op;
|
|
|
|
} else if (Op < StartIdx) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2020-08-27 22:20:25 +08:00
|
|
|
if (MI.getOperand(Op).isTied())
|
|
|
|
return nullptr;
|
2013-11-29 11:07:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
MachineInstr *NewMI =
|
2016-06-30 08:01:54 +08:00
|
|
|
MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
|
2013-11-29 11:07:54 +08:00
|
|
|
MachineInstrBuilder MIB(MF, NewMI);
|
|
|
|
|
|
|
|
// No need to fold return, the meta data, and function arguments
|
|
|
|
for (unsigned i = 0; i < StartIdx; ++i)
|
2020-06-10 21:01:19 +08:00
|
|
|
if (i != DefToFoldIdx)
|
|
|
|
MIB.add(MI.getOperand(i));
|
2013-11-29 11:07:54 +08:00
|
|
|
|
2020-06-10 21:01:19 +08:00
|
|
|
for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
|
2016-06-30 08:01:54 +08:00
|
|
|
MachineOperand &MO = MI.getOperand(i);
|
2020-06-10 21:01:19 +08:00
|
|
|
unsigned TiedTo = e;
|
|
|
|
(void)MI.isRegTiedToDefOperand(i, &TiedTo);
|
|
|
|
|
2016-08-12 06:21:41 +08:00
|
|
|
if (is_contained(Ops, i)) {
|
2020-06-10 21:01:19 +08:00
|
|
|
assert(TiedTo == e && "Cannot fold tied operands");
|
2013-11-29 11:07:54 +08:00
|
|
|
unsigned SpillSize;
|
|
|
|
unsigned SpillOffset;
|
|
|
|
// Compute the spill slot size and offset.
|
|
|
|
const TargetRegisterClass *RC =
|
|
|
|
MF.getRegInfo().getRegClass(MO.getReg());
|
2015-03-20 07:06:21 +08:00
|
|
|
bool Valid =
|
|
|
|
TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
|
2013-11-29 11:07:54 +08:00
|
|
|
if (!Valid)
|
|
|
|
report_fatal_error("cannot spill patchpoint subregister operand");
|
|
|
|
MIB.addImm(StackMaps::IndirectMemRefOp);
|
|
|
|
MIB.addImm(SpillSize);
|
|
|
|
MIB.addFrameIndex(FrameIndex);
|
2013-12-07 11:30:59 +08:00
|
|
|
MIB.addImm(SpillOffset);
|
2020-06-10 21:01:19 +08:00
|
|
|
} else {
|
2017-01-13 17:58:52 +08:00
|
|
|
MIB.add(MO);
|
2020-06-10 21:01:19 +08:00
|
|
|
if (TiedTo < e) {
|
|
|
|
assert(TiedTo < NumDefs && "Bad tied operand");
|
|
|
|
if (TiedTo > DefToFoldIdx)
|
|
|
|
--TiedTo;
|
|
|
|
NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
|
|
|
|
}
|
|
|
|
}
|
2013-11-29 11:07:54 +08:00
|
|
|
}
|
|
|
|
return NewMI;
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
/// Attempt to fold the register operands listed in \p Ops of \p MI into
/// references to stack slot \p FI, producing a new instruction that accesses
/// memory directly. Returns the folded instruction (already inserted into
/// MI's block) or nullptr if no folding was possible. \p MI itself is left in
/// place; the caller is responsible for removing it on success.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  // Derive the memory flags from the operands being folded: folding a def
  // becomes a store to the slot, folding a use becomes a load from it.
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    // Loads: take the widest access among the folded operands. A subregister
    // use may read fewer bytes than the full slot.
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        // Only trust the subreg size if it is a whole number of bytes.
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  // The other operand of the two-operand COPY supplies (store) or receives
  // (load) the register value.
  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  // The spill/reload was inserted immediately before Pos; return it.
  return &*--Pos;
}
|
|
|
|
|
2017-10-02 22:03:17 +08:00
|
|
|
/// Attempt to fold the use operands listed in \p Ops of \p MI with the value
/// produced by \p LoadMI, a foldable load. Returns the folded instruction
/// (already inserted into MI's block) or nullptr if no folding was possible.
/// \p MI and \p LoadMI are left in place; the caller removes them on success.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  // Folding a load can only replace uses; a def would need a store.
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineMemOperand *MMO : LoadMI.memoperands())
      NewMI->addMemOperand(MF, MMO);
  }
  return NewMI;
}
|
|
|
|
|
2015-09-21 23:09:11 +08:00
|
|
|
bool TargetInstrInfo::hasReassociableOperands(
|
|
|
|
const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
|
|
|
|
const MachineOperand &Op1 = Inst.getOperand(1);
|
|
|
|
const MachineOperand &Op2 = Inst.getOperand(2);
|
|
|
|
const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
|
|
|
|
|
|
|
|
// We need virtual register definitions for the operands that we will
|
|
|
|
// reassociate.
|
|
|
|
MachineInstr *MI1 = nullptr;
|
|
|
|
MachineInstr *MI2 = nullptr;
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
|
2015-09-21 23:09:11 +08:00
|
|
|
MI1 = MRI.getUniqueVRegDef(Op1.getReg());
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
|
2015-09-21 23:09:11 +08:00
|
|
|
MI2 = MRI.getUniqueVRegDef(Op2.getReg());
|
|
|
|
|
|
|
|
// And they need to be in the trace (otherwise, they won't have a depth).
|
2015-10-25 07:11:13 +08:00
|
|
|
return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
|
2015-09-21 23:09:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
|
|
|
|
bool &Commuted) const {
|
|
|
|
const MachineBasicBlock *MBB = Inst.getParent();
|
|
|
|
const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
|
|
|
|
MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
|
|
|
|
MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
|
|
|
|
unsigned AssocOpcode = Inst.getOpcode();
|
|
|
|
|
|
|
|
// If only one operand has the same opcode and it's the second source operand,
|
|
|
|
// the operands must be commuted.
|
|
|
|
Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
|
|
|
|
if (Commuted)
|
|
|
|
std::swap(MI1, MI2);
|
|
|
|
|
|
|
|
// 1. The previous instruction must be the same type as Inst.
|
[x86] use instruction-level fast-math-flags to drive MachineCombiner
The code changes here are hopefully straightforward:
1. Use MachineInstruction flags to decide if FP ops can be reassociated
(use both "reassoc" and "nsz" to be consistent with IR transforms;
we probably don't need "nsz", but that's a safer interpretation of
the FMF).
2. Check that both nodes allow reassociation to change instructions.
This is a stronger requirement than we've usually implemented in
IR/DAG, but this is needed to solve the motivating bug (see below),
and it seems unlikely to impede optimization at this late stage.
3. Intersect/propagate MachineIR flags to enable further reassociation
in MachineCombiner.
We managed to make MachineCombiner flexible enough that no changes are
needed to that pass itself. So this patch should only affect x86
(assuming no other targets have implemented the hooks using MachineIR
flags yet).
The motivating example in PR43609 is another case of fast-math transforms
interacting badly with special FP ops created during lowering:
https://bugs.llvm.org/show_bug.cgi?id=43609
The special fadd ops used for converting int to FP assume that they will
not be altered, so those are created without FMF.
However, the MachineCombiner pass was being enabled for FP ops using the
global/function-level TargetOption for "UnsafeFPMath". We managed to run
instruction/node-level FMF all the way down to MachineIR sometime in the
last 1-2 years though, so we can do better now.
The test diffs require some explanation:
1. llvm/test/CodeGen/X86/fmf-flags.ll - no target option for unsafe math was
specified here, so MachineCombiner kicks in where it did not previously;
to make it behave consistently, we need to specify a CPU schedule model,
so use the default model, and there are no code diffs.
2. llvm/test/CodeGen/X86/machine-combiner.ll - replace the target option for
unsafe math with the equivalent IR-level flags, and there are no code diffs;
we can't remove the NaN/nsz options because those are still used to drive
x86 fmin/fmax codegen (special SDAG opcodes).
3. llvm/test/CodeGen/X86/pow.ll - similar to #1
4. llvm/test/CodeGen/X86/sqrt-fastmath.ll - similar to #1, but MachineCombiner
does some reassociation of the estimate sequence ops; presumably these are
perf wins based on latency/throughput (and we get some reduction of move
instructions too); I'm not sure how it affects numerical accuracy, but the
test reflects reality better now because we would expect MachineCombiner to
be enabled if the IR was generated via something like "-ffast-math" with clang.
5. llvm/test/CodeGen/X86/vec_int_to_fp.ll - this is the test added to model PR43609;
the fadds are not reassociated now, so we should get the expected results.
6. llvm/test/CodeGen/X86/vector-reduce-fadd-fast.ll - similar to #1
7. llvm/test/CodeGen/X86/vector-reduce-fmul-fast.ll - similar to #1
Differential Revision: https://reviews.llvm.org/D74851
2020-02-28 04:19:37 +08:00
|
|
|
// 2. The previous instruction must also be associative/commutative (this can
|
|
|
|
// be different even for instructions with the same opcode if traits like
|
|
|
|
// fast-math-flags are included).
|
|
|
|
// 3. The previous instruction must have virtual register definitions for its
|
2015-09-21 23:09:11 +08:00
|
|
|
// operands in the same basic block as Inst.
|
[x86] use instruction-level fast-math-flags to drive MachineCombiner
The code changes here are hopefully straightforward:
1. Use MachineInstruction flags to decide if FP ops can be reassociated
(use both "reassoc" and "nsz" to be consistent with IR transforms;
we probably don't need "nsz", but that's a safer interpretation of
the FMF).
2. Check that both nodes allow reassociation to change instructions.
This is a stronger requirement than we've usually implemented in
IR/DAG, but this is needed to solve the motivating bug (see below),
and it seems unlikely to impede optimization at this late stage.
3. Intersect/propagate MachineIR flags to enable further reassociation
in MachineCombiner.
We managed to make MachineCombiner flexible enough that no changes are
needed to that pass itself. So this patch should only affect x86
(assuming no other targets have implemented the hooks using MachineIR
flags yet).
The motivating example in PR43609 is another case of fast-math transforms
interacting badly with special FP ops created during lowering:
https://bugs.llvm.org/show_bug.cgi?id=43609
The special fadd ops used for converting int to FP assume that they will
not be altered, so those are created without FMF.
However, the MachineCombiner pass was being enabled for FP ops using the
global/function-level TargetOption for "UnsafeFPMath". We managed to run
instruction/node-level FMF all the way down to MachineIR sometime in the
last 1-2 years though, so we can do better now.
The test diffs require some explanation:
1. llvm/test/CodeGen/X86/fmf-flags.ll - no target option for unsafe math was
specified here, so MachineCombiner kicks in where it did not previously;
to make it behave consistently, we need to specify a CPU schedule model,
so use the default model, and there are no code diffs.
2. llvm/test/CodeGen/X86/machine-combiner.ll - replace the target option for
unsafe math with the equivalent IR-level flags, and there are no code diffs;
we can't remove the NaN/nsz options because those are still used to drive
x86 fmin/fmax codegen (special SDAG opcodes).
3. llvm/test/CodeGen/X86/pow.ll - similar to #1
4. llvm/test/CodeGen/X86/sqrt-fastmath.ll - similar to #1, but MachineCombiner
does some reassociation of the estimate sequence ops; presumably these are
perf wins based on latency/throughput (and we get some reduction of move
instructions too); I'm not sure how it affects numerical accuracy, but the
test reflects reality better now because we would expect MachineCombiner to
be enabled if the IR was generated via something like "-ffast-math" with clang.
5. llvm/test/CodeGen/X86/vec_int_to_fp.ll - this is the test added to model PR43609;
the fadds are not reassociated now, so we should get the expected results.
6. llvm/test/CodeGen/X86/vector-reduce-fadd-fast.ll - similar to #1
7. llvm/test/CodeGen/X86/vector-reduce-fmul-fast.ll - similar to #1
Differential Revision: https://reviews.llvm.org/D74851
2020-02-28 04:19:37 +08:00
|
|
|
// 4. The previous instruction's result must only be used by Inst.
|
|
|
|
return MI1->getOpcode() == AssocOpcode && isAssociativeAndCommutative(*MI1) &&
|
2015-10-25 07:11:13 +08:00
|
|
|
hasReassociableOperands(*MI1, MBB) &&
|
|
|
|
MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
|
2015-09-21 23:09:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// 1. The operation must be associative and commutative.
|
|
|
|
// 2. The instruction must have virtual register definitions for its
|
|
|
|
// operands in the same basic block.
|
|
|
|
// 3. The instruction must have a reassociable sibling.
|
|
|
|
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
|
|
|
|
bool &Commuted) const {
|
2015-10-25 07:11:13 +08:00
|
|
|
return isAssociativeAndCommutative(Inst) &&
|
|
|
|
hasReassociableOperands(Inst, Inst.getParent()) &&
|
|
|
|
hasReassociableSibling(Inst, Commuted);
|
2015-09-21 23:09:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// The concept of the reassociation pass is that these operations can benefit
|
|
|
|
// from this kind of transformation:
|
|
|
|
//
|
|
|
|
// A = ? op ?
|
|
|
|
// B = A op X (Prev)
|
|
|
|
// C = B op Y (Root)
|
|
|
|
// -->
|
|
|
|
// A = ? op ?
|
|
|
|
// B = X op Y
|
|
|
|
// C = A op B
|
|
|
|
//
|
|
|
|
// breaking the dependency between A and B, allowing them to be executed in
|
|
|
|
// parallel (or back-to-back in a pipeline) instead of depending on each other.
|
|
|
|
|
|
|
|
// FIXME: This has the potential to be expensive (compile time) while not
|
|
|
|
// improving the code at all. Some ways to limit the overhead:
|
|
|
|
// 1. Track successful transforms; bail out if hit rate gets too low.
|
|
|
|
// 2. Only enable at -O3 or some other non-default optimization level.
|
|
|
|
// 3. Pre-screen pattern candidates here: if an operand of the previous
|
|
|
|
// instruction is known to not increase the critical path, then don't match
|
|
|
|
// that pattern.
|
|
|
|
bool TargetInstrInfo::getMachineCombinerPatterns(
|
|
|
|
MachineInstr &Root,
|
2015-11-06 03:34:57 +08:00
|
|
|
SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
|
2015-09-21 23:09:11 +08:00
|
|
|
bool Commute;
|
|
|
|
if (isReassociationCandidate(Root, Commute)) {
|
|
|
|
// We found a sequence of instructions that may be suitable for a
|
|
|
|
// reassociation of operands to increase ILP. Specify each commutation
|
|
|
|
// possibility for the Prev instruction in the sequence and let the
|
|
|
|
// machine combiner decide if changing the operands is worthwhile.
|
|
|
|
if (Commute) {
|
2015-11-06 03:34:57 +08:00
|
|
|
Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
|
|
|
|
Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
|
2015-09-21 23:09:11 +08:00
|
|
|
} else {
|
2015-11-06 03:34:57 +08:00
|
|
|
Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
|
|
|
|
Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
|
2015-09-21 23:09:11 +08:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2017-10-02 22:03:17 +08:00
|
|
|
|
2016-04-24 13:14:01 +08:00
|
|
|
/// Return true when a code sequence can improve loop throughput.
/// Conservative base implementation: no pattern is treated as a throughput
/// pattern. Presumably targets with knowledge of their pipelines override
/// this — confirm against the declaration in TargetInstrInfo.h.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}
|
2017-10-02 22:03:17 +08:00
|
|
|
|
2015-09-21 23:09:11 +08:00
|
|
|
/// Attempt the reassociation transformation to reduce critical path length.
|
|
|
|
/// See the above comments before getMachineCombinerPatterns().
|
|
|
|
void TargetInstrInfo::reassociateOps(
|
|
|
|
MachineInstr &Root, MachineInstr &Prev,
|
2015-11-06 03:34:57 +08:00
|
|
|
MachineCombinerPattern Pattern,
|
2015-09-21 23:09:11 +08:00
|
|
|
SmallVectorImpl<MachineInstr *> &InsInstrs,
|
|
|
|
SmallVectorImpl<MachineInstr *> &DelInstrs,
|
|
|
|
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
|
2017-10-11 07:50:49 +08:00
|
|
|
MachineFunction *MF = Root.getMF();
|
2015-09-21 23:09:11 +08:00
|
|
|
MachineRegisterInfo &MRI = MF->getRegInfo();
|
|
|
|
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
|
|
|
|
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
|
|
|
|
const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
|
|
|
|
|
|
|
|
// This array encodes the operand index for each parameter because the
|
|
|
|
// operands may be commuted. Each row corresponds to a pattern value,
|
|
|
|
// and each column specifies the index of A, B, X, Y.
|
|
|
|
unsigned OpIdx[4][4] = {
|
|
|
|
{ 1, 1, 2, 2 },
|
|
|
|
{ 1, 2, 2, 1 },
|
|
|
|
{ 2, 1, 1, 2 },
|
|
|
|
{ 2, 2, 1, 1 }
|
|
|
|
};
|
|
|
|
|
2015-11-06 03:34:57 +08:00
|
|
|
int Row;
|
|
|
|
switch (Pattern) {
|
|
|
|
case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
|
|
|
|
case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
|
|
|
|
case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
|
|
|
|
case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
|
|
|
|
default: llvm_unreachable("unexpected MachineCombinerPattern");
|
|
|
|
}
|
|
|
|
|
|
|
|
MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
|
|
|
|
MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
|
|
|
|
MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
|
|
|
|
MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
|
2015-09-21 23:09:11 +08:00
|
|
|
MachineOperand &OpC = Root.getOperand(0);
|
|
|
|
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register RegA = OpA.getReg();
|
|
|
|
Register RegB = OpB.getReg();
|
|
|
|
Register RegX = OpX.getReg();
|
|
|
|
Register RegY = OpY.getReg();
|
|
|
|
Register RegC = OpC.getReg();
|
2015-09-21 23:09:11 +08:00
|
|
|
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(RegA))
|
2015-09-21 23:09:11 +08:00
|
|
|
MRI.constrainRegClass(RegA, RC);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(RegB))
|
2015-09-21 23:09:11 +08:00
|
|
|
MRI.constrainRegClass(RegB, RC);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(RegX))
|
2015-09-21 23:09:11 +08:00
|
|
|
MRI.constrainRegClass(RegX, RC);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(RegY))
|
2015-09-21 23:09:11 +08:00
|
|
|
MRI.constrainRegClass(RegY, RC);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(RegC))
|
2015-09-21 23:09:11 +08:00
|
|
|
MRI.constrainRegClass(RegC, RC);
|
|
|
|
|
|
|
|
// Create a new virtual register for the result of (X op Y) instead of
|
|
|
|
// recycling RegB because the MachineCombiner's computation of the critical
|
|
|
|
// path requires a new register definition rather than an existing one.
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register NewVR = MRI.createVirtualRegister(RC);
|
2015-09-21 23:09:11 +08:00
|
|
|
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
|
|
|
|
|
|
|
|
unsigned Opcode = Root.getOpcode();
|
|
|
|
bool KillA = OpA.isKill();
|
|
|
|
bool KillX = OpX.isKill();
|
|
|
|
bool KillY = OpY.isKill();
|
|
|
|
|
|
|
|
// Create new instructions for insertion.
|
|
|
|
MachineInstrBuilder MIB1 =
|
|
|
|
BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
|
|
|
|
.addReg(RegX, getKillRegState(KillX))
|
|
|
|
.addReg(RegY, getKillRegState(KillY));
|
|
|
|
MachineInstrBuilder MIB2 =
|
|
|
|
BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
|
|
|
|
.addReg(RegA, getKillRegState(KillA))
|
|
|
|
.addReg(NewVR, getKillRegState(true));
|
|
|
|
|
|
|
|
setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
|
|
|
|
|
|
|
|
// Record new instructions for insertion and old instructions for deletion.
|
|
|
|
InsInstrs.push_back(MIB1);
|
|
|
|
InsInstrs.push_back(MIB2);
|
|
|
|
DelInstrs.push_back(&Prev);
|
|
|
|
DelInstrs.push_back(&Root);
|
|
|
|
}
|
|
|
|
|
|
|
|
void TargetInstrInfo::genAlternativeCodeSequence(
|
2015-11-06 03:34:57 +08:00
|
|
|
MachineInstr &Root, MachineCombinerPattern Pattern,
|
2015-09-21 23:09:11 +08:00
|
|
|
SmallVectorImpl<MachineInstr *> &InsInstrs,
|
|
|
|
SmallVectorImpl<MachineInstr *> &DelInstrs,
|
|
|
|
DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
|
2017-10-11 07:50:49 +08:00
|
|
|
MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
|
2015-09-21 23:09:11 +08:00
|
|
|
|
|
|
|
// Select the previous instruction in the sequence based on the input pattern.
|
|
|
|
MachineInstr *Prev = nullptr;
|
|
|
|
switch (Pattern) {
|
2015-11-06 03:34:57 +08:00
|
|
|
case MachineCombinerPattern::REASSOC_AX_BY:
|
|
|
|
case MachineCombinerPattern::REASSOC_XA_BY:
|
2015-09-21 23:09:11 +08:00
|
|
|
Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
|
|
|
|
break;
|
2015-11-06 03:34:57 +08:00
|
|
|
case MachineCombinerPattern::REASSOC_AX_YB:
|
|
|
|
case MachineCombinerPattern::REASSOC_XA_YB:
|
2015-09-21 23:09:11 +08:00
|
|
|
Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(Prev && "Unknown pattern for machine combiner");
|
|
|
|
|
|
|
|
reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
|
2019-10-19 09:07:48 +08:00
|
|
|
const MachineInstr &MI, AAResults *AA) const {
|
2017-10-11 07:50:49 +08:00
|
|
|
const MachineFunction &MF = *MI.getMF();
|
2012-11-28 10:35:13 +08:00
|
|
|
const MachineRegisterInfo &MRI = MF.getRegInfo();
|
|
|
|
|
|
|
|
// Remat clients assume operand 0 is the defined register.
|
2016-06-30 08:01:54 +08:00
|
|
|
if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
|
2012-11-28 10:35:13 +08:00
|
|
|
return false;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register DefReg = MI.getOperand(0).getReg();
|
2012-11-28 10:35:13 +08:00
|
|
|
|
|
|
|
// A sub-register definition can only be rematerialized if the instruction
|
|
|
|
// doesn't read the other parts of the register. Otherwise it is really a
|
|
|
|
// read-modify-write operation on the full virtual register which cannot be
|
|
|
|
// moved safely.
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
|
|
|
|
MI.readsVirtualRegister(DefReg))
|
2012-11-28 10:35:13 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// A load from a fixed stack slot can be rematerialized. This may be
|
|
|
|
// redundant with subsequent checks, but it's target-independent,
|
|
|
|
// simple, and a common case.
|
|
|
|
int FrameIdx = 0;
|
2014-07-24 06:12:03 +08:00
|
|
|
if (isLoadFromStackSlot(MI, FrameIdx) &&
|
2016-07-29 02:40:00 +08:00
|
|
|
MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
|
2012-11-28 10:35:13 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
// Avoid instructions obviously unsafe for remat.
|
2019-06-06 06:33:10 +08:00
|
|
|
if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
|
|
|
|
MI.hasUnmodeledSideEffects())
|
2012-11-28 10:35:13 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Don't remat inline asm. We have no idea how expensive it is
|
|
|
|
// even if it's side effect free.
|
2016-06-30 08:01:54 +08:00
|
|
|
if (MI.isInlineAsm())
|
2012-11-28 10:35:13 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Avoid instructions which load from potentially varying memory.
|
2016-09-10 09:03:20 +08:00
|
|
|
if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
|
2012-11-28 10:35:13 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// If any of the registers accessed are non-constant, conservatively assume
|
|
|
|
// the instruction is not rematerializable.
|
2016-06-30 08:01:54 +08:00
|
|
|
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
|
|
|
|
const MachineOperand &MO = MI.getOperand(i);
|
2012-11-28 10:35:13 +08:00
|
|
|
if (!MO.isReg()) continue;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register Reg = MO.getReg();
|
2012-11-28 10:35:13 +08:00
|
|
|
if (Reg == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Check for a well-behaved physical register.
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isPhysicalRegister(Reg)) {
|
2012-11-28 10:35:13 +08:00
|
|
|
if (MO.isUse()) {
|
|
|
|
// If the physreg has no defs anywhere, it's just an ambient register
|
|
|
|
// and we can freely move its uses. Alternatively, if it's allocatable,
|
|
|
|
// it could get allocated to something with a def during allocation.
|
2016-10-29 02:05:09 +08:00
|
|
|
if (!MRI.isConstantPhysReg(Reg))
|
2012-11-28 10:35:13 +08:00
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
// A physreg def. We can't remat it.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Only allow one virtual-register def. There may be multiple defs of the
|
|
|
|
// same virtual register, though.
|
|
|
|
if (MO.isDef() && Reg != DefReg)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Don't allow any virtual-register uses. Rematting an instruction with
|
|
|
|
// virtual register uses would length the live ranges of the uses, which
|
|
|
|
// is not necessarily a good idea, certainly not "trivial".
|
|
|
|
if (MO.isUse())
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Everything checked out.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
|
2017-10-11 07:50:49 +08:00
|
|
|
const MachineFunction *MF = MI.getMF();
|
2015-01-08 19:04:38 +08:00
|
|
|
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
|
|
|
|
bool StackGrowsDown =
|
|
|
|
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
|
|
|
|
|
2015-05-19 04:27:55 +08:00
|
|
|
unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
|
|
|
|
unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
|
2015-01-08 19:04:38 +08:00
|
|
|
|
2017-04-13 22:10:52 +08:00
|
|
|
if (!isFrameInstr(MI))
|
2015-01-08 19:04:38 +08:00
|
|
|
return 0;
|
2016-06-30 08:01:54 +08:00
|
|
|
|
2017-04-13 22:10:52 +08:00
|
|
|
int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
|
2015-01-08 19:04:38 +08:00
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
|
|
|
|
(StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
|
2015-01-08 19:04:38 +08:00
|
|
|
SPAdj = -SPAdj;
|
|
|
|
|
|
|
|
return SPAdj;
|
|
|
|
}
|
|
|
|
|
2012-11-28 10:35:13 +08:00
|
|
|
/// isSchedulingBoundary - Test if the given instruction should be
|
|
|
|
/// considered a scheduling boundary. This primarily includes labels
|
|
|
|
/// and terminators.
|
2016-06-30 08:01:54 +08:00
|
|
|
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
|
2012-11-28 10:35:13 +08:00
|
|
|
const MachineBasicBlock *MBB,
|
|
|
|
const MachineFunction &MF) const {
|
|
|
|
// Terminators and labels can't be scheduled around.
|
2016-06-30 08:01:54 +08:00
|
|
|
if (MI.isTerminator() || MI.isPosition())
|
2012-11-28 10:35:13 +08:00
|
|
|
return true;
|
|
|
|
|
2020-05-16 11:43:30 +08:00
|
|
|
// INLINEASM_BR can jump to another block
|
|
|
|
if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
|
|
|
|
return true;
|
|
|
|
|
2012-11-28 10:35:13 +08:00
|
|
|
// Don't attempt to schedule around any instruction that defines
|
|
|
|
// a stack-oriented pointer, as it's unlikely to be profitable. This
|
|
|
|
// saves compile time, because it doesn't require every single
|
|
|
|
// stack slot reference to depend on the instruction that does the
|
|
|
|
// modification.
|
2014-08-05 10:39:49 +08:00
|
|
|
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
|
|
|
|
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
|
2016-06-30 08:01:54 +08:00
|
|
|
return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
|
2012-11-28 10:35:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  // DisableHazardRecognizer is a command-line override; when it is set,
  // targets that honor this hook skip pre-RA hazard recognition entirely.
  return !DisableHazardRecognizer;
}
|
|
|
|
|
|
|
|
// Default implementation of CreateTargetRAHazardRecognizer. Targets with
// real structural hazards override this to return a target-specific
// recognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}
|
|
|
|
|
|
|
|
// Default implementation of CreateTargetMIHazardRecognizer: an
// itinerary-driven scoreboard used by the machine scheduler.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  // The string names the debug type under which the recognizer logs.
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}
|
|
|
|
|
|
|
|
// Default implementation of CreateTargetPostRAHazardRecognizer: an
// itinerary-driven scoreboard used by the post-RA scheduler.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  // The string names the debug type under which the recognizer logs.
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
|
|
|
|
|
2020-01-06 19:22:51 +08:00
|
|
|
// Default implementation of getMemOperandWithOffset.
|
|
|
|
bool TargetInstrInfo::getMemOperandWithOffset(
|
|
|
|
const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
|
Add OffsetIsScalable to getMemOperandWithOffset
Summary:
Making `Scale` a `TypeSize` in AArch64InstrInfo::getMemOpInfo,
has the effect that all places where this information is used
(notably, TargetInstrInfo::getMemOperandWithOffset) will need
to consider Scale - and derived, Offset - possibly being scalable.
This patch adds a new operand `bool &OffsetIsScalable` to
TargetInstrInfo::getMemOperandWithOffset and fixes up all
the places where this function is used, to consider the
offset possibly being scalable.
In most cases, this means bailing out because the algorithm does not
(or cannot) support scalable offsets in places where it does some
form of alias checking for example.
Reviewers: rovka, efriedma, kristof.beyls
Reviewed By: efriedma
Subscribers: wuzish, kerbowa, MatzeB, arsenm, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, javed.absar, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, luismarques, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72758
2020-02-18 22:32:26 +08:00
|
|
|
bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
|
2020-01-06 19:22:51 +08:00
|
|
|
SmallVector<const MachineOperand *, 4> BaseOps;
|
[AMDGPU/MemOpsCluster] Let mem ops clustering logic also consider number of clustered bytes
Summary:
While clustering mem ops, AMDGPU target needs to consider number of clustered bytes
to decide on max number of mem ops that can be clustered. This patch adds support to pass
number of clustered bytes to target mem ops clustering logic.
Reviewers: foad, rampitec, arsenm, vpykhtin, javedabsar
Reviewed By: foad
Subscribers: MatzeB, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, javed.absar, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D80545
2020-06-02 01:20:29 +08:00
|
|
|
unsigned Width;
|
|
|
|
if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
|
|
|
|
Width, TRI) ||
|
2020-01-06 19:22:51 +08:00
|
|
|
BaseOps.size() != 1)
|
|
|
|
return false;
|
|
|
|
BaseOp = BaseOps.front();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-11-28 10:35:13 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SelectionDAG latency interface.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Compute the operand latency between a def in DefNode and a use in UseNode
/// from itinerary data. Returns -1 when no itinerary is available or the
/// def is not a machine node.
int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  // With no machine use node, fall back to the def operand's cycle alone.
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
|
|
|
|
|
|
|
|
int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
|
|
|
|
SDNode *N) const {
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!N->isMachineOpcode())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// MachineInstr latency interface.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr &MI) const {
|
2012-11-28 10:35:13 +08:00
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return 1;
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned Class = MI.getDesc().getSchedClass();
|
2012-11-28 10:35:13 +08:00
|
|
|
int UOps = ItinData->Itineraries[Class].NumMicroOps;
|
|
|
|
if (UOps >= 0)
|
|
|
|
return UOps;
|
|
|
|
|
|
|
|
// The # of u-ops is dynamically determined. The specific target should
|
|
|
|
// override this function to return the right number.
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  // Transient instructions (e.g. COPY-like pseudos) produce no machine code
  // and so cost nothing. Checked first: a transient load is still free.
  if (DefMI.isTransient())
    return 0;
  // Loads and known high-latency defs use the scheduling model's figures.
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  // Everything else defaults to a single cycle.
  return 1;
}
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
/// Extra latency incurred by predicating an instruction; by default
/// predication is assumed to be free.
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
/// Return the itinerary stage latency of \p MI. \p PredCost is unused in
/// this default implementation.
unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    // Loads get a slightly higher default to reflect memory access cost.
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}
|
|
|
|
|
2015-06-13 11:42:11 +08:00
|
|
|
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
|
2016-06-30 08:01:54 +08:00
|
|
|
const MachineInstr &DefMI,
|
2012-11-28 10:35:13 +08:00
|
|
|
unsigned DefIdx) const {
|
2015-06-13 11:42:11 +08:00
|
|
|
const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
|
2012-11-28 10:35:13 +08:00
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return false;
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned DefClass = DefMI.getDesc().getSchedClass();
|
2012-11-28 10:35:13 +08:00
|
|
|
int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
return (DefCycle != -1 && DefCycle <= 1);
|
|
|
|
}
|
|
|
|
|
2019-08-01 00:51:28 +08:00
|
|
|
Optional<ParamLoadedValue>
|
[DebugInfo] Make describeLoadedValue() reg aware
Summary:
Currently the describeLoadedValue() hook is assumed to describe the
value of the instruction's first explicit define. The hook will not be
called for instructions with more than one explicit define.
This commit adds a register parameter to the describeLoadedValue() hook,
and invokes the hook for all registers in the worklist.
This will allow us to for example describe instructions which produce
more than two parameters' values; e.g. Hexagon's various combine
instructions.
This also fixes situations in our downstream target where we may pass
smaller parameters in the high part of a register. If such a parameter's
value is produced by a larger copy instruction, we can't describe the
call site value using the super-register, and we instead need to know
which sub-register that should be used.
This also allows us to handle cases like this:
$ebx = [...]
$rdi = MOVSX64rr32 $ebx
$esi = MOV32rr $edi
CALL64pcrel32 @call
The hook will first be invoked for the MOV32rr instruction, which will
say that @call's second parameter (passed in $esi) is described by $edi.
As $edi is not preserved it will be added to the worklist. When we get
to the MOVSX64rr32 instruction, we need to describe two values; the
sign-extended value of $ebx -> $rdi for the first parameter, and $ebx ->
$edi for the second parameter, which is now possible.
This commit modifies the dbgcall-site-lea-interpretation.mir test case.
In the test case, the values of some 32-bit parameters were produced
with LEA64r. Perhaps we can in general cases handle such by emitting
expressions that AND out the lower 32-bits, but I have not been able to
land in a case where a LEA64r is used for a 32-bit parameter instead of
LEA64_32 from C code.
I have not found a case where it would be useful to describe parameters
using implicit defines, so in this patch the hook is still only invoked
for explicit defines of forwarding registers.
Reviewers: djtodoro, NikolaPrica, aprantl, vsk
Reviewed By: djtodoro, vsk
Subscribers: ormris, hiraditya, llvm-commits
Tags: #debug-info, #llvm
Differential Revision: https://reviews.llvm.org/D70431
2019-12-09 17:46:16 +08:00
|
|
|
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
|
|
|
|
Register Reg) const {
|
2019-08-01 00:51:28 +08:00
|
|
|
const MachineFunction *MF = MI.getMF();
|
[DebugInfo] Make describeLoadedValue() reg aware
Summary:
Currently the describeLoadedValue() hook is assumed to describe the
value of the instruction's first explicit define. The hook will not be
called for instructions with more than one explicit define.
This commit adds a register parameter to the describeLoadedValue() hook,
and invokes the hook for all registers in the worklist.
This will allow us to for example describe instructions which produce
more than two parameters' values; e.g. Hexagon's various combine
instructions.
This also fixes situations in our downstream target where we may pass
smaller parameters in the high part of a register. If such a parameter's
value is produced by a larger copy instruction, we can't describe the
call site value using the super-register, and we instead need to know
which sub-register that should be used.
This also allows us to handle cases like this:
$ebx = [...]
$rdi = MOVSX64rr32 $ebx
$esi = MOV32rr $edi
CALL64pcrel32 @call
The hook will first be invoked for the MOV32rr instruction, which will
say that @call's second parameter (passed in $esi) is described by $edi.
As $edi is not preserved it will be added to the worklist. When we get
to the MOVSX64rr32 instruction, we need to describe two values; the
sign-extended value of $ebx -> $rdi for the first parameter, and $ebx ->
$edi for the second parameter, which is now possible.
This commit modifies the dbgcall-site-lea-interpretation.mir test case.
In the test case, the values of some 32-bit parameters were produced
with LEA64r. Perhaps we can in general cases handle such by emitting
expressions that AND out the lower 32-bits, but I have not been able to
land in a case where a LEA64r is used for a 32-bit parameter instead of
LEA64_32 from C code.
I have not found a case where it would be useful to describe parameters
using implicit defines, so in this patch the hook is still only invoked
for explicit defines of forwarding registers.
Reviewers: djtodoro, NikolaPrica, aprantl, vsk
Reviewed By: djtodoro, vsk
Subscribers: ormris, hiraditya, llvm-commits
Tags: #debug-info, #llvm
Differential Revision: https://reviews.llvm.org/D70431
2019-12-09 17:46:16 +08:00
|
|
|
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
|
2019-10-30 18:04:53 +08:00
|
|
|
DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
|
|
|
|
int64_t Offset;
|
Add OffsetIsScalable to getMemOperandWithOffset
Summary:
Making `Scale` a `TypeSize` in AArch64InstrInfo::getMemOpInfo,
has the effect that all places where this information is used
(notably, TargetInstrInfo::getMemOperandWithOffset) will need
to consider Scale - and derived, Offset - possibly being scalable.
This patch adds a new operand `bool &OffsetIsScalable` to
TargetInstrInfo::getMemOperandWithOffset and fixes up all
the places where this function is used, to consider the
offset possibly being scalable.
In most cases, this means bailing out because the algorithm does not
(or cannot) support scalable offsets in places where it does some
form of alias checking for example.
Reviewers: rovka, efriedma, kristof.beyls
Reviewed By: efriedma
Subscribers: wuzish, kerbowa, MatzeB, arsenm, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, javed.absar, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, luismarques, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72758
2020-02-18 22:32:26 +08:00
|
|
|
bool OffsetIsScalable;
|
2019-08-01 00:51:28 +08:00
|
|
|
|
[DebugInfo] Make describeLoadedValue() reg aware
Summary:
Currently the describeLoadedValue() hook is assumed to describe the
value of the instruction's first explicit define. The hook will not be
called for instructions with more than one explicit define.
This commit adds a register parameter to the describeLoadedValue() hook,
and invokes the hook for all registers in the worklist.
This will allow us to for example describe instructions which produce
more than two parameters' values; e.g. Hexagon's various combine
instructions.
This also fixes situations in our downstream target where we may pass
smaller parameters in the high part of a register. If such a parameter's
value is produced by a larger copy instruction, we can't describe the
call site value using the super-register, and we instead need to know
which sub-register that should be used.
This also allows us to handle cases like this:
$ebx = [...]
$rdi = MOVSX64rr32 $ebx
$esi = MOV32rr $edi
CALL64pcrel32 @call
The hook will first be invoked for the MOV32rr instruction, which will
say that @call's second parameter (passed in $esi) is described by $edi.
As $edi is not preserved it will be added to the worklist. When we get
to the MOVSX64rr32 instruction, we need to describe two values; the
sign-extended value of $ebx -> $rdi for the first parameter, and $ebx ->
$edi for the second parameter, which is now possible.
This commit modifies the dbgcall-site-lea-interpretation.mir test case.
In the test case, the values of some 32-bit parameters were produced
with LEA64r. Perhaps we can in general cases handle such by emitting
expressions that AND out the lower 32-bits, but I have not been able to
land in a case where a LEA64r is used for a 32-bit parameter instead of
LEA64_32 from C code.
I have not found a case where it would be useful to describe parameters
using implicit defines, so in this patch the hook is still only invoked
for explicit defines of forwarding registers.
Reviewers: djtodoro, NikolaPrica, aprantl, vsk
Reviewed By: djtodoro, vsk
Subscribers: ormris, hiraditya, llvm-commits
Tags: #debug-info, #llvm
Differential Revision: https://reviews.llvm.org/D70431
2019-12-09 17:46:16 +08:00
|
|
|
// To simplify the sub-register handling, verify that we only need to
|
|
|
|
// consider physical registers.
|
|
|
|
assert(MF->getProperties().hasProperty(
|
|
|
|
MachineFunctionProperties::Property::NoVRegs));
|
|
|
|
|
2019-11-08 18:19:58 +08:00
|
|
|
if (auto DestSrc = isCopyInstr(MI)) {
|
[DebugInfo] Make describeLoadedValue() reg aware
Summary:
Currently the describeLoadedValue() hook is assumed to describe the
value of the instruction's first explicit define. The hook will not be
called for instructions with more than one explicit define.
This commit adds a register parameter to the describeLoadedValue() hook,
and invokes the hook for all registers in the worklist.
This will allow us to for example describe instructions which produce
more than two parameters' values; e.g. Hexagon's various combine
instructions.
This also fixes situations in our downstream target where we may pass
smaller parameters in the high part of a register. If such a parameter's
value is produced by a larger copy instruction, we can't describe the
call site value using the super-register, and we instead need to know
which sub-register that should be used.
This also allows us to handle cases like this:
$ebx = [...]
$rdi = MOVSX64rr32 $ebx
$esi = MOV32rr $edi
CALL64pcrel32 @call
The hook will first be invoked for the MOV32rr instruction, which will
say that @call's second parameter (passed in $esi) is described by $edi.
As $edi is not preserved it will be added to the worklist. When we get
to the MOVSX64rr32 instruction, we need to describe two values; the
sign-extended value of $ebx -> $rdi for the first parameter, and $ebx ->
$edi for the second parameter, which is now possible.
This commit modifies the dbgcall-site-lea-interpretation.mir test case.
In the test case, the values of some 32-bit parameters were produced
with LEA64r. Perhaps we can in general cases handle such by emitting
expressions that AND out the lower 32-bits, but I have not been able to
land in a case where a LEA64r is used for a 32-bit parameter instead of
LEA64_32 from C code.
I have not found a case where it would be useful to describe parameters
using implicit defines, so in this patch the hook is still only invoked
for explicit defines of forwarding registers.
Reviewers: djtodoro, NikolaPrica, aprantl, vsk
Reviewed By: djtodoro, vsk
Subscribers: ormris, hiraditya, llvm-commits
Tags: #debug-info, #llvm
Differential Revision: https://reviews.llvm.org/D70431
2019-12-09 17:46:16 +08:00
|
|
|
Register DestReg = DestSrc->Destination->getReg();
|
|
|
|
|
2020-02-28 01:58:24 +08:00
|
|
|
// If the copy destination is the forwarding reg, describe the forwarding
|
|
|
|
// reg using the copy source as the backup location. Example:
|
|
|
|
//
|
|
|
|
// x0 = MOV x7
|
|
|
|
// call callee(x0) ; x0 described as x7
|
[DebugInfo] Make describeLoadedValue() reg aware
Summary:
Currently the describeLoadedValue() hook is assumed to describe the
value of the instruction's first explicit define. The hook will not be
called for instructions with more than one explicit define.
This commit adds a register parameter to the describeLoadedValue() hook,
and invokes the hook for all registers in the worklist.
This will allow us to for example describe instructions which produce
more than two parameters' values; e.g. Hexagon's various combine
instructions.
This also fixes situations in our downstream target where we may pass
smaller parameters in the high part of a register. If such a parameter's
value is produced by a larger copy instruction, we can't describe the
call site value using the super-register, and we instead need to know
which sub-register that should be used.
This also allows us to handle cases like this:
$ebx = [...]
$rdi = MOVSX64rr32 $ebx
$esi = MOV32rr $edi
CALL64pcrel32 @call
The hook will first be invoked for the MOV32rr instruction, which will
say that @call's second parameter (passed in $esi) is described by $edi.
As $edi is not preserved it will be added to the worklist. When we get
to the MOVSX64rr32 instruction, we need to describe two values; the
sign-extended value of $ebx -> $rdi for the first parameter, and $ebx ->
$edi for the second parameter, which is now possible.
This commit modifies the dbgcall-site-lea-interpretation.mir test case.
In the test case, the values of some 32-bit parameters were produced
with LEA64r. Perhaps we can in general cases handle such by emitting
expressions that AND out the lower 32-bits, but I have not been able to
land in a case where a LEA64r is used for a 32-bit parameter instead of
LEA64_32 from C code.
I have not found a case where it would be useful to describe parameters
using implicit defines, so in this patch the hook is still only invoked
for explicit defines of forwarding registers.
Reviewers: djtodoro, NikolaPrica, aprantl, vsk
Reviewed By: djtodoro, vsk
Subscribers: ormris, hiraditya, llvm-commits
Tags: #debug-info, #llvm
Differential Revision: https://reviews.llvm.org/D70431
2019-12-09 17:46:16 +08:00
|
|
|
if (Reg == DestReg)
|
|
|
|
return ParamLoadedValue(*DestSrc->Source, Expr);
|
|
|
|
|
|
|
|
// Cases where super- or sub-registers needs to be described should
|
|
|
|
// be handled by the target's hook implementation.
|
|
|
|
assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
|
|
|
|
"TargetInstrInfo::describeLoadedValue can't describe super- or "
|
|
|
|
"sub-regs for copy instructions");
|
|
|
|
return None;
|
|
|
|
} else if (auto RegImm = isAddImmediate(MI, Reg)) {
|
|
|
|
Register SrcReg = RegImm->Reg;
|
|
|
|
Offset = RegImm->Imm;
|
2019-10-30 18:04:53 +08:00
|
|
|
Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
|
[DebugInfo] Make describeLoadedValue() reg aware
Summary:
Currently the describeLoadedValue() hook is assumed to describe the
value of the instruction's first explicit define. The hook will not be
called for instructions with more than one explicit define.
This commit adds a register parameter to the describeLoadedValue() hook,
and invokes the hook for all registers in the worklist.
This will allow us to for example describe instructions which produce
more than two parameters' values; e.g. Hexagon's various combine
instructions.
This also fixes situations in our downstream target where we may pass
smaller parameters in the high part of a register. If such a parameter's
value is produced by a larger copy instruction, we can't describe the
call site value using the super-register, and we instead need to know
which sub-register that should be used.
This also allows us to handle cases like this:
$ebx = [...]
$rdi = MOVSX64rr32 $ebx
$esi = MOV32rr $edi
CALL64pcrel32 @call
The hook will first be invoked for the MOV32rr instruction, which will
say that @call's second parameter (passed in $esi) is described by $edi.
As $edi is not preserved it will be added to the worklist. When we get
to the MOVSX64rr32 instruction, we need to describe two values; the
sign-extended value of $ebx -> $rdi for the first parameter, and $ebx ->
$edi for the second parameter, which is now possible.
This commit modifies the dbgcall-site-lea-interpretation.mir test case.
In the test case, the values of some 32-bit parameters were produced
with LEA64r. Perhaps we can in general cases handle such by emitting
expressions that AND out the lower 32-bits, but I have not been able to
land in a case where a LEA64r is used for a 32-bit parameter instead of
LEA64_32 from C code.
I have not found a case where it would be useful to describe parameters
using implicit defines, so in this patch the hook is still only invoked
for explicit defines of forwarding registers.
Reviewers: djtodoro, NikolaPrica, aprantl, vsk
Reviewed By: djtodoro, vsk
Subscribers: ormris, hiraditya, llvm-commits
Tags: #debug-info, #llvm
Differential Revision: https://reviews.llvm.org/D70431
2019-12-09 17:46:16 +08:00
|
|
|
return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
|
2019-11-15 01:20:58 +08:00
|
|
|
} else if (MI.hasOneMemOperand()) {
|
|
|
|
// Only describe memory which provably does not escape the function. As
|
|
|
|
// described in llvm.org/PR43343, escaped memory may be clobbered by the
|
|
|
|
// callee (or by another thread).
|
|
|
|
const auto &TII = MF->getSubtarget().getInstrInfo();
|
|
|
|
const MachineFrameInfo &MFI = MF->getFrameInfo();
|
|
|
|
const MachineMemOperand *MMO = MI.memoperands()[0];
|
|
|
|
const PseudoSourceValue *PSV = MMO->getPseudoValue();
|
|
|
|
|
|
|
|
// If the address points to "special" memory (e.g. a spill slot), it's
|
|
|
|
// sufficient to check that it isn't aliased by any high-level IR value.
|
|
|
|
if (!PSV || PSV->mayAlias(&MFI))
|
|
|
|
return None;
|
|
|
|
|
|
|
|
const MachineOperand *BaseOp;
|
Add OffsetIsScalable to getMemOperandWithOffset
Summary:
Making `Scale` a `TypeSize` in AArch64InstrInfo::getMemOpInfo,
has the effect that all places where this information is used
(notably, TargetInstrInfo::getMemOperandWithOffset) will need
to consider Scale - and derived, Offset - possibly being scalable.
This patch adds a new operand `bool &OffsetIsScalable` to
TargetInstrInfo::getMemOperandWithOffset and fixes up all
the places where this function is used, to consider the
offset possibly being scalable.
In most cases, this means bailing out because the algorithm does not
(or cannot) support scalable offsets in places where it does some
form of alias checking for example.
Reviewers: rovka, efriedma, kristof.beyls
Reviewed By: efriedma
Subscribers: wuzish, kerbowa, MatzeB, arsenm, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, javed.absar, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, luismarques, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72758
2020-02-18 22:32:26 +08:00
|
|
|
if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
|
|
|
|
TRI))
|
|
|
|
return None;
|
|
|
|
|
|
|
|
// FIXME: Scalable offsets are not yet handled in the offset code below.
|
|
|
|
if (OffsetIsScalable)
|
2019-11-15 01:20:58 +08:00
|
|
|
return None;
|
|
|
|
|
2020-02-05 16:50:05 +08:00
|
|
|
// TODO: Can currently only handle mem instructions with a single define.
|
|
|
|
// An example from the x86 target:
|
|
|
|
// ...
|
|
|
|
// DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
|
|
|
|
// ...
|
|
|
|
//
|
|
|
|
if (MI.getNumExplicitDefs() != 1)
|
|
|
|
return None;
|
[DebugInfo] Make describeLoadedValue() reg aware
Summary:
Currently the describeLoadedValue() hook is assumed to describe the
value of the instruction's first explicit define. The hook will not be
called for instructions with more than one explicit define.
This commit adds a register parameter to the describeLoadedValue() hook,
and invokes the hook for all registers in the worklist.
This will allow us to for example describe instructions which produce
more than two parameters' values; e.g. Hexagon's various combine
instructions.
This also fixes situations in our downstream target where we may pass
smaller parameters in the high part of a register. If such a parameter's
value is produced by a larger copy instruction, we can't describe the
call site value using the super-register, and we instead need to know
which sub-register that should be used.
This also allows us to handle cases like this:
$ebx = [...]
$rdi = MOVSX64rr32 $ebx
$esi = MOV32rr $edi
CALL64pcrel32 @call
The hook will first be invoked for the MOV32rr instruction, which will
say that @call's second parameter (passed in $esi) is described by $edi.
As $edi is not preserved it will be added to the worklist. When we get
to the MOVSX64rr32 instruction, we need to describe two values; the
sign-extended value of $ebx -> $rdi for the first parameter, and $ebx ->
$edi for the second parameter, which is now possible.
This commit modifies the dbgcall-site-lea-interpretation.mir test case.
In the test case, the values of some 32-bit parameters were produced
with LEA64r. Perhaps we can in general cases handle such by emitting
expressions that AND out the lower 32-bits, but I have not been able to
land in a case where a LEA64r is used for a 32-bit parameter instead of
LEA64_32 from C code.
I have not found a case where it would be useful to describe parameters
using implicit defines, so in this patch the hook is still only invoked
for explicit defines of forwarding registers.
Reviewers: djtodoro, NikolaPrica, aprantl, vsk
Reviewed By: djtodoro, vsk
Subscribers: ormris, hiraditya, llvm-commits
Tags: #debug-info, #llvm
Differential Revision: https://reviews.llvm.org/D70431
2019-12-09 17:46:16 +08:00
|
|
|
|
|
|
|
// TODO: In what way do we need to take Reg into consideration here?
|
|
|
|
|
2019-11-20 03:58:14 +08:00
|
|
|
SmallVector<uint64_t, 8> Ops;
|
|
|
|
DIExpression::appendOffset(Ops, Offset);
|
|
|
|
Ops.push_back(dwarf::DW_OP_deref_size);
|
|
|
|
Ops.push_back(MMO->getSize());
|
|
|
|
Expr = DIExpression::prependOpcodes(Expr, Ops);
|
2019-11-15 01:20:58 +08:00
|
|
|
return ParamLoadedValue(*BaseOp, Expr);
|
2019-08-01 00:51:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2012-11-28 10:35:13 +08:00
|
|
|
/// Both DefMI and UseMI must be valid. By default, call directly to the
|
|
|
|
/// itinerary. This may be overriden by the target.
|
2016-06-30 08:01:54 +08:00
|
|
|
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr &DefMI,
|
|
|
|
unsigned DefIdx,
|
|
|
|
const MachineInstr &UseMI,
|
|
|
|
unsigned UseIdx) const {
|
|
|
|
unsigned DefClass = DefMI.getDesc().getSchedClass();
|
|
|
|
unsigned UseClass = UseMI.getDesc().getSchedClass();
|
2012-11-28 10:35:13 +08:00
|
|
|
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// If we can determine the operand latency from the def only, without itinerary
|
|
|
|
/// lookup, do so. Otherwise return -1.
|
|
|
|
int TargetInstrInfo::computeDefOperandLatency(
|
2016-06-30 08:01:54 +08:00
|
|
|
const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
|
2012-11-28 10:35:13 +08:00
|
|
|
|
|
|
|
// Let the target hook getInstrLatency handle missing itineraries.
|
|
|
|
if (!ItinData)
|
|
|
|
return getInstrLatency(ItinData, DefMI);
|
|
|
|
|
2013-06-15 12:49:57 +08:00
|
|
|
if(ItinData->isEmpty())
|
2012-11-28 10:35:13 +08:00
|
|
|
return defaultDefLatency(ItinData->SchedModel, DefMI);
|
|
|
|
|
|
|
|
// ...operand lookup required
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-08-12 06:17:14 +08:00
|
|
|
bool TargetInstrInfo::getRegSequenceInputs(
|
|
|
|
const MachineInstr &MI, unsigned DefIdx,
|
|
|
|
SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
|
2014-08-13 01:11:26 +08:00
|
|
|
assert((MI.isRegSequence() ||
|
|
|
|
MI.isRegSequenceLike()) && "Instruction do not have the proper type");
|
2014-08-12 06:17:14 +08:00
|
|
|
|
|
|
|
if (!MI.isRegSequence())
|
|
|
|
return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
|
|
|
|
|
|
|
|
// We are looking at:
|
|
|
|
// Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
|
|
|
|
assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
|
|
|
|
for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
|
|
|
|
OpIdx += 2) {
|
|
|
|
const MachineOperand &MOReg = MI.getOperand(OpIdx);
|
2018-01-12 06:30:43 +08:00
|
|
|
if (MOReg.isUndef())
|
|
|
|
continue;
|
2014-08-12 06:17:14 +08:00
|
|
|
const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
|
|
|
|
assert(MOSubIdx.isImm() &&
|
|
|
|
"One of the subindex of the reg_sequence is not an immediate");
|
|
|
|
// Record Reg:SubReg, SubIdx.
|
|
|
|
InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
|
|
|
|
(unsigned)MOSubIdx.getImm()));
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2014-08-21 05:51:26 +08:00
|
|
|
|
|
|
|
bool TargetInstrInfo::getExtractSubregInputs(
|
|
|
|
const MachineInstr &MI, unsigned DefIdx,
|
|
|
|
RegSubRegPairAndIdx &InputReg) const {
|
|
|
|
assert((MI.isExtractSubreg() ||
|
|
|
|
MI.isExtractSubregLike()) && "Instruction do not have the proper type");
|
|
|
|
|
|
|
|
if (!MI.isExtractSubreg())
|
|
|
|
return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
|
|
|
|
|
|
|
|
// We are looking at:
|
|
|
|
// Def = EXTRACT_SUBREG v0.sub1, sub0.
|
|
|
|
assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
|
|
|
|
const MachineOperand &MOReg = MI.getOperand(1);
|
2018-01-12 06:30:43 +08:00
|
|
|
if (MOReg.isUndef())
|
|
|
|
return false;
|
2014-08-21 05:51:26 +08:00
|
|
|
const MachineOperand &MOSubIdx = MI.getOperand(2);
|
|
|
|
assert(MOSubIdx.isImm() &&
|
|
|
|
"The subindex of the extract_subreg is not an immediate");
|
|
|
|
|
|
|
|
InputReg.Reg = MOReg.getReg();
|
|
|
|
InputReg.SubReg = MOReg.getSubReg();
|
|
|
|
InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
|
|
|
|
return true;
|
|
|
|
}
|
2014-08-21 07:49:36 +08:00
|
|
|
|
|
|
|
bool TargetInstrInfo::getInsertSubregInputs(
|
|
|
|
const MachineInstr &MI, unsigned DefIdx,
|
|
|
|
RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
|
|
|
|
assert((MI.isInsertSubreg() ||
|
|
|
|
MI.isInsertSubregLike()) && "Instruction do not have the proper type");
|
|
|
|
|
|
|
|
if (!MI.isInsertSubreg())
|
|
|
|
return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
|
|
|
|
|
|
|
|
// We are looking at:
|
|
|
|
// Def = INSERT_SEQUENCE v0, v1, sub0.
|
|
|
|
assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
|
|
|
|
const MachineOperand &MOBaseReg = MI.getOperand(1);
|
|
|
|
const MachineOperand &MOInsertedReg = MI.getOperand(2);
|
2018-01-12 06:30:43 +08:00
|
|
|
if (MOInsertedReg.isUndef())
|
|
|
|
return false;
|
2014-08-21 07:49:36 +08:00
|
|
|
const MachineOperand &MOSubIdx = MI.getOperand(3);
|
|
|
|
assert(MOSubIdx.isImm() &&
|
|
|
|
"One of the subindex of the reg_sequence is not an immediate");
|
|
|
|
BaseReg.Reg = MOBaseReg.getReg();
|
|
|
|
BaseReg.SubReg = MOBaseReg.getSubReg();
|
|
|
|
|
|
|
|
InsertedReg.Reg = MOInsertedReg.getReg();
|
|
|
|
InsertedReg.SubReg = MOInsertedReg.getSubReg();
|
|
|
|
InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
|
|
|
|
return true;
|
|
|
|
}
|
[MachinePipeliner] Improve the TargetInstrInfo API analyzeLoop/reduceLoopCount
Recommit: fix asan errors.
The way MachinePipeliner uses these target hooks is stateful - we reduce trip
count by one per call to reduceLoopCount. It's a little overfit for hardware
loops, where we don't have to worry about stitching a loop induction variable
across prologs and epilogs (the induction variable is implicit).
This patch introduces a new API:
/// Analyze loop L, which must be a single-basic-block loop, and if the
/// conditions can be understood enough produce a PipelinerLoopInfo object.
virtual std::unique_ptr<PipelinerLoopInfo>
analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const;
The return value is expected to be an implementation of the abstract class:
/// Object returned by analyzeLoopForPipelining. Allows software pipelining
/// implementations to query attributes of the loop being pipelined.
class PipelinerLoopInfo {
public:
virtual ~PipelinerLoopInfo();
/// Return true if the given instruction should not be pipelined and should
/// be ignored. An example could be a loop comparison, or induction variable
/// update with no users being pipelined.
virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;
/// Create a condition to determine if the trip count of the loop is greater
/// than TC.
///
/// If the trip count is statically known to be greater than TC, return
/// true. If the trip count is statically known to be not greater than TC,
/// return false. Otherwise return nullopt and fill out Cond with the test
/// condition.
virtual Optional<bool>
createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
SmallVectorImpl<MachineOperand> &Cond) = 0;
/// Modify the loop such that the trip count is
/// OriginalTC + TripCountAdjust.
virtual void adjustTripCount(int TripCountAdjust) = 0;
/// Called when the loop's preheader has been modified to NewPreheader.
virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;
/// Called when the loop is being removed.
virtual void disposed() = 0;
};
The Pipeliner (ModuloSchedule.cpp) can use this object to modify the loop while
allowing the target to hold its own state across all calls. This API, in
particular the disjunction of creating a trip count check condition and
adjusting the loop, improves the code quality in ModuloSchedule.cpp.
llvm-svn: 372463
2019-09-21 16:19:41 +08:00
|
|
|
|
[MIR] Add comments to INLINEASM immediate flag MachineOperands
Summary:
The INLINEASM MIR instructions use immediate operands to encode the values of some operands.
The MachineInstr pretty printer function already handles those operands and prints human readable annotations instead of the immediates. This patch adds similar annotations to the output of the MIRPrinter, however uses the new MIROperandComment feature.
Reviewers: SjoerdMeijer, arsenm, efriedma
Reviewed By: arsenm
Subscribers: qcolombet, sdardis, jvesely, wdng, nhaehnle, hiraditya, jrtc27, atanasyan, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78088
2020-04-14 15:24:40 +08:00
|
|
|
// Returns a MIRPrinter comment for this machine operand.
|
|
|
|
std::string TargetInstrInfo::createMIROperandComment(
|
|
|
|
const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
|
|
|
|
|
|
|
if (!MI.isInlineAsm())
|
|
|
|
return "";
|
|
|
|
|
|
|
|
std::string Flags;
|
|
|
|
raw_string_ostream OS(Flags);
|
|
|
|
|
|
|
|
if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
|
|
|
|
// Print HasSideEffects, MayLoad, MayStore, IsAlignStack
|
|
|
|
unsigned ExtraInfo = Op.getImm();
|
|
|
|
bool First = true;
|
|
|
|
for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
|
|
|
|
if (!First)
|
|
|
|
OS << " ";
|
|
|
|
First = false;
|
|
|
|
OS << Info;
|
|
|
|
}
|
|
|
|
|
|
|
|
return OS.str();
|
|
|
|
}
|
|
|
|
|
|
|
|
int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
|
|
|
|
if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
|
|
|
|
return "";
|
|
|
|
|
|
|
|
assert(Op.isImm() && "Expected flag operand to be an immediate");
|
|
|
|
// Pretty print the inline asm operand descriptor.
|
|
|
|
unsigned Flag = Op.getImm();
|
|
|
|
unsigned Kind = InlineAsm::getKind(Flag);
|
|
|
|
OS << InlineAsm::getKindName(Kind);
|
|
|
|
|
|
|
|
unsigned RCID = 0;
|
|
|
|
if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
|
|
|
|
InlineAsm::hasRegClassConstraint(Flag, RCID)) {
|
|
|
|
if (TRI) {
|
|
|
|
OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
|
|
|
|
} else
|
|
|
|
OS << ":RC" << RCID;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (InlineAsm::isMemKind(Flag)) {
|
|
|
|
unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
|
|
|
|
OS << ":" << InlineAsm::getMemConstraintName(MCID);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned TiedTo = 0;
|
|
|
|
if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
|
|
|
|
OS << " tiedto:$" << TiedTo;
|
|
|
|
|
|
|
|
return OS.str();
|
|
|
|
}
|
|
|
|
|
[MachinePipeliner] Improve the TargetInstrInfo API analyzeLoop/reduceLoopCount
Recommit: fix asan errors.
The way MachinePipeliner uses these target hooks is stateful - we reduce trip
count by one per call to reduceLoopCount. It's a little overfit for hardware
loops, where we don't have to worry about stitching a loop induction variable
across prologs and epilogs (the induction variable is implicit).
This patch introduces a new API:
/// Analyze loop L, which must be a single-basic-block loop, and if the
/// conditions can be understood enough produce a PipelinerLoopInfo object.
virtual std::unique_ptr<PipelinerLoopInfo>
analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const;
The return value is expected to be an implementation of the abstract class:
/// Object returned by analyzeLoopForPipelining. Allows software pipelining
/// implementations to query attributes of the loop being pipelined.
class PipelinerLoopInfo {
public:
virtual ~PipelinerLoopInfo();
/// Return true if the given instruction should not be pipelined and should
/// be ignored. An example could be a loop comparison, or induction variable
/// update with no users being pipelined.
virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;
/// Create a condition to determine if the trip count of the loop is greater
/// than TC.
///
/// If the trip count is statically known to be greater than TC, return
/// true. If the trip count is statically known to be not greater than TC,
/// return false. Otherwise return nullopt and fill out Cond with the test
/// condition.
virtual Optional<bool>
createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
SmallVectorImpl<MachineOperand> &Cond) = 0;
/// Modify the loop such that the trip count is
/// OriginalTC + TripCountAdjust.
virtual void adjustTripCount(int TripCountAdjust) = 0;
/// Called when the loop's preheader has been modified to NewPreheader.
virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;
/// Called when the loop is being removed.
virtual void disposed() = 0;
};
The Pipeliner (ModuloSchedule.cpp) can use this object to modify the loop while
allowing the target to hold its own state across all calls. This API, in
particular the disjunction of creating a trip count check condition and
adjusting the loop, improves the code quality in ModuloSchedule.cpp.
llvm-svn: 372463
2019-09-21 16:19:41 +08:00
|
|
|
// Out-of-line virtual destructor anchors the PipelinerLoopInfo vtable here;
// '= default' is the idiomatic spelling (modernize-use-equals-default).
TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;
|