2017-10-20 05:37:38 +08:00
|
|
|
//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2017-10-20 05:37:38 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains the RISCV implementation of the TargetInstrInfo class.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "RISCVInstrInfo.h"
|
2021-01-15 03:44:02 +08:00
|
|
|
#include "MCTargetDesc/RISCVMatInt.h"
|
2017-10-20 05:37:38 +08:00
|
|
|
#include "RISCV.h"
|
|
|
|
#include "RISCVSubtarget.h"
|
|
|
|
#include "RISCVTargetMachine.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2018-01-11 05:05:07 +08:00
|
|
|
#include "llvm/CodeGen/RegisterScavenging.h"
|
2017-10-20 05:37:38 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
|
|
|
|
2020-01-17 12:04:42 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
[RISCV] Added isCompressibleInst() to estimate size in getInstSizeInBytes()
Summary:
Modified compression emitter tablegen backend to emit isCompressibleInst()
check which in turn is used by getInstSizeInBytes() to better estimate
instruction size. Note the generation of compressed instructions in RISC-V
happens late in the assembler therefore instruction size estimate might be off
if computed before.
Reviewers: lenary, asb, luismarques, lewis-revill
Reviewed By: asb
Subscribers: sameer.abuasal, lewis-revill, hiraditya, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, lenary, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68290
2019-12-17 07:09:48 +08:00
|
|
|
#define GEN_CHECK_COMPRESS_INSTR
|
|
|
|
#include "RISCVGenCompressInstEmitter.inc"
|
|
|
|
|
2017-10-20 05:37:38 +08:00
|
|
|
#define GET_INSTRINFO_CTOR_DTOR
|
|
|
|
#include "RISCVGenInstrInfo.inc"
|
|
|
|
|
2021-01-28 05:14:43 +08:00
|
|
|
// Pull in the tablegen-generated RVV pseudo-instruction lookup table.
// GET_RISCVVPseudosTable_IMPL expands to the table definition and its
// search function inside this namespace.
namespace llvm {
namespace RISCVVPseudosTable {

using namespace RISCV;

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVPseudosTable
} // namespace llvm
|
|
|
|
|
[RISCV] Add MachineInstr immediate verification
Summary:
This patch implements the `TargetInstrInfo::verifyInstruction` hook for RISC-V. Currently the hook verifies the machine instruction's immediate operands, to check if the immediates are within the expected bounds. Without the hook invalid immediates are not detected except when doing assembly parsing, so they are silently emitted (including being truncated when emitting object code).
The bounds information is specified in tablegen by using the `OperandType` definition, which sets the `MCOperandInfo`'s `OperandType` field. Several RISC-V-specific immediate operand types were created, which extend the `MCInstrDesc`'s `OperandType` `enum`.
To have the hook called with `llc` pass it the `-verify-machineinstrs` option. For Clang add the cmake build config `-DLLVM_ENABLE_EXPENSIVE_CHECKS=True`, or temporarily patch `TargetPassConfig::addVerifyPass`.
Review concerns:
- The patch adds immediate operand type checks that cover at least the base ISA. There are several other operand types for the C extension and one type for the F/D extensions that were left out of this initial patch because they introduced further design concerns that I felt were best evaluated separately.
- Invalid register classes (e.g. passing a GPR register where a GPRC is expected) are already caught, so were not included.
- This design makes the more abstract `MachineInstr` verification depend on MC layer definitions, which arguably is not the cleanest design, but is in line with how things are done in other parts of the target and LLVM in general.
- There is some duplication of logic already present in the `MCOperandPredicate`s. Since the `MachineInstr` and `MCInstr` notions of immediates are fundamentally different, this is currently necessary.
Reviewers: asb, lenary
Reviewed By: lenary
Subscribers: hiraditya, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, kito-cheng, shiva0217, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67397
llvm-svn: 375006
2019-10-16 23:06:02 +08:00
|
|
|
// Construct the target instruction info. The two opcodes passed to the
// generated base class mark call-frame setup/destruction boundaries used by
// frame lowering.
RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}
|
2017-11-08 20:20:01 +08:00
|
|
|
|
2018-04-26 23:34:27 +08:00
|
|
|
unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
|
|
|
|
int &FrameIndex) const {
|
|
|
|
switch (MI.getOpcode()) {
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
case RISCV::LB:
|
|
|
|
case RISCV::LBU:
|
|
|
|
case RISCV::LH:
|
|
|
|
case RISCV::LHU:
|
2020-07-03 22:57:59 +08:00
|
|
|
case RISCV::FLH:
|
2018-04-26 23:34:27 +08:00
|
|
|
case RISCV::LW:
|
|
|
|
case RISCV::FLW:
|
|
|
|
case RISCV::LWU:
|
|
|
|
case RISCV::LD:
|
|
|
|
case RISCV::FLD:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
|
|
|
|
MI.getOperand(2).getImm() == 0) {
|
|
|
|
FrameIndex = MI.getOperand(1).getIndex();
|
|
|
|
return MI.getOperand(0).getReg();
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
|
|
|
|
int &FrameIndex) const {
|
|
|
|
switch (MI.getOpcode()) {
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
case RISCV::SB:
|
|
|
|
case RISCV::SH:
|
|
|
|
case RISCV::SW:
|
2020-07-03 22:57:59 +08:00
|
|
|
case RISCV::FSH:
|
2018-04-26 23:34:27 +08:00
|
|
|
case RISCV::FSW:
|
|
|
|
case RISCV::SD:
|
|
|
|
case RISCV::FSD:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-07-14 15:26:01 +08:00
|
|
|
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
|
|
|
|
MI.getOperand(2).getImm() == 0) {
|
|
|
|
FrameIndex = MI.getOperand(1).getIndex();
|
|
|
|
return MI.getOperand(0).getReg();
|
2018-04-26 23:34:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-11-08 20:20:01 +08:00
|
|
|
// Emit a register-to-register copy before MBBI.
//   GPR -> GPR: ADDI Dst, Src, 0.
//   FPR -> FPR: FSGNJ Dst, Src, Src (sign-inject from itself == move).
//   VR  -> VR : whole-register move pseudos, one per LMUL grouping.
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // FPR->FPR copies and VR->VR copies.
  unsigned Opc;
  bool IsScalableVector = false;
  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_H;
  else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_S;
  else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_D;
  else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV1R_V;
    IsScalableVector = true;
  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV2R_V;
    IsScalableVector = true;
  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV4R_V;
    IsScalableVector = true;
  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::PseudoVMV8R_V;
    IsScalableVector = true;
  } else
    llvm_unreachable("Impossible reg-to-reg copy");

  if (IsScalableVector)
    // Vector whole-register moves take a single source operand.
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  else
    // FSGNJ takes the source twice: sign-injecting a value with its own sign
    // is an FP move.
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(SrcReg, getKillRegState(KillSrc));
}
|
2017-11-08 21:31:40 +08:00
|
|
|
|
|
|
|
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::iterator I,
|
[NFC] unsigned->Register in storeRegTo/loadRegFromStack
Summary:
This patch makes progress on the 'unsigned -> Register' rewrite for
`TargetInstrInfo::loadRegFromStack` and `TII::storeRegToStack`.
Reviewers: arsenm, craig.topper, uweigand, jpienaar, atanasyan, venkatra, robertlytton, dylanmckay, t.p.northover, kparzysz, tstellar, k-ishizaka
Reviewed By: arsenm
Subscribers: wuzish, merge_guards_bot, jyknight, sdardis, nemanjai, jvesely, wdng, nhaehnle, hiraditya, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, luismarques, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D73870
2020-02-03 21:22:06 +08:00
|
|
|
Register SrcReg, bool IsKill, int FI,
|
2017-11-08 21:31:40 +08:00
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
|
|
|
DebugLoc DL;
|
|
|
|
if (I != MBB.end())
|
|
|
|
DL = I->getDebugLoc();
|
|
|
|
|
2020-11-19 10:23:55 +08:00
|
|
|
MachineFunction *MF = MBB.getParent();
|
|
|
|
const MachineFrameInfo &MFI = MF->getFrameInfo();
|
|
|
|
MachineMemOperand *MMO = MF->getMachineMemOperand(
|
|
|
|
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
|
|
|
|
MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
|
2018-03-21 23:11:02 +08:00
|
|
|
|
2020-11-19 10:23:55 +08:00
|
|
|
unsigned Opcode;
|
2017-12-07 20:45:05 +08:00
|
|
|
if (RISCV::GPRRegClass.hasSubClassEq(RC))
|
[RISCV] Add support for _interrupt attribute
- Save/restore only registers that are used.
This includes Callee saved registers and Caller saved registers
(arguments and temporaries) for integer and FP registers.
- If there is a call in the interrupt handler, save/restore all
Caller saved registers (arguments and temporaries) and all FP registers.
- Emit special return instructions depending on "interrupt"
attribute type.
Based on initial patch by Zhaoshi Zheng.
Reviewers: asb
Reviewed By: asb
Subscribers: rkruppe, the_o, MartinMosbeck, brucehoult, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, zzheng, edward-jones, mgrang, rogfer01, llvm-commits
Differential Revision: https://reviews.llvm.org/D48411
llvm-svn: 338047
2018-07-27 01:49:43 +08:00
|
|
|
Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
|
|
|
|
RISCV::SW : RISCV::SD;
|
2020-07-03 22:57:59 +08:00
|
|
|
else if (RISCV::FPR16RegClass.hasSubClassEq(RC))
|
|
|
|
Opcode = RISCV::FSH;
|
2018-03-21 23:11:02 +08:00
|
|
|
else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
|
|
|
|
Opcode = RISCV::FSW;
|
2018-04-12 13:34:25 +08:00
|
|
|
else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
|
|
|
|
Opcode = RISCV::FSD;
|
2017-11-08 21:31:40 +08:00
|
|
|
else
|
|
|
|
llvm_unreachable("Can't store this register to stack slot");
|
2018-03-21 23:11:02 +08:00
|
|
|
|
|
|
|
BuildMI(MBB, I, DL, get(Opcode))
|
|
|
|
.addReg(SrcReg, getKillRegState(IsKill))
|
|
|
|
.addFrameIndex(FI)
|
2020-11-19 10:23:55 +08:00
|
|
|
.addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
2017-11-08 21:31:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::iterator I,
|
[NFC] unsigned->Register in storeRegTo/loadRegFromStack
Summary:
This patch makes progress on the 'unsigned -> Register' rewrite for
`TargetInstrInfo::loadRegFromStack` and `TII::storeRegToStack`.
Reviewers: arsenm, craig.topper, uweigand, jpienaar, atanasyan, venkatra, robertlytton, dylanmckay, t.p.northover, kparzysz, tstellar, k-ishizaka
Reviewed By: arsenm
Subscribers: wuzish, merge_guards_bot, jyknight, sdardis, nemanjai, jvesely, wdng, nhaehnle, hiraditya, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, luismarques, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D73870
2020-02-03 21:22:06 +08:00
|
|
|
Register DstReg, int FI,
|
2017-11-08 21:31:40 +08:00
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
|
|
|
DebugLoc DL;
|
|
|
|
if (I != MBB.end())
|
|
|
|
DL = I->getDebugLoc();
|
|
|
|
|
2020-11-19 10:23:55 +08:00
|
|
|
MachineFunction *MF = MBB.getParent();
|
|
|
|
const MachineFrameInfo &MFI = MF->getFrameInfo();
|
|
|
|
MachineMemOperand *MMO = MF->getMachineMemOperand(
|
|
|
|
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
|
|
|
|
MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
|
2018-03-21 23:11:02 +08:00
|
|
|
|
2020-11-19 10:23:55 +08:00
|
|
|
unsigned Opcode;
|
2017-12-07 20:45:05 +08:00
|
|
|
if (RISCV::GPRRegClass.hasSubClassEq(RC))
|
[RISCV] Add support for _interrupt attribute
- Save/restore only registers that are used.
This includes Callee saved registers and Caller saved registers
(arguments and temporaries) for integer and FP registers.
- If there is a call in the interrupt handler, save/restore all
Caller saved registers (arguments and temporaries) and all FP registers.
- Emit special return instructions depending on "interrupt"
attribute type.
Based on initial patch by Zhaoshi Zheng.
Reviewers: asb
Reviewed By: asb
Subscribers: rkruppe, the_o, MartinMosbeck, brucehoult, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, zzheng, edward-jones, mgrang, rogfer01, llvm-commits
Differential Revision: https://reviews.llvm.org/D48411
llvm-svn: 338047
2018-07-27 01:49:43 +08:00
|
|
|
Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
|
|
|
|
RISCV::LW : RISCV::LD;
|
2020-07-03 22:57:59 +08:00
|
|
|
else if (RISCV::FPR16RegClass.hasSubClassEq(RC))
|
|
|
|
Opcode = RISCV::FLH;
|
2018-03-21 23:11:02 +08:00
|
|
|
else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
|
|
|
|
Opcode = RISCV::FLW;
|
2018-04-12 13:34:25 +08:00
|
|
|
else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
|
|
|
|
Opcode = RISCV::FLD;
|
2017-11-08 21:31:40 +08:00
|
|
|
else
|
|
|
|
llvm_unreachable("Can't load this register from stack slot");
|
2018-03-21 23:11:02 +08:00
|
|
|
|
2020-11-19 10:23:55 +08:00
|
|
|
BuildMI(MBB, I, DL, get(Opcode), DstReg)
|
|
|
|
.addFrameIndex(FI)
|
|
|
|
.addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
2017-11-08 21:31:40 +08:00
|
|
|
}
|
2018-01-11 03:53:46 +08:00
|
|
|
|
2019-09-13 12:03:32 +08:00
|
|
|
// Materialize the immediate Val into DstReg before MBBI, expanding the
// sequence computed by RISCVMatInt. Intermediate results go through fresh
// virtual registers; only the final instruction writes DstReg.
// On RV32, only 32-bit values may be materialized (fatal error otherwise).
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
  Register SrcReg = RISCV::X0;
  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  unsigned Num = 0;

  if (!IsRV64 && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Val, IsRV64, Seq);
  assert(!Seq.empty() && "generateInstSeq must produce at least one instr");

  for (RISCVMatInt::Inst &Inst : Seq) {
    // Write the final result to DstReg if it's the last instruction in the Seq.
    // Otherwise, write the result to the temp register.
    if (++Num == Seq.size())
      Result = DstReg;

    if (Inst.Opc == RISCV::LUI) {
      // LUI takes only an immediate; every other opcode in the sequence is
      // reg+imm with the running partial result as its source.
      BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    } else {
      BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    }
    // Only the first instruction has X0 as its source.
    SrcReg = Result;
  }
}
|
2018-01-11 04:47:00 +08:00
|
|
|
|
|
|
|
// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  // Conditional-branch operand layout: (Reg1, Reg2, TargetMBB).
  Target = LastInst.getOperand(2).getMBB();
  // Push order is the contract consumed by insertBranch /
  // reverseBranchCondition: [opcode-imm, Reg1, Reg2].
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}
|
|
|
|
|
|
|
|
static unsigned getOppositeBranchOpcode(int Opc) {
|
|
|
|
switch (Opc) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unrecognized conditional branch");
|
|
|
|
case RISCV::BEQ:
|
|
|
|
return RISCV::BNE;
|
|
|
|
case RISCV::BNE:
|
|
|
|
return RISCV::BEQ;
|
|
|
|
case RISCV::BLT:
|
|
|
|
return RISCV::BGE;
|
|
|
|
case RISCV::BGE:
|
|
|
|
return RISCV::BLT;
|
|
|
|
case RISCV::BLTU:
|
|
|
|
return RISCV::BGEU;
|
|
|
|
case RISCV::BGEU:
|
|
|
|
return RISCV::BLTU;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Analyze the terminators of MBB. Returns false (success) when the block's
// control flow fits one of the patterns TargetInstrInfo documents
// (fallthrough / single uncond / single cond / cond+uncond), filling TBB, FBB
// and Cond accordingly; returns true when the block can't be analyzed.
// With AllowModify set, dead terminators after the first unconditional or
// indirect branch are erased.
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      // Walking backwards, so the last assignment is the earliest such branch.
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBR.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
|
|
|
|
|
|
|
|
// Remove the branch(es) at the end of MBB, returning how many were removed
// (0, 1, or 2: an optional trailing unconditional branch preceded by an
// optional conditional branch). If BytesRemoved is non-null, it accumulates
// the byte size of the erased instructions for branch relaxation.
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  // Nothing to do if the block doesn't end in a branch.
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  // Re-anchor the iterator: erasing invalidated it.
  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  // Only a conditional branch can legally precede the branch just removed.
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}
|
|
|
|
|
|
|
|
// Inserts a branch into the end of the specific MachineBasicBlock, returning
|
|
|
|
// the number of instructions inserted.
|
|
|
|
unsigned RISCVInstrInfo::insertBranch(
|
|
|
|
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
|
|
|
|
ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
|
2018-01-11 05:05:07 +08:00
|
|
|
if (BytesAdded)
|
|
|
|
*BytesAdded = 0;
|
2018-01-11 04:47:00 +08:00
|
|
|
|
|
|
|
// Shouldn't be a fall through.
|
2020-01-21 23:47:35 +08:00
|
|
|
assert(TBB && "insertBranch must not be told to insert a fallthrough");
|
2018-01-11 04:47:00 +08:00
|
|
|
assert((Cond.size() == 3 || Cond.size() == 0) &&
|
|
|
|
"RISCV branch conditions have two components!");
|
|
|
|
|
|
|
|
// Unconditional branch.
|
|
|
|
if (Cond.empty()) {
|
2018-01-11 05:05:07 +08:00
|
|
|
MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
|
|
|
|
if (BytesAdded)
|
|
|
|
*BytesAdded += getInstSizeInBytes(MI);
|
2018-01-11 04:47:00 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Either a one or two-way conditional branch.
|
|
|
|
unsigned Opc = Cond[0].getImm();
|
2018-01-11 05:05:07 +08:00
|
|
|
MachineInstr &CondMI =
|
|
|
|
*BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
|
|
|
|
if (BytesAdded)
|
|
|
|
*BytesAdded += getInstSizeInBytes(CondMI);
|
2018-01-11 04:47:00 +08:00
|
|
|
|
|
|
|
// One-way conditional branch.
|
|
|
|
if (!FBB)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
// Two-way conditional branch.
|
2018-01-11 05:05:07 +08:00
|
|
|
MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
|
|
|
|
if (BytesAdded)
|
|
|
|
*BytesAdded += getInstSizeInBytes(MI);
|
2018-01-11 04:47:00 +08:00
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
|
2018-01-11 05:05:07 +08:00
|
|
|
// Insert a long (out-of-B/J-immediate-range) unconditional branch from the
// fresh, empty block MBB to DestBB, returning the inserted size in bytes.
// PseudoJump expands to an AUIPC+JALR pair, hence the return value of 8.
// The scratch register needed by that pair is obtained via the scavenger.
unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                              MachineBasicBlock &DestBB,
                                              const DebugLoc &DL,
                                              int64_t BrOffset,
                                              RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // AUIPC+JALR only reaches +/-2 GiB.
  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();

  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
                          .addReg(ScratchReg, RegState::Define | RegState::Dead)
                          .addMBB(&DestBB, RISCVII::MO_CALL);

  // Scavenge a physical GPR for the scratch register and rewrite the virtual
  // register to it; this runs post-RA, so no virtual registers may remain.
  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                                MI.getIterator(), false, 0);
  MRI.replaceRegWith(ScratchReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
  return 8;
}
|
|
|
|
|
2018-01-11 04:47:00 +08:00
|
|
|
// Invert a branch condition produced by parseCondBranch by swapping the
// stored opcode for its opposite. Returns false to report success.
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  // Cond[0] carries the branch opcode as an immediate; rewrite it in place.
  MachineOperand &OpcOp = Cond[0];
  OpcOp.setImm(getOppositeBranchOpcode(OpcOp.getImm()));
  return false;
}
|
2018-01-11 05:05:07 +08:00
|
|
|
|
|
|
|
// Return the destination basic block of the branch MI.
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last explicit operand.
  const unsigned TargetIdx = MI.getNumExplicitOperands() - 1;
  return MI.getOperand(TargetIdx).getMBB();
}
|
|
|
|
|
|
|
|
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
|
|
|
|
int64_t BrOffset) const {
|
2020-08-17 19:25:45 +08:00
|
|
|
unsigned XLen = STI.getXLen();
|
2018-01-11 05:05:07 +08:00
|
|
|
// Ideally we could determine the supported branch offset from the
|
|
|
|
// RISCVII::FormMask, but this can't be used for Pseudo instructions like
|
|
|
|
// PseudoBR.
|
|
|
|
switch (BranchOp) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unexpected opcode!");
|
|
|
|
case RISCV::BEQ:
|
|
|
|
case RISCV::BNE:
|
|
|
|
case RISCV::BLT:
|
|
|
|
case RISCV::BGE:
|
|
|
|
case RISCV::BLTU:
|
|
|
|
case RISCV::BGEU:
|
|
|
|
return isIntN(13, BrOffset);
|
|
|
|
case RISCV::JAL:
|
|
|
|
case RISCV::PseudoBR:
|
|
|
|
return isIntN(21, BrOffset);
|
2020-08-17 19:25:45 +08:00
|
|
|
case RISCV::PseudoJump:
|
|
|
|
return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
|
2018-01-11 05:05:07 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
|
|
|
|
unsigned Opcode = MI.getOpcode();
|
|
|
|
|
|
|
|
switch (Opcode) {
|
[RISCV] Added isCompressibleInst() to estimate size in getInstSizeInBytes()
Summary:
Modified compression emitter tablegen backend to emit isCompressibleInst()
check which in turn is used by getInstSizeInBytes() to better estimate
instruction size. Note the generation of compressed instructions in RISC-V
happens late in the assembler therefore instruction size estimate might be off
if computed before.
Reviewers: lenary, asb, luismarques, lewis-revill
Reviewed By: asb
Subscribers: sameer.abuasal, lewis-revill, hiraditya, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, lenary, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68290
2019-12-17 07:09:48 +08:00
|
|
|
default: {
|
|
|
|
if (MI.getParent() && MI.getParent()->getParent()) {
|
|
|
|
const auto MF = MI.getMF();
|
|
|
|
const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
|
|
|
|
const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
|
|
|
|
const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
|
|
|
|
const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
|
|
|
|
if (isCompressibleInst(MI, &ST, MRI, STI))
|
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
return get(Opcode).getSize();
|
|
|
|
}
|
2018-01-11 05:05:07 +08:00
|
|
|
case TargetOpcode::EH_LABEL:
|
|
|
|
case TargetOpcode::IMPLICIT_DEF:
|
|
|
|
case TargetOpcode::KILL:
|
|
|
|
case TargetOpcode::DBG_VALUE:
|
|
|
|
return 0;
|
[RISCV] Fix RISCVInstrInfo::getInstSizeInBytes for atomics pseudos
Summary:
Without these, the generic branch relaxation pass will underestimate the
range required for branches spanning these and we can end up with
"fixup value out of range" errors rather than relaxing the branches.
Some of the instructions in the expansion may end up being compressed
but exactly determining that is awkward, and these conservative values
should be safe, if slightly suboptimal in rare cases.
Reviewers: asb, lenary, luismarques, lewis-revill
Reviewed By: asb, luismarques
Subscribers: hiraditya, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, jfb, PkmX, jocewei, psnobl, benna, Jim, s.egerton, pzheng, sameer.abuasal, apazos, evandro, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D77443
2020-07-15 17:48:41 +08:00
|
|
|
// These values are determined based on RISCVExpandAtomicPseudoInsts,
|
|
|
|
// RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
|
|
|
|
// pseudos are expanded.
|
2019-06-26 18:35:58 +08:00
|
|
|
case RISCV::PseudoCALLReg:
|
2018-04-25 22:19:12 +08:00
|
|
|
case RISCV::PseudoCALL:
|
2020-02-01 02:52:37 +08:00
|
|
|
case RISCV::PseudoJump:
|
2018-05-24 06:44:08 +08:00
|
|
|
case RISCV::PseudoTAIL:
|
2019-04-01 22:42:56 +08:00
|
|
|
case RISCV::PseudoLLA:
|
2019-06-11 20:57:47 +08:00
|
|
|
case RISCV::PseudoLA:
|
2019-06-19 16:40:59 +08:00
|
|
|
case RISCV::PseudoLA_TLS_IE:
|
|
|
|
case RISCV::PseudoLA_TLS_GD:
|
2018-04-25 22:19:12 +08:00
|
|
|
return 8;
|
[RISCV] Fix RISCVInstrInfo::getInstSizeInBytes for atomics pseudos
Summary:
Without these, the generic branch relaxation pass will underestimate the
range required for branches spanning these and we can end up with
"fixup value out of range" errors rather than relaxing the branches.
Some of the instructions in the expansion may end up being compressed
but exactly determining that is awkward, and these conservative values
should be safe, if slightly suboptimal in rare cases.
Reviewers: asb, lenary, luismarques, lewis-revill
Reviewed By: asb, luismarques
Subscribers: hiraditya, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, jfb, PkmX, jocewei, psnobl, benna, Jim, s.egerton, pzheng, sameer.abuasal, apazos, evandro, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D77443
2020-07-15 17:48:41 +08:00
|
|
|
case RISCV::PseudoAtomicLoadNand32:
|
|
|
|
case RISCV::PseudoAtomicLoadNand64:
|
|
|
|
return 20;
|
|
|
|
case RISCV::PseudoMaskedAtomicSwap32:
|
|
|
|
case RISCV::PseudoMaskedAtomicLoadAdd32:
|
|
|
|
case RISCV::PseudoMaskedAtomicLoadSub32:
|
|
|
|
return 28;
|
|
|
|
case RISCV::PseudoMaskedAtomicLoadNand32:
|
|
|
|
return 32;
|
|
|
|
case RISCV::PseudoMaskedAtomicLoadMax32:
|
|
|
|
case RISCV::PseudoMaskedAtomicLoadMin32:
|
|
|
|
return 44;
|
|
|
|
case RISCV::PseudoMaskedAtomicLoadUMax32:
|
|
|
|
case RISCV::PseudoMaskedAtomicLoadUMin32:
|
|
|
|
return 36;
|
|
|
|
case RISCV::PseudoCmpXchg32:
|
|
|
|
case RISCV::PseudoCmpXchg64:
|
|
|
|
return 16;
|
|
|
|
case RISCV::PseudoMaskedCmpXchg32:
|
|
|
|
return 32;
|
2019-02-09 04:48:56 +08:00
|
|
|
case TargetOpcode::INLINEASM:
|
|
|
|
case TargetOpcode::INLINEASM_BR: {
|
2018-01-11 05:05:07 +08:00
|
|
|
const MachineFunction &MF = *MI.getParent()->getParent();
|
|
|
|
const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
|
|
|
|
return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
|
|
|
|
*TM.getMCAsmInfo());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-01-26 04:22:49 +08:00
|
|
|
|
|
|
|
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
|
|
|
|
const unsigned Opcode = MI.getOpcode();
|
2020-08-25 22:44:57 +08:00
|
|
|
switch (Opcode) {
|
|
|
|
default:
|
|
|
|
break;
|
2020-08-25 22:45:24 +08:00
|
|
|
case RISCV::FSGNJ_D:
|
|
|
|
case RISCV::FSGNJ_S:
|
2020-09-21 15:59:22 +08:00
|
|
|
// The canonical floating-point move is fsgnj rd, rs, rs.
|
2020-08-25 22:45:24 +08:00
|
|
|
return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
|
|
|
|
MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
|
2020-08-25 22:44:57 +08:00
|
|
|
case RISCV::ADDI:
|
|
|
|
case RISCV::ORI:
|
|
|
|
case RISCV::XORI:
|
|
|
|
return (MI.getOperand(1).isReg() &&
|
|
|
|
MI.getOperand(1).getReg() == RISCV::X0) ||
|
|
|
|
(MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
|
2019-01-26 04:22:49 +08:00
|
|
|
}
|
|
|
|
return MI.isAsCheapAsAMove();
|
|
|
|
}
|
[RISCV] Add MachineInstr immediate verification
Summary:
This patch implements the `TargetInstrInfo::verifyInstruction` hook for RISC-V. Currently the hook verifies the machine instruction's immediate operands, to check if the immediates are within the expected bounds. Without the hook invalid immediates are not detected except when doing assembly parsing, so they are silently emitted (including being truncated when emitting object code).
The bounds information is specified in tablegen by using the `OperandType` definition, which sets the `MCOperandInfo`'s `OperandType` field. Several RISC-V-specific immediate operand types were created, which extend the `MCInstrDesc`'s `OperandType` `enum`.
To have the hook called with `llc` pass it the `-verify-machineinstrs` option. For Clang add the cmake build config `-DLLVM_ENABLE_EXPENSIVE_CHECKS=True`, or temporarily patch `TargetPassConfig::addVerifyPass`.
Review concerns:
- The patch adds immediate operand type checks that cover at least the base ISA. There are several other operand types for the C extension and one type for the F/D extensions that were left out of this initial patch because they introduced further design concerns that I felt were best evaluated separately.
- Invalid register classes (e.g. passing a GPR register where a GPRC is expected) are already caught, so were not included.
- This design makes the more abstract `MachineInstr` verification depend on MC layer definitions, which arguably is not the cleanest design, but is in line with how things are done in other parts of the target and LLVM in general.
- There is some duplication of logic already present in the `MCOperandPredicate`s. Since the `MachineInstr` and `MCInstr` notions of immediates are fundamentally different, this is currently necessary.
Reviewers: asb, lenary
Reviewed By: lenary
Subscribers: hiraditya, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, kito-cheng, shiva0217, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67397
llvm-svn: 375006
2019-10-16 23:06:02 +08:00
|
|
|
|
2020-09-21 15:59:22 +08:00
|
|
|
/// If \p MI is a register copy in disguise, return its destination and source
/// operands; otherwise return None.
Optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.isMoveReg())
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  switch (MI.getOpcode()) {
  case RISCV::ADDI: {
    // Operand 1 can be a frameindex but callers expect registers
    const MachineOperand &Src = MI.getOperand(1);
    const MachineOperand &Imm = MI.getOperand(2);
    if (Src.isReg() && Imm.isImm() && Imm.getImm() == 0)
      return DestSourcePair{MI.getOperand(0), Src};
    break;
  }
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S: {
    // The canonical floating-point move is fsgnj rd, rs, rs.
    const MachineOperand &Src1 = MI.getOperand(1);
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src1.isReg() && Src2.isReg() && Src1.getReg() == Src2.getReg())
      return DestSourcePair{MI.getOperand(0), Src1};
    break;
  }
  default:
    break;
  }
  return None;
}
|
|
|
|
|
[RISCV] Add MachineInstr immediate verification
Summary:
This patch implements the `TargetInstrInfo::verifyInstruction` hook for RISC-V. Currently the hook verifies the machine instruction's immediate operands, to check if the immediates are within the expected bounds. Without the hook invalid immediates are not detected except when doing assembly parsing, so they are silently emitted (including being truncated when emitting object code).
The bounds information is specified in tablegen by using the `OperandType` definition, which sets the `MCOperandInfo`'s `OperandType` field. Several RISC-V-specific immediate operand types were created, which extend the `MCInstrDesc`'s `OperandType` `enum`.
To have the hook called with `llc` pass it the `-verify-machineinstrs` option. For Clang add the cmake build config `-DLLVM_ENABLE_EXPENSIVE_CHECKS=True`, or temporarily patch `TargetPassConfig::addVerifyPass`.
Review concerns:
- The patch adds immediate operand type checks that cover at least the base ISA. There are several other operand types for the C extension and one type for the F/D extensions that were left out of this initial patch because they introduced further design concerns that I felt were best evaluated separately.
- Invalid register classes (e.g. passing a GPR register where a GPRC is expected) are already caught, so were not included.
- This design makes the more abstract `MachineInstr` verification depend on MC layer definitions, which arguably is not the cleanest design, but is in line with how things are done in other parts of the target and LLVM in general.
- There is some duplication of logic already present in the `MCOperandPredicate`s. Since the `MachineInstr` and `MCInstr` notions of immediates are fundamentally different, this is currently necessary.
Reviewers: asb, lenary
Reviewed By: lenary
Subscribers: hiraditya, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, kito-cheng, shiva0217, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67397
llvm-svn: 375006
2019-10-16 23:06:02 +08:00
|
|
|
/// Perform target-specific instruction verification: check that every
/// RISC-V immediate operand fits the bit-width declared by its
/// MCOperandInfo::OperandType. Returns false and sets \p ErrInfo on failure.
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrInfo *MCII = STI.getInstrInfo();
  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());

  for (auto &OI : enumerate(Desc.operands())) {
    unsigned OpType = OI.value().OperandType;
    // Only the target-defined RISC-V immediate operand types are range
    // checked here; register classes are verified elsewhere.
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(OI.index());
      // Non-immediate operands (e.g. symbols that will be resolved later)
      // are not checked.
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");
        case RISCVOp::OPERAND_UIMM4:
          Ok = isUInt<4>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM5:
          Ok = isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM12:
          Ok = isUInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM20:
          Ok = isUInt<20>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          // Shift amounts are log2(XLEN) bits wide: 6 bits on RV64,
          // 5 bits on RV32.
          if (STI.getTargetTriple().isArch64Bit())
            Ok = isUInt<6>(Imm);
          else
            Ok = isUInt<5>(Imm);
          break;
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  return true;
}
|
2019-11-05 17:36:08 +08:00
|
|
|
|
|
|
|
// Return true if get the base operand, byte offset of an instruction and the
|
|
|
|
// memory width. Width is the size of memory that is being loaded/stored.
|
|
|
|
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
|
|
|
|
const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
|
|
|
|
unsigned &Width, const TargetRegisterInfo *TRI) const {
|
2019-12-11 22:45:48 +08:00
|
|
|
if (!LdSt.mayLoadOrStore())
|
|
|
|
return false;
|
2019-11-05 17:36:08 +08:00
|
|
|
|
|
|
|
// Here we assume the standard RISC-V ISA, which uses a base+offset
|
|
|
|
// addressing mode. You'll need to relax these conditions to support custom
|
|
|
|
// load/stores instructions.
|
|
|
|
if (LdSt.getNumExplicitOperands() != 3)
|
|
|
|
return false;
|
|
|
|
if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!LdSt.hasOneMemOperand())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Width = (*LdSt.memoperands_begin())->getSize();
|
|
|
|
BaseReg = &LdSt.getOperand(1);
|
|
|
|
Offset = LdSt.getOperand(2).getImm();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return true when the two memory accesses provably never overlap, which
/// lets the scheduler reorder them. Conservatively returns false whenever
/// disjointness cannot be shown.
bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  // Side effects or ordered (e.g. atomic/volatile) references forbid any
  // reordering, so don't claim disjointness.
  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
|
[RISCV] Machine Operand Flag Serialization
Summary:
These hooks ensure that the RISC-V backend can serialize and parse MIR
correctly.
Reviewers: jrtc27, luismarques
Reviewed By: luismarques
Subscribers: hiraditya, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, s.egerton, pzheng, sameer.abuasal, apazos, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D70666
2019-12-09 21:16:28 +08:00
|
|
|
|
|
|
|
/// Split a machine-operand target-flag word into its direct-flag part and
/// the remaining bitmask part, as required for MIR serialization.
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned DirectMask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & DirectMask, TF & ~DirectMask);
}
|
|
|
|
|
|
|
|
/// Map each direct machine-operand target flag to the name used for it in
/// MIR, so flags round-trip through serialization.
ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> Flags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  return makeArrayRef(Flags);
}
|
2019-12-20 00:41:53 +08:00
|
|
|
/// Decide whether the machine outliner may pull code out of \p MF.
bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (F.hasLinkOnceODRLinkage() && !OutlineFromLinkOnceODRs)
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  if (F.hasSection())
    return false;

  // It's safe to outline from MF.
  return true;
}
|
|
|
|
|
|
|
|
/// All basic blocks are candidates at this stage; per-candidate safety is
/// decided later in getOutliningCandidateInfo.
bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // More accurate safety checking is done in getOutliningCandidateInfo.
  return true;
}
|
|
|
|
|
|
|
|
// Enum values indicating how an outlined call should be constructed.
// The RISC-V outliner currently emits a single call variant (call via t0,
// see insertOutlinedCall/buildOutlinedFrame).
enum MachineOutlinerConstructionID {
  MachineOutlinerDefault
};
|
|
|
|
|
|
|
|
/// Build the cost model for a set of repeated sequences: drop candidates
/// where the call cannot be inserted, then compute the sequence size and the
/// per-call / per-frame overheads used by the outliner's benefit analysis.
outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {

  // First we need to filter out candidates where the X5 register (IE t0) can't
  // be used to setup the function call.
  auto CannotInsertCall = [](outliner::Candidate &C) {
    const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();

    C.initLRU(*TRI);
    LiveRegUnits LRU = C.LRU;
    return !LRU.available(RISCV::X5);
  };

  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);

  // If the sequence doesn't have enough candidates left, then we're done.
  if (RepeatedSequenceLocs.size() < 2)
    return outliner::OutlinedFunction();

  unsigned SequenceSize = 0;

  // All candidates share the same sequence, so measure the first one.
  auto I = RepeatedSequenceLocs[0].front();
  auto E = std::next(RepeatedSequenceLocs[0].back());
  for (; I != E; ++I)
    SequenceSize += getInstSizeInBytes(*I);

  // call t0, function = 8 bytes.
  unsigned CallOverhead = 8;
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, CallOverhead);

  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
  unsigned FrameOverhead = 4;
  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
          .getFeatureBits()[RISCV::FeatureStdExtC])
    FrameOverhead = 2;

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                    FrameOverhead, MachineOutlinerDefault);
}
|
|
|
|
|
|
|
|
/// Classify a single instruction for the machine outliner: Legal (may be
/// outlined), Invisible (ignored by the analysis), or Illegal (blocks
/// outlining of any sequence containing it).
outliner::InstrType
RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
                                 unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock *MBB = MI.getParent();
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      return outliner::InstrType::Invisible;

    return outliner::InstrType::Illegal;
  }

  // Don't trust the user to write safe inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // We can't outline branches to other basic blocks.
  if (MI.isTerminator() && !MBB->succ_empty())
    return outliner::InstrType::Illegal;

  // We need support for tail calls to outlined functions before return
  // statements can be allowed.
  if (MI.isReturn())
    return outliner::InstrType::Illegal;

  // Don't allow modifying the X5 register which we use for return addresses for
  // these outlined functions.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands())
    if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
      return outliner::InstrType::Illegal;

  // Don't allow instructions which won't be materialized to impact outlining
  // analysis.
  if (MI.isMetaInstruction())
    return outliner::InstrType::Invisible;

  return outliner::InstrType::Legal;
}
|
|
|
|
|
|
|
|
/// Fix up the outlined function's body: remove CFI instructions (marked
/// Invisible in getOutliningType), record X5 (t0) as live-in since the
/// caller passes the return address in it, and append the "jr t0" return.
void RISCVInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {

  // Strip out any CFI instructions. A single forward pass suffices; advance
  // the iterator before unlinking so removal doesn't invalidate it. (The
  // previous restart-from-begin loop was O(n^2) in the block size.)
  for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;
    ++I;
    if (MI.isCFIInstruction())
      MI.removeFromParent();
  }

  // The caller materializes the return address in t0 (x5).
  MBB.addLiveIn(RISCV::X5);

  // Add in a return instruction to the end of the outlined frame.
  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
                            .addReg(RISCV::X0, RegState::Define)
                            .addReg(RISCV::X5)
                            .addImm(0));
}
|
|
|
|
|
|
|
|
/// Insert a call to the outlined function at \p It, using PseudoCALLReg with
/// X5 (t0) to carry the return address, and return an iterator to the
/// inserted call.
MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, const outliner::Candidate &C) const {

  // Add in a call instruction to the outlined function at the given location.
  MachineInstrBuilder CallBuilder =
      BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
          .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                            RISCVII::MO_CALL);
  It = MBB.insert(It, CallBuilder);
  return It;
}
|
2021-02-09 01:56:47 +08:00
|
|
|
|
|
|
|
// clang-format off
// Expands to the commutable VFMA pseudo opcode for one (OP, TYPE, LMUL)
// combination, e.g. PseudoVFMADD_VV_M1_COMMUTABLE.
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_COMMUTABLE

// Case labels covering all seven LMUL variants of one (OP, TYPE) pair.
// Note the first expansion deliberately omits `case` so the macro can be
// used directly after a preceding `case`/at the start of a label list.
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                 \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                 \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

// Case labels covering the scalar-splat (VF16/VF32/VF64) forms of one op,
// across all LMULs.
#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS(OP, VF16):                                            \
  case CASE_VFMA_OPCODE_LMULS(OP, VF32):                                       \
  case CASE_VFMA_OPCODE_LMULS(OP, VF64)
// clang-format on
|
|
|
|
|
|
|
|
/// Find a pair of commutable operand indices for VFMA pseudos, following the
/// TargetInstrInfo commute protocol: on entry each of SrcOpIdx1/SrcOpIdx2 is
/// either a fixed operand index or CommuteAnyOperandIndex; on success both
/// are filled in with a valid commutable pair.
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case CASE_VFMA_SPLATS(FMADD):
  case CASE_VFMA_SPLATS(FMSUB):
  case CASE_VFMA_SPLATS(FMACC):
  case CASE_VFMA_SPLATS(FMSAC):
  case CASE_VFMA_SPLATS(FNMADD):
  case CASE_VFMA_SPLATS(FNMSUB):
  case CASE_VFMA_SPLATS(FNMACC):
  case CASE_VFMA_SPLATS(FNMSAC):
  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV): {
    // For these instructions we can only swap operand 1 and operand 3 by
    // changing the opcode.
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    return true;
  }
  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV): {
    // For these instructions we have more freedom. We can commute with the
    // other multiplicand or with the addend/subtrahend/minuend.

    // Any fixed operand must be from source 1, 2 or 3.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    // If both ops are fixed, one must be the tied source.
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    // Look for two different register operands assumed to be commutable
    // regardless of the FMA opcode. The FMA opcode is adjusted later if
    // needed.
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // At least one of operands to be commuted is not specified and
      // this method is free to choose appropriate commutable operands.
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Both of operands are not fixed. Set one of commutable
        // operands to the tied source.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommutableOpIdx1) {
        // Only one of the operands is not fixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now. Let's choose another commutable
      // operand and assign its index to CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        // The commuted operands should have different registers.
        // Otherwise, the commute transformation does not change anything and
        // is useless. We use this as a hint to make our decision.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      // Assign the found pair of commutable indices to SrcOpIdx1 and
      // SrcOpIdx2 to return those values.
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }

    return true;
  }
  }

  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
|
|
|
|
|
|
|
|
// Inside a switch over the current opcode: rewrite one (OLDOP, TYPE, LMUL)
// commutable pseudo to its NEWOP counterpart by assigning `Opc`.
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_COMMUTABLE:                   \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_COMMUTABLE;                \
    break;

// Opcode-rewrite cases for all seven LMUL variants of one (OLDOP -> NEWOP,
// TYPE) mapping.
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

// Opcode-rewrite cases for the scalar-splat (VF16/VF32/VF64) forms across
// all LMULs.
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF16)                            \
  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF32)                            \
  CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF64)
|
|
|
|
|
|
|
|
/// Commute operands OpIdx1/OpIdx2 of \p MI. For VFMA pseudos, swapping the
/// tied source (operand 1) with the addend (operand 3) also requires
/// switching between the xMACC-style and xMADD-style opcodes; the register
/// swap itself is then delegated to the base implementation.
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  // When NewMI is requested, mutate a clone rather than MI itself.
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case CASE_VFMA_SPLATS(FMACC):
  case CASE_VFMA_SPLATS(FMADD):
  case CASE_VFMA_SPLATS(FMSAC):
  case CASE_VFMA_SPLATS(FMSUB):
  case CASE_VFMA_SPLATS(FNMACC):
  case CASE_VFMA_SPLATS(FNMADD):
  case CASE_VFMA_SPLATS(FNMSAC):
  case CASE_VFMA_SPLATS(FNMSUB):
  case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV): {
    // It only makes sense to toggle these between clobbering the
    // addend/subtrahend/minuend and clobbering one of the multiplicands.
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
    CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
    CASE_VFMA_CHANGE_OPCODE_LMULS(FMACC, FMADD, VV)
    CASE_VFMA_CHANGE_OPCODE_LMULS(FMSAC, FMSUB, VV)
    CASE_VFMA_CHANGE_OPCODE_LMULS(FNMACC, FNMADD, VV)
    CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSAC, FNMSUB, VV)
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
  case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV): {
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the operands is the addend, we need to change the opcode.
    // Otherwise we're just swapping 2 of the multiplicands.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
      CASE_VFMA_CHANGE_OPCODE_LMULS(FMADD, FMACC, VV)
      CASE_VFMA_CHANGE_OPCODE_LMULS(FMSUB, FMSAC, VV)
      CASE_VFMA_CHANGE_OPCODE_LMULS(FNMADD, FNMACC, VV)
      CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSUB, FNMSAC, VV)
      }

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    // Let the default code handle it.
    break;
  }
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
|
|
|
|
|
|
|
|
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
|
|
|
|
#undef CASE_VFMA_CHANGE_OPCODE_LMULS
|
|
|
|
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
|
|
|
|
#undef CASE_VFMA_SPLATS
|
|
|
|
#undef CASE_VFMA_OPCODE_LMULS
|
|
|
|
#undef CASE_VFMA_OPCODE_COMMON
|