2012-02-18 20:03:15 +08:00
|
|
|
//===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===//
|
2006-02-05 13:50:24 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2006-02-05 13:50:24 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains the Sparc implementation of the TargetInstrInfo class.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "SparcInstrInfo.h"
|
|
|
|
#include "Sparc.h"
|
2011-07-11 11:57:24 +08:00
|
|
|
#include "SparcMachineFunctionInfo.h"
|
|
|
|
#include "SparcSubtarget.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2013-06-26 20:40:16 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2006-02-05 13:50:24 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2013-06-26 20:40:16 +08:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2009-09-16 01:46:24 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2009-07-12 04:10:48 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2011-08-25 02:08:43 +08:00
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
2011-06-29 04:07:07 +08:00
|
|
|
|
2006-02-05 13:50:24 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 10:03:14 +08:00
|
|
|
#define GET_INSTRINFO_CTOR_DTOR
|
|
|
|
#include "SparcGenInstrInfo.inc"
|
2013-11-19 08:57:56 +08:00
|
|
|
|
|
|
|
// Pin the vtable to this file.
// Defining this out-of-line "key function" anchors the class's vtable and
// RTTI emission to this translation unit instead of duplicating them in
// every TU that includes SparcInstrInfo.h.
void SparcInstrInfo::anchor() {}
|
|
|
|
|
2006-02-05 13:50:24 +08:00
|
|
|
// Construct the Sparc instruction-info object.  The two pseudo opcodes tell
// the generic TargetInstrInfo machinery which instructions mark call-frame
// setup and teardown so frame lowering can find and eliminate them.
SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
    : SparcGenInstrInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP), RI(),
      Subtarget(ST) {}
|
2006-02-05 13:50:24 +08:00
|
|
|
|
|
|
|
/// isLoadFromStackSlot - If the specified machine instruction is a direct
|
|
|
|
/// load from a stack slot, return the virtual or physical register number of
|
|
|
|
/// the destination along with the FrameIndex of the loaded stack slot. If
|
|
|
|
/// not, return 0. This predicate must return 0 if the instruction has
|
|
|
|
/// any side effects other than loading from the stack slot.
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
|
2006-02-05 13:50:24 +08:00
|
|
|
int &FrameIndex) const {
|
2016-06-30 08:01:54 +08:00
|
|
|
if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri ||
|
|
|
|
MI.getOpcode() == SP::LDFri || MI.getOpcode() == SP::LDDFri ||
|
|
|
|
MI.getOpcode() == SP::LDQFri) {
|
|
|
|
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
|
|
|
|
MI.getOperand(2).getImm() == 0) {
|
|
|
|
FrameIndex = MI.getOperand(1).getIndex();
|
|
|
|
return MI.getOperand(0).getReg();
|
2006-02-05 13:50:24 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isStoreToStackSlot - If the specified machine instruction is a direct
|
|
|
|
/// store to a stack slot, return the virtual or physical register number of
|
|
|
|
/// the source reg along with the FrameIndex of the loaded stack slot. If
|
|
|
|
/// not, return 0. This predicate must return 0 if the instruction has
|
|
|
|
/// any side effects other than storing to the stack slot.
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
|
2006-02-05 13:50:24 +08:00
|
|
|
int &FrameIndex) const {
|
2016-06-30 08:01:54 +08:00
|
|
|
if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri ||
|
|
|
|
MI.getOpcode() == SP::STFri || MI.getOpcode() == SP::STDFri ||
|
|
|
|
MI.getOpcode() == SP::STQFri) {
|
|
|
|
if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
|
|
|
|
MI.getOperand(1).getImm() == 0) {
|
|
|
|
FrameIndex = MI.getOperand(0).getIndex();
|
|
|
|
return MI.getOperand(2).getReg();
|
2006-02-05 13:50:24 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2006-10-25 00:39:19 +08:00
|
|
|
|
2011-01-16 11:15:11 +08:00
|
|
|
// Returns true when CC names an integer condition code.
// NOTE(review): relies on the SPCC enumeration listing all integer codes
// first, ending with ICC_VC, so a single range check suffices — confirm
// against the SPCC::CondCodes declaration if the enum is reordered.
static bool IsIntegerCC(unsigned CC)
{
  return (CC <= SPCC::ICC_VC);
}
|
|
|
|
|
|
|
|
// Return the condition code that is taken exactly when CC is not, i.e. the
// logical complement used to reverse a conditional branch.  Handles integer
// (ICC_*) and floating-point (FCC_*) codes; co-processor (CPCC_*) codes other
// than always/never have no meaningful inverse and abort compilation.
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
  switch(CC) {
  case SPCC::ICC_A:    return SPCC::ICC_N;
  case SPCC::ICC_N:    return SPCC::ICC_A;
  case SPCC::ICC_NE:   return SPCC::ICC_E;
  case SPCC::ICC_E:    return SPCC::ICC_NE;
  case SPCC::ICC_G:    return SPCC::ICC_LE;
  case SPCC::ICC_LE:   return SPCC::ICC_G;
  case SPCC::ICC_GE:   return SPCC::ICC_L;
  case SPCC::ICC_L:    return SPCC::ICC_GE;
  case SPCC::ICC_GU:   return SPCC::ICC_LEU;
  case SPCC::ICC_LEU:  return SPCC::ICC_GU;
  case SPCC::ICC_CC:   return SPCC::ICC_CS;
  case SPCC::ICC_CS:   return SPCC::ICC_CC;
  case SPCC::ICC_POS:  return SPCC::ICC_NEG;
  case SPCC::ICC_NEG:  return SPCC::ICC_POS;
  case SPCC::ICC_VC:   return SPCC::ICC_VS;
  case SPCC::ICC_VS:   return SPCC::ICC_VC;

  case SPCC::FCC_A:    return SPCC::FCC_N;
  case SPCC::FCC_N:    return SPCC::FCC_A;
  case SPCC::FCC_U:    return SPCC::FCC_O;
  case SPCC::FCC_O:    return SPCC::FCC_U;
  // Note: the complement of an ordered comparison is the *unordered*
  // comparison of the opposite relation (e.g. !(a > b) includes NaN).
  case SPCC::FCC_G:    return SPCC::FCC_ULE;
  case SPCC::FCC_LE:   return SPCC::FCC_UG;
  case SPCC::FCC_UG:   return SPCC::FCC_LE;
  case SPCC::FCC_ULE:  return SPCC::FCC_G;
  case SPCC::FCC_L:    return SPCC::FCC_UGE;
  case SPCC::FCC_GE:   return SPCC::FCC_UL;
  case SPCC::FCC_UL:   return SPCC::FCC_GE;
  case SPCC::FCC_UGE:  return SPCC::FCC_L;
  case SPCC::FCC_LG:   return SPCC::FCC_UE;
  case SPCC::FCC_UE:   return SPCC::FCC_LG;
  case SPCC::FCC_NE:   return SPCC::FCC_E;
  case SPCC::FCC_E:    return SPCC::FCC_NE;

  case SPCC::CPCC_A:   return SPCC::CPCC_N;
  case SPCC::CPCC_N:   return SPCC::CPCC_A;
  case SPCC::CPCC_3:   LLVM_FALLTHROUGH;
  case SPCC::CPCC_2:   LLVM_FALLTHROUGH;
  case SPCC::CPCC_23:  LLVM_FALLTHROUGH;
  case SPCC::CPCC_1:   LLVM_FALLTHROUGH;
  case SPCC::CPCC_13:  LLVM_FALLTHROUGH;
  case SPCC::CPCC_12:  LLVM_FALLTHROUGH;
  case SPCC::CPCC_123: LLVM_FALLTHROUGH;
  case SPCC::CPCC_0:   LLVM_FALLTHROUGH;
  case SPCC::CPCC_03:  LLVM_FALLTHROUGH;
  case SPCC::CPCC_02:  LLVM_FALLTHROUGH;
  case SPCC::CPCC_023: LLVM_FALLTHROUGH;
  case SPCC::CPCC_01:  LLVM_FALLTHROUGH;
  case SPCC::CPCC_013: LLVM_FALLTHROUGH;
  case SPCC::CPCC_012:
    // "Opposite" code is not meaningful, as we don't know
    // what the CoProc condition means here. The cond-code will
    // only be used in inline assembler, so this code should
    // not be reached in a normal compilation pass.
    llvm_unreachable("Meaningless inversion of co-processor cond code");
  }
  llvm_unreachable("Invalid cond code");
}
|
|
|
|
|
[SPARC] Revamp AnalyzeBranch and add ReverseBranchCondition.
AnalyzeBranch on X86 (and, previously, SPARC, which implementation was
copied from X86) tries to modify the branches based on block
layout (e.g. checking isLayoutSuccessor), when AllowModify is true.
The rest of the architectures leave that up to the caller, which can
call InsertBranch, RemoveBranch, and ReverseBranchCondition as
appropriate. That appears to be the preferred way to do it nowadays.
This commit makes SPARC like the rest: replaces AnalyzeBranch with an
implementation cribbed from AArch64, and adds a ReverseBranchCondition
implementation.
Additionally, a test-case has been added (also cribbed from AArch64)
demonstrating that redundant branch sequences no longer get emitted.
E.g., it used to emit code like this:
bne .LBB1_2
nop
ba .LBB1_1
nop
.LBB1_2:
And now emits:
cmp %i0, 42
be .LBB1_1
nop
llvm-svn: 257572
2016-01-13 12:44:14 +08:00
|
|
|
// True if Opc is the unconditional branch-always instruction (BA).
static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }
|
2011-01-16 11:15:11 +08:00
|
|
|
|
[SPARC] Revamp AnalyzeBranch and add ReverseBranchCondition.
AnalyzeBranch on X86 (and, previously, SPARC, which implementation was
copied from X86) tries to modify the branches based on block
layout (e.g. checking isLayoutSuccessor), when AllowModify is true.
The rest of the architectures leave that up to the caller, which can
call InsertBranch, RemoveBranch, and ReverseBranchCondition as
appropriate. That appears to be the preferred way to do it nowadays.
This commit makes SPARC like the rest: replaces AnalyzeBranch with an
implementation cribbed from AArch64, and adds a ReverseBranchCondition
implementation.
Additionally, a test-case has been added (also cribbed from AArch64)
demonstrating that redundant branch sequences no longer get emitted.
E.g., it used to emit code like this:
bne .LBB1_2
nop
ba .LBB1_1
nop
.LBB1_2:
And now emits:
cmp %i0, 42
be .LBB1_1
nop
llvm-svn: 257572
2016-01-13 12:44:14 +08:00
|
|
|
static bool isCondBranchOpcode(int Opc) {
|
|
|
|
return Opc == SP::FBCOND || Opc == SP::BCOND;
|
|
|
|
}
|
2011-01-16 11:15:11 +08:00
|
|
|
|
[SPARC] Revamp AnalyzeBranch and add ReverseBranchCondition.
AnalyzeBranch on X86 (and, previously, SPARC, which implementation was
copied from X86) tries to modify the branches based on block
layout (e.g. checking isLayoutSuccessor), when AllowModify is true.
The rest of the architectures leave that up to the caller, which can
call InsertBranch, RemoveBranch, and ReverseBranchCondition as
appropriate. That appears to be the preferred way to do it nowadays.
This commit makes SPARC like the rest: replaces AnalyzeBranch with an
implementation cribbed from AArch64, and adds a ReverseBranchCondition
implementation.
Additionally, a test-case has been added (also cribbed from AArch64)
demonstrating that redundant branch sequences no longer get emitted.
E.g., it used to emit code like this:
bne .LBB1_2
nop
ba .LBB1_1
nop
.LBB1_2:
And now emits:
cmp %i0, 42
be .LBB1_1
nop
llvm-svn: 257572
2016-01-13 12:44:14 +08:00
|
|
|
static bool isIndirectBranchOpcode(int Opc) {
|
|
|
|
return Opc == SP::BINDrr || Opc == SP::BINDri;
|
|
|
|
}
|
2011-01-16 11:15:11 +08:00
|
|
|
|
[SPARC] Revamp AnalyzeBranch and add ReverseBranchCondition.
AnalyzeBranch on X86 (and, previously, SPARC, which implementation was
copied from X86) tries to modify the branches based on block
layout (e.g. checking isLayoutSuccessor), when AllowModify is true.
The rest of the architectures leave that up to the caller, which can
call InsertBranch, RemoveBranch, and ReverseBranchCondition as
appropriate. That appears to be the preferred way to do it nowadays.
This commit makes SPARC like the rest: replaces AnalyzeBranch with an
implementation cribbed from AArch64, and adds a ReverseBranchCondition
implementation.
Additionally, a test-case has been added (also cribbed from AArch64)
demonstrating that redundant branch sequences no longer get emitted.
E.g., it used to emit code like this:
bne .LBB1_2
nop
ba .LBB1_1
nop
.LBB1_2:
And now emits:
cmp %i0, 42
be .LBB1_1
nop
llvm-svn: 257572
2016-01-13 12:44:14 +08:00
|
|
|
// Decompose a conditional branch instruction into its destination block and
// its condition code, appending the latter to Cond as a single immediate
// operand (the form analyzeBranch/insertBranch exchange).
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Target = LastInst->getOperand(0).getMBB();
  const int64_t CC = LastInst->getOperand(1).getImm();
  Cond.push_back(MachineOperand::CreateImm(CC));
}
|
2011-01-16 11:15:11 +08:00
|
|
|
|
2016-07-15 22:41:04 +08:00
|
|
|
/// analyzeBranch - Examine the terminators of MBB and, when the control flow
/// is a shape this target understands, report it via TBB/FBB/Cond using the
/// usual TargetInstrInfo contract:
///   - returns false with TBB/FBB/Cond filled in when the block's branching
///     was understood (fallthrough, single branch, or cond+uncond pair);
///   - returns true when the terminators cannot be analyzed (e.g. indirect
///     branches or three or more terminators).
/// When AllowModify is true, redundant trailing branches may be deleted.
bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  // Empty block: pure fallthrough, nothing to report.
  if (I == MBB.end())
    return false;

  // Last instruction isn't a terminator we can reason about: the block falls
  // through (or ends in a return-like instruction handled elsewhere).
  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
|
|
|
|
|
2016-09-15 01:24:15 +08:00
|
|
|
/// insertBranch - Append a branch sequence to MBB and return the number of
/// machine instructions inserted (1 or 2).  Cond is either empty (insert an
/// unconditional BA to TBB) or the single condition-code immediate produced
/// by analyzeBranch (insert BCOND/FBCOND to TBB, plus a BA to FBB when a
/// two-way branch is requested).
unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                      MachineBasicBlock *TBB,
                                      MachineBasicBlock *FBB,
                                      ArrayRef<MachineOperand> Cond,
                                      const DebugLoc &DL,
                                      int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "Sparc branch conditions should have one component!");
  // This target does not report instruction sizes to the caller.
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch: a single BA to TBB.
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
    return 1;
  }

  // Conditional branch
  unsigned CC = Cond[0].getImm();

  // Integer and floating-point conditions use different branch opcodes.
  if (IsIntegerCC(CC))
    BuildMI(&MBB, DL, get(SP::BCOND)).addMBB(TBB).addImm(CC);
  else
    BuildMI(&MBB, DL, get(SP::FBCOND)).addMBB(TBB).addImm(CC);
  if (!FBB)
    return 1;

  // Two-way branch: the false side gets an explicit unconditional branch.
  BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
  return 2;
}
|
|
|
|
|
2016-09-15 04:43:16 +08:00
|
|
|
/// removeBranch - Delete the branch instructions (BA/BCOND/FBCOND) at the end
/// of MBB, stopping at the first non-branch instruction, and return how many
/// were removed.  Debug instructions are skipped and not counted.
unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  // This target does not report instruction sizes to the caller.
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugInstr())
      continue;

    if (I->getOpcode() != SP::BA
        && I->getOpcode() != SP::BCOND
        && I->getOpcode() != SP::FBCOND)
      break; // Not a branch

    I->eraseFromParent();
    // Erasing invalidated I; restart the scan from the block's new end.
    I = MBB.end();
    ++Count;
  }
  return Count;
}
|
2007-12-31 14:32:00 +08:00
|
|
|
|
2016-09-15 04:43:16 +08:00
|
|
|
bool SparcInstrInfo::reverseBranchCondition(
|
[SPARC] Revamp AnalyzeBranch and add ReverseBranchCondition.
AnalyzeBranch on X86 (and, previously, SPARC, which implementation was
copied from X86) tries to modify the branches based on block
layout (e.g. checking isLayoutSuccessor), when AllowModify is true.
The rest of the architectures leave that up to the caller, which can
call InsertBranch, RemoveBranch, and ReverseBranchCondition as
appropriate. That appears to be the preferred way to do it nowadays.
This commit makes SPARC like the rest: replaces AnalyzeBranch with an
implementation cribbed from AArch64, and adds a ReverseBranchCondition
implementation.
Additionally, a test-case has been added (also cribbed from AArch64)
demonstrating that redundant branch sequences no longer get emitted.
E.g., it used to emit code like this:
bne .LBB1_2
nop
ba .LBB1_1
nop
.LBB1_2:
And now emits:
cmp %i0, 42
be .LBB1_1
nop
llvm-svn: 257572
2016-01-13 12:44:14 +08:00
|
|
|
SmallVectorImpl<MachineOperand> &Cond) const {
|
|
|
|
assert(Cond.size() == 1);
|
|
|
|
SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[0].getImm());
|
|
|
|
Cond[0].setImm(GetOppositeBranchCondition(CC));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-07-11 15:56:09 +08:00
|
|
|
// Emit a register-to-register copy from SrcReg to DestReg before iterator I.
// Register classes with a single-instruction move are handled directly;
// classes without one (integer pairs, and double/quad FP on subtargets
// lacking FMOVD/FMOVQ) are decomposed into moves of their sub-registers,
// driven by the numSubRegs/movOpc/subRegIdx variables set below.
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 const DebugLoc &DL, unsigned DestReg,
                                 unsigned SrcReg, bool KillSrc) const {
  // Parameters of the sub-register decomposition loop at the bottom.
  // They remain 0/nullptr when a direct single-instruction copy was emitted.
  unsigned numSubRegs = 0;
  unsigned movOpc = 0;
  const unsigned *subRegIdx = nullptr;
  // When true, the per-sub-register move needs %g0 as an extra first source
  // operand (integer OR-based moves: "or %g0, %src, %dst").
  bool ExtraG0 = false;

  // Sub-register index tables for each decomposition:
  //   DW: 64-bit integer pair -> two 32-bit integer registers.
  const unsigned DW_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
  //   double FP -> two single FP registers.
  const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
  //   quad FP -> two double FP registers.
  const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 };
  //   quad FP -> four single FP registers.
  const unsigned QFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd,
                                         SP::sub_odd64_then_sub_even,
                                         SP::sub_odd64_then_sub_odd };

  if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
    // Integer move: "or %g0, %src, %dst".
    BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::IntPairRegClass.contains(DestReg, SrcReg)) {
    // 64-bit integer pair: two OR-based moves, one per 32-bit half.
    subRegIdx = DW_SubRegsIdx;
    numSubRegs = 2;
    movOpc = SP::ORrr;
    ExtraG0 = true;
  } else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      // V9 has a native double-precision move.
      BuildMI(MBB, I, DL, get(SP::FMOVD), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      // Use two FMOVS instructions.
      subRegIdx = DFP_FP_SubRegsIdx;
      numSubRegs = 2;
      movOpc = SP::FMOVS;
    }
  } else if (SP::QFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      if (Subtarget.hasHardQuad()) {
        // Native quad-precision move.
        BuildMI(MBB, I, DL, get(SP::FMOVQ), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // Use two FMOVD instructions.
        subRegIdx = QFP_DFP_SubRegsIdx;
        numSubRegs = 2;
        movOpc = SP::FMOVD;
      }
    } else {
      // Use four FMOVS instructions.
      subRegIdx = QFP_FP_SubRegsIdx;
      numSubRegs = 4;
      movOpc = SP::FMOVS;
    }
  } else if (SP::ASRRegsRegClass.contains(DestReg) &&
             SP::IntRegsRegClass.contains(SrcReg)) {
    // Integer -> ancillary state register: "wr %g0, %src, %asr".
    BuildMI(MBB, I, DL, get(SP::WRASRrr), DestReg)
        .addReg(SP::G0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (SP::IntRegsRegClass.contains(DestReg) &&
             SP::ASRRegsRegClass.contains(SrcReg)) {
    // Ancillary state register -> integer: "rd %asr, %dst".
    BuildMI(MBB, I, DL, get(SP::RDASR), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else
    llvm_unreachable("Impossible reg-to-reg copy");

  // If a direct copy was emitted above, there is no decomposition to do.
  if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0)
    return;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstr *MovMI = nullptr;

  // Emit one move per sub-register, in table order.
  for (unsigned i = 0; i != numSubRegs; ++i) {
    unsigned Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
    unsigned Src = TRI->getSubReg(SrcReg, subRegIdx[i]);
    assert(Dst && Src && "Bad sub-register");

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(movOpc), Dst);
    if (ExtraG0)
      MIB.addReg(SP::G0);
    MIB.addReg(Src);
    MovMI = MIB.getInstr();
  }

  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI);
}
|
2008-01-02 05:11:32 +08:00
|
|
|
|
|
|
|
void SparcInstrInfo::
|
|
|
|
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
|
|
|
unsigned SrcReg, bool isKill, int FI,
|
2010-05-07 03:06:44 +08:00
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
2010-04-03 04:16:16 +08:00
|
|
|
DebugLoc DL;
|
2009-02-12 08:02:55 +08:00
|
|
|
if (I != MBB.end()) DL = I->getDebugLoc();
|
|
|
|
|
2013-06-26 20:40:16 +08:00
|
|
|
MachineFunction *MF = MBB.getParent();
|
2016-07-29 02:40:00 +08:00
|
|
|
const MachineFrameInfo &MFI = MF->getFrameInfo();
|
2015-08-12 07:09:45 +08:00
|
|
|
MachineMemOperand *MMO = MF->getMachineMemOperand(
|
|
|
|
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
|
|
|
|
MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
|
2013-06-26 20:40:16 +08:00
|
|
|
|
2008-01-02 05:11:32 +08:00
|
|
|
// On the order of operands here: think "[FrameIdx + 0] = SrcReg".
|
[Sparc] Implement i64 load/store support for 32-bit sparc.
The LDD/STD instructions can load/store a 64bit quantity from/to
memory to/from a consecutive even/odd pair of (32-bit) registers. They
are part of SparcV8, and also present in SparcV9. (Although deprecated
there, as you can store 64bits in one register).
As recommended on llvmdev in the thread "How to enable use of 64bit
load/store for 32bit architecture" from Apr 2015, I've modeled the
64-bit load/store operations as working on a v2i32 type, rather than
making i64 a legal type, but with few legal operations. The latter
does not (currently) work, as there is much code in llvm which assumes
that if i64 is legal, operations like "add" will actually work on it.
The same assumption does not hold for v2i32 -- for vector types, it is
workable to support only load/store, and expand everything else.
This patch:
- Adds a new register class, IntPair, for even/odd pairs of registers.
- Modifies the list of reserved registers, the stack spilling code,
and register copying code to support the IntPair register class.
- Adds support in AsmParser. (note that in asm text, you write the
name of the first register of the pair only. So the parser has to
morph the single register into the equivalent paired register).
- Adds the new instructions themselves (LDD/STD/LDDA/STDA).
- Hooks up the instructions and registers as a vector type v2i32. Adds
custom legalizer to transform i64 load/stores into v2i32 load/stores
and bitcasts, so that the new instructions can actually be
generated, and marks all operations other than load/store on v2i32
as needing to be expanded.
- Copies the unfortunate SelectInlineAsm hack from ARMISelDAGToDAG.
This hack undoes the transformation of i64 operands into two
arbitrarily-allocated separate i32 registers in
SelectionDAGBuilder. and instead passes them in a single
IntPair. (Arbitrarily allocated registers are not useful, asm code
expects to be receiving a pair, which can be passed to ldd/std.)
Also adds a bunch of test cases covering all the bugs I've added along
the way.
Differential Revision: http://reviews.llvm.org/D8713
llvm-svn: 244484
2015-08-11 03:11:39 +08:00
|
|
|
if (RC == &SP::I64RegsRegClass)
|
2013-05-20 08:53:25 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0)
|
2013-06-26 20:40:16 +08:00
|
|
|
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
|
2013-05-20 08:53:25 +08:00
|
|
|
else if (RC == &SP::IntRegsRegClass)
|
2009-02-12 08:02:55 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
|
2013-06-26 20:40:16 +08:00
|
|
|
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
|
[Sparc] Implement i64 load/store support for 32-bit sparc.
The LDD/STD instructions can load/store a 64bit quantity from/to
memory to/from a consecutive even/odd pair of (32-bit) registers. They
are part of SparcV8, and also present in SparcV9. (Although deprecated
there, as you can store 64bits in one register).
As recommended on llvmdev in the thread "How to enable use of 64bit
load/store for 32bit architecture" from Apr 2015, I've modeled the
64-bit load/store operations as working on a v2i32 type, rather than
making i64 a legal type, but with few legal operations. The latter
does not (currently) work, as there is much code in llvm which assumes
that if i64 is legal, operations like "add" will actually work on it.
The same assumption does not hold for v2i32 -- for vector types, it is
workable to support only load/store, and expand everything else.
This patch:
- Adds a new register class, IntPair, for even/odd pairs of registers.
- Modifies the list of reserved registers, the stack spilling code,
and register copying code to support the IntPair register class.
- Adds support in AsmParser. (note that in asm text, you write the
name of the first register of the pair only. So the parser has to
morph the single register into the equivalent paired register).
- Adds the new instructions themselves (LDD/STD/LDDA/STDA).
- Hooks up the instructions and registers as a vector type v2i32. Adds
custom legalizer to transform i64 load/stores into v2i32 load/stores
and bitcasts, so that the new instructions can actually be
generated, and marks all operations other than load/store on v2i32
as needing to be expanded.
- Copies the unfortunate SelectInlineAsm hack from ARMISelDAGToDAG.
This hack undoes the transformation of i64 operands into two
arbitrarily-allocated separate i32 registers in
SelectionDAGBuilder. and instead passes them in a single
IntPair. (Arbitrarily allocated registers are not useful, asm code
expects to be receiving a pair, which can be passed to ldd/std.)
Also adds a bunch of test cases covering all the bugs I've added along
the way.
Differential Revision: http://reviews.llvm.org/D8713
llvm-svn: 244484
2015-08-11 03:11:39 +08:00
|
|
|
else if (RC == &SP::IntPairRegClass)
|
|
|
|
BuildMI(MBB, I, DL, get(SP::STDri)).addFrameIndex(FI).addImm(0)
|
|
|
|
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
|
2012-04-20 14:31:50 +08:00
|
|
|
else if (RC == &SP::FPRegsRegClass)
|
2009-02-12 08:02:55 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
|
2013-06-26 20:40:16 +08:00
|
|
|
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
|
2013-09-03 02:32:45 +08:00
|
|
|
else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
|
2009-02-12 08:02:55 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
|
2013-06-26 20:40:16 +08:00
|
|
|
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
|
2013-09-03 02:32:45 +08:00
|
|
|
else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
|
|
|
|
// Use STQFri irrespective of its legality. If STQ is not legal, it will be
|
|
|
|
// lowered into two STDs in eliminateFrameIndex.
|
|
|
|
BuildMI(MBB, I, DL, get(SP::STQFri)).addFrameIndex(FI).addImm(0)
|
|
|
|
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
|
2008-01-02 05:11:32 +08:00
|
|
|
else
|
2009-07-15 00:55:14 +08:00
|
|
|
llvm_unreachable("Can't store this register to stack slot");
|
2008-01-02 05:11:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void SparcInstrInfo::
|
|
|
|
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
|
|
|
unsigned DestReg, int FI,
|
2010-05-07 03:06:44 +08:00
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
2010-04-03 04:16:16 +08:00
|
|
|
DebugLoc DL;
|
2009-02-12 08:02:55 +08:00
|
|
|
if (I != MBB.end()) DL = I->getDebugLoc();
|
|
|
|
|
2013-06-26 20:40:16 +08:00
|
|
|
MachineFunction *MF = MBB.getParent();
|
2016-07-29 02:40:00 +08:00
|
|
|
const MachineFrameInfo &MFI = MF->getFrameInfo();
|
2015-08-12 07:09:45 +08:00
|
|
|
MachineMemOperand *MMO = MF->getMachineMemOperand(
|
|
|
|
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
|
|
|
|
MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
|
2013-06-26 20:40:16 +08:00
|
|
|
|
2013-05-20 08:53:25 +08:00
|
|
|
if (RC == &SP::I64RegsRegClass)
|
2013-06-26 20:40:16 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
2013-05-20 08:53:25 +08:00
|
|
|
else if (RC == &SP::IntRegsRegClass)
|
2013-06-26 20:40:16 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
[Sparc] Implement i64 load/store support for 32-bit sparc.
The LDD/STD instructions can load/store a 64bit quantity from/to
memory to/from a consecutive even/odd pair of (32-bit) registers. They
are part of SparcV8, and also present in SparcV9. (Although deprecated
there, as you can store 64bits in one register).
As recommended on llvmdev in the thread "How to enable use of 64bit
load/store for 32bit architecture" from Apr 2015, I've modeled the
64-bit load/store operations as working on a v2i32 type, rather than
making i64 a legal type, but with few legal operations. The latter
does not (currently) work, as there is much code in llvm which assumes
that if i64 is legal, operations like "add" will actually work on it.
The same assumption does not hold for v2i32 -- for vector types, it is
workable to support only load/store, and expand everything else.
This patch:
- Adds a new register class, IntPair, for even/odd pairs of registers.
- Modifies the list of reserved registers, the stack spilling code,
and register copying code to support the IntPair register class.
- Adds support in AsmParser. (note that in asm text, you write the
name of the first register of the pair only. So the parser has to
morph the single register into the equivalent paired register).
- Adds the new instructions themselves (LDD/STD/LDDA/STDA).
- Hooks up the instructions and registers as a vector type v2i32. Adds
custom legalizer to transform i64 load/stores into v2i32 load/stores
and bitcasts, so that the new instructions can actually be
generated, and marks all operations other than load/store on v2i32
as needing to be expanded.
- Copies the unfortunate SelectInlineAsm hack from ARMISelDAGToDAG.
This hack undoes the transformation of i64 operands into two
arbitrarily-allocated separate i32 registers in
SelectionDAGBuilder. and instead passes them in a single
IntPair. (Arbitrarily allocated registers are not useful, asm code
expects to be receiving a pair, which can be passed to ldd/std.)
Also adds a bunch of test cases covering all the bugs I've added along
the way.
Differential Revision: http://reviews.llvm.org/D8713
llvm-svn: 244484
2015-08-11 03:11:39 +08:00
|
|
|
else if (RC == &SP::IntPairRegClass)
|
|
|
|
BuildMI(MBB, I, DL, get(SP::LDDri), DestReg).addFrameIndex(FI).addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
2012-04-20 14:31:50 +08:00
|
|
|
else if (RC == &SP::FPRegsRegClass)
|
2013-06-26 20:40:16 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
2013-09-03 02:32:45 +08:00
|
|
|
else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
|
2013-06-26 20:40:16 +08:00
|
|
|
BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
2013-09-03 02:32:45 +08:00
|
|
|
else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
|
|
|
|
// Use LDQFri irrespective of its legality. If LDQ is not legal, it will be
|
|
|
|
// lowered into two LDDs in eliminateFrameIndex.
|
|
|
|
BuildMI(MBB, I, DL, get(SP::LDQFri), DestReg).addFrameIndex(FI).addImm(0)
|
|
|
|
.addMemOperand(MMO);
|
2008-01-02 05:11:32 +08:00
|
|
|
else
|
2009-07-15 00:55:14 +08:00
|
|
|
llvm_unreachable("Can't load this register from stack slot");
|
2008-01-02 05:11:32 +08:00
|
|
|
}
|
|
|
|
|
2009-09-16 01:46:24 +08:00
|
|
|
/// Return the virtual register holding the PIC global base address for
/// \p MF, creating it (and the GETPCX that defines it) on first use.
unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const
{
  // Reuse the register if one was already created for this function.
  SparcMachineFunctionInfo *FuncInfo = MF->getInfo<SparcMachineFunctionInfo>();
  if (unsigned Reg = FuncInfo->getGlobalBaseReg())
    return Reg;

  // Otherwise materialize the PC into a fresh virtual register of pointer
  // width at the top of the entry block.
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *PtrRC =
      Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
  unsigned BaseReg = MRI.createVirtualRegister(PtrRC);

  MachineBasicBlock &EntryMBB = MF->front();
  DebugLoc dl;
  BuildMI(EntryMBB, EntryMBB.begin(), dl, get(SP::GETPCX), BaseReg);

  FuncInfo->setGlobalBaseReg(BaseReg);
  return BaseReg;
}
|
2016-04-26 18:37:14 +08:00
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
/// Expand post-register-allocation pseudo instructions. Only
/// LOAD_STACK_GUARD is handled; everything else is left untouched.
bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  assert(Subtarget.isTargetLinux() &&
         "Only Linux target is expected to contain LOAD_STACK_GUARD");

  // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
  const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;

  // Turn the pseudo into a plain load from [%g7 + Offset]: retarget the
  // descriptor to the pointer-width load, then append the address operands.
  MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
  MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addReg(SP::G7)
      .addImm(Offset);
  return true;
}
|