//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZTargetMachine.h"
#include "SystemZInstrBuilder.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

using namespace llvm;

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned Count) {
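  // Shifting in two steps keeps each shift amount below 64; a single
  // "<< Count" would be undefined behavior for Count == 64.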
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}

SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(tm), TM(tm) {
}

// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions. Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_high));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_low));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
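  // SystemZ is big-endian: the high 64-bit half lives at the lower address
  // and the low half 8 bytes above it.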
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);
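  // Rewrite the pseudo as an LA-type address calculation whose displacement
  // folds together the outgoing call frame (getMaxCallFrameSize), the
  // ABI-defined call frame size (SystemZMC::CallFrameSize) and the original
  // offset operand.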
  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
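  // Simple BDX-form accesses have operands (value, base, displacement,
  // index); insist on a frame-index base with zero displacement and no
  // index register.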
  const MCInstrDesc &MCID = MI->getDesc();
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
  if (MI->getOpcode() != SystemZ::MVC ||
      !MI->getOperand(0).isFI() ||
      MI->getOperand(1).getImm() != 0 ||
      !MI->getOperand(3).isFI() ||
      MI->getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI->getOperand(2).getImm();
  unsigned FI1 = MI->getOperand(0).getIndex();
  unsigned FI2 = MI->getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (llvm::next(I) != MBB.end())
        llvm::next(I)->eraseFromParent();

      Cond.clear();
      FBB = 0;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
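      // Cond is encoded as two immediate operands: the set of CC values
      // that the producing instruction can generate (CCValid) and the
      // subset that causes the branch to be taken (CCMask).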
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
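  // Invert the branch by flipping CCMask within CCValid. XORing with just
  // the valid bits, rather than with all four CC values, keeps the mask
  // restricted to conditions the CC-producing instruction can actually
  // generate.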
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}

unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have two components!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
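  // BRC takes the valid-CC set, the taken-CC mask and the target block,
  // mirroring the two-immediate Cond encoding built by AnalyzeBranch.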
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

// If Opcode is a move that has a conditional variant, return that variant,
// otherwise return 0.
static unsigned getConditionalMove(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::LR:  return SystemZ::LOCR;
  case SystemZ::LGR: return SystemZ::LOCGR;
  default:           return 0;
  }
}
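
// LOCR and LOCGR are the 32-bit and 64-bit LOAD ON CONDITION register
// moves; the subtarget provides them only when the load/store-on-condition
// facility is present, hence the hasLoadStoreOnCond() guards nearby.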
bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
  unsigned Opcode = MI->getOpcode();
  if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
      getConditionalMove(Opcode))
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    const BranchProbability &Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI->getOpcode();
  if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
    if (unsigned CondOpcode = getConditionalMove(Opcode)) {
      MI->setDesc(get(CondOpcode));
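      // Rewrite the move in place: append the condition operands and an
      // implicit use of CC, since the conditional form reads the condition
      // code rather than setting it.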
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
      return true;
    }
  }
  return false;
}

void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_high),
                RI.getSubReg(SrcReg, SystemZ::subreg_high), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_low),
                RI.getSubReg(SrcReg, SystemZ::subreg_low), KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LR;
  else if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {
  struct LogicOp {
    LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
    LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
      : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

    operator bool() const { return RegSize; }

    unsigned RegSize, ImmLSB, ImmSize;
  };
}
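
// A LogicOp describes an AND IMMEDIATE opcode: RegSize is the width of the
// operation and the immediate occupies ImmSize bits starting at bit ImmLSB.
// The boolean conversion makes a default-constructed LogicOp (RegSize == 0)
// mean "not an AND immediate".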
static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILL32: return LogicOp(32, 0, 16);
  case SystemZ::NILH32: return LogicOp(32, 16, 16);
  case SystemZ::NILL: return LogicOp(64, 0, 16);
  case SystemZ::NILH: return LogicOp(64, 16, 16);
  case SystemZ::NIHL: return LogicOp(64, 32, 16);
  case SystemZ::NIHH: return LogicOp(64, 48, 16);
  case SystemZ::NILF32: return LogicOp(32, 0, 32);
  case SystemZ::NILF: return LogicOp(64, 0, 32);
  case SystemZ::NIHF: return LogicOp(64, 32, 32);
  default: return LogicOp();
  }
}

// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
                                                 MachineInstr *NewMI,
                                                 LiveVariables *LV) {
  if (LV) {
    unsigned NumOps = OldMI->getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = OldMI->getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
    }
  }
  return NewMI;
}

MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineBasicBlock *MBB = MI->getParent();

  unsigned Opcode = MI->getOpcode();
  unsigned NumOps = MI->getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
  if (TM.getSubtargetImpl()->hasDistinctOps()) {
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      MachineOperand &Dest = MI->getOperand(0);
      MachineOperand &Src = MI->getOperand(1);
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode))
        .addOperand(Dest);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.addOperand(MI->getOperand(I));
      return finishConvertToThreeAddress(MI, MIB, LV);
    }
  }

  // Try to convert an AND into an RISBG-type instruction.
  if (LogicOp And = interpretAndImmediate(Opcode)) {
    unsigned NewOpcode;
    if (And.RegSize == 64)
      NewOpcode = SystemZ::RISBG;
    else if (TM.getSubtargetImpl()->hasHighWord())
      NewOpcode = SystemZ::RISBLG32;
    else
      // We can't use RISBG for 32-bit operations because it clobbers the
      // high word of the destination too.
      NewOpcode = 0;
    if (NewOpcode) {
      uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
      // AND IMMEDIATE leaves the other bits of the register unchanged.
      Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
      unsigned Start, End;
      if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
        if (NewOpcode == SystemZ::RISBLG32) {
          Start &= 31;
          End &= 31;
        }
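        // Adding 128 to End sets the high bit of that field, which for
        // RISBG-type instructions means "zero the bits outside the selected
        // range" instead of leaving them untouched; the final zero immediate
        // is the rotate amount.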
        MachineOperand &Dest = MI->getOperand(0);
        MachineOperand &Src = MI->getOperand(1);
        MachineInstrBuilder MIB =
          BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
          .addOperand(Dest).addReg(0)
          .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
          .addImm(Start).addImm(End + 128).addImm(0);
        return finishConvertToThreeAddress(MI, MIB, LV);
      }
    }
  }
  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);

  // Early exit for cases we don't care about.
  if (Ops.size() != 1)
    return 0;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  unsigned Opcode = MI->getOpcode();
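  // LGDR (FPR to GPR) and LDGR (GPR to FPR) are pure register moves, so a
  // spill of one operand can instead access the stack slot directly from
  // the other register class.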
  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
        .addImm(0).addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI->getOperand(0).getReg();
      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled. Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy. This means that we cannot use it if the
  // load or store is volatile. It also means that the transformation is
  // not valid in cases where the two memories partially overlap; however,
  // that is not a problem here, because we know that one of the memories
  // is a full frame index.
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
          .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      return MIB;
    }
  }

  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr *LoadMI) const {
  return 0;
}

bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
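  // Each case packages the branch as (type, CCValid, CCMask, target):
  // CCValid is the set of CC values the producing instruction can generate
  // and CCMask the subset for which the branch is taken.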
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(),
                             MI->getOperand(1).getImm(), &MI->getOperand(2));

  case SystemZ::BRCT:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST32;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
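  // For 128-bit accesses, Offset2 is the displacement of the second (low)
  // half after splitMove; both halves must be in range.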
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L: return SystemZ::LT;
  case SystemZ::LY: return SystemZ::LT;
  case SystemZ::LG: return SystemZ::LTG;
  case SystemZ::LGF: return SystemZ::LTGF;
  case SystemZ::LR: return SystemZ::LTR;
  case SystemZ::LGFR: return SystemZ::LTGFR;
  case SystemZ::LGR: return SystemZ::LTGR;
  default: return 0;
  }
}

// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out. Store the first set bit in LSB and
// the number of set bits in Length if so.
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
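  // If the ones in Mask are contiguous, Mask >> First equals 2^Length - 1,
  // making Top a power of two; (Top & -Top) == Top is the usual
  // power-of-two test.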
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}

bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
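  // Start and End use the instruction's big-endian bit numbering, where
  // bit 0 is the most significant bit of the 64-bit register; hence the
  // "63 -" conversions below.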
  // Reject trivial all-zero masks.
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}

unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
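  // The immediate forms (CIJ and CGIJ) encode only a signed 8-bit
  // comparison value, so CHI and CGHI qualify only when their immediate
  // fits in that range.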
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  default:
    return 0;
  }
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
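  // Pick the cheapest instruction that can materialize Value in one go:
  // LGHI for sign-extended 16-bit values, LLILL/LLILH to zero-extend a
  // 16-bit field in bits 0-15 or 16-31, and LGFI for other signed 32-bit
  // values.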
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}