[FastISel] Remove kill tracking

This is a followup to D98145: As far as I know, tracking of kill
flags in FastISel is just a compile-time optimization. However,
I'm not actually seeing any compile-time regression when removing
the tracking. This probably used to be more important in the past,
before FastRA was switched to allocate instructions in reverse
order, which means that it discovers kills as a matter of course.

As such, the kill tracking doesn't really seem to serve a purpose
anymore, and just adds additional complexity and potential for
errors. This patch removes it entirely. The primary changes are
dropping the hasTrivialKill() method and removing the kill
arguments from the fastEmit_* methods. The rest is mechanical fixup.

Differential Revision: https://reviews.llvm.org/D98294
This commit is contained in:
Nikita Popov 2021-03-09 21:04:03 +01:00
parent 3bcb6a389f
commit 665065821e
11 changed files with 388 additions and 668 deletions

View File

@ -116,6 +116,13 @@ Changes to the Go bindings
-------------------------- --------------------------
Changes to the FastISel infrastructure
--------------------------------------
* FastISel no longer tracks killed registers, and instead leaves this to the
register allocator. This means that ``hasTrivialKill()`` is removed, as well
as the ``OpNIsKill`` parameters to the ``fastEmit_*()`` family of functions.
Changes to the DAG infrastructure Changes to the DAG infrastructure
--------------------------------- ---------------------------------

View File

@ -274,7 +274,7 @@ public:
/// This is a wrapper around getRegForValue that also takes care of /// This is a wrapper around getRegForValue that also takes care of
/// truncating or sign-extending the given getelementptr index value. /// truncating or sign-extending the given getelementptr index value.
std::pair<Register, bool> getRegForGEPIndex(const Value *Idx); Register getRegForGEPIndex(const Value *Idx);
/// We're checking to see if we can fold \p LI into \p FoldInst. Note /// We're checking to see if we can fold \p LI into \p FoldInst. Note
/// that we could have a sequence where multiple LLVM IR instructions are /// that we could have a sequence where multiple LLVM IR instructions are
@ -347,27 +347,26 @@ protected:
/// This method is called by target-independent code to request that an /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operand be emitted. /// instruction with the given type, opcode, and register operand be emitted.
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0);
bool Op0IsKill);
/// This method is called by target-independent code to request that an /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operands be emitted. /// instruction with the given type, opcode, and register operands be emitted.
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill); unsigned Op1);
/// This method is called by target-independent code to request that an /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and immediate /// instruction with the given type, opcode, and register and immediate
/// operands be emitted. /// operands be emitted.
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm); uint64_t Imm);
/// This method is a wrapper of fastEmit_ri. /// This method is a wrapper of fastEmit_ri.
/// ///
/// It first tries to emit an instruction with an immediate operand using /// It first tries to emit an instruction with an immediate operand using
/// fastEmit_ri. If that fails, it materializes the immediate into a register /// fastEmit_ri. If that fails, it materializes the immediate into a register
/// and try fastEmit_rr instead. /// and try fastEmit_rr instead.
Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill, Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm,
uint64_t Imm, MVT ImmType); MVT ImmType);
/// This method is called by target-independent code to request that an /// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and immediate operand be emitted. /// instruction with the given type, opcode, and immediate operand be emitted.
@ -387,33 +386,31 @@ protected:
/// Emit a MachineInstr with one register operand and a result register /// Emit a MachineInstr with one register operand and a result register
/// in the given register class. /// in the given register class.
Register fastEmitInst_r(unsigned MachineInstOpcode, Register fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0);
bool Op0IsKill);
/// Emit a MachineInstr with two register operands and a result /// Emit a MachineInstr with two register operands and a result
/// register in the given register class. /// register in the given register class.
Register fastEmitInst_rr(unsigned MachineInstOpcode, Register fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill); unsigned Op1);
/// Emit a MachineInstr with three register operands and a result /// Emit a MachineInstr with three register operands and a result
/// register in the given register class. /// register in the given register class.
Register fastEmitInst_rrr(unsigned MachineInstOpcode, Register fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op1, unsigned Op2);
unsigned Op2, bool Op2IsKill);
/// Emit a MachineInstr with a register operand, an immediate, and a /// Emit a MachineInstr with a register operand, an immediate, and a
/// result register in the given register class. /// result register in the given register class.
Register fastEmitInst_ri(unsigned MachineInstOpcode, Register fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm); uint64_t Imm);
/// Emit a MachineInstr with one register operand and two immediate /// Emit a MachineInstr with one register operand and two immediate
/// operands. /// operands.
Register fastEmitInst_rii(unsigned MachineInstOpcode, Register fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1, uint64_t Imm2); uint64_t Imm1, uint64_t Imm2);
/// Emit a MachineInstr with a floating point immediate, and a result /// Emit a MachineInstr with a floating point immediate, and a result
/// register in the given register class. /// register in the given register class.
@ -425,8 +422,7 @@ protected:
/// result register in the given register class. /// result register in the given register class.
Register fastEmitInst_rri(unsigned MachineInstOpcode, Register fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op1, uint64_t Imm);
uint64_t Imm);
/// Emit a MachineInstr with a single immediate operand, and a result /// Emit a MachineInstr with a single immediate operand, and a result
/// register in the given register class. /// register in the given register class.
@ -435,12 +431,11 @@ protected:
/// Emit a MachineInstr for an extract_subreg from a specified index of /// Emit a MachineInstr for an extract_subreg from a specified index of
/// a superregister to a specified type. /// a superregister to a specified type.
Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx);
uint32_t Idx);
/// Emit MachineInstrs to compute the value of Op with all but the /// Emit MachineInstrs to compute the value of Op with all but the
/// least significant bit set to zero. /// least significant bit set to zero.
Register fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill); Register fastEmitZExtFromI1(MVT VT, unsigned Op0);
/// Emit an unconditional branch to the given block, unless it is the /// Emit an unconditional branch to the given block, unless it is the
/// immediate (fall-through) successor, and update the CFG. /// immediate (fall-through) successor, and update the CFG.
@ -490,12 +485,6 @@ protected:
/// - \c Add has a constant operand. /// - \c Add has a constant operand.
bool canFoldAddIntoGEP(const User *GEP, const Value *Add); bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
/// Test whether the register associated with this value has exactly one use,
/// in which case that single use is killing. Note that multiple IR values
/// may map onto the same register, in which case this is not the same as
/// checking that an IR value has one use.
bool hasTrivialKill(const Value *V);
/// Create a machine mem operand from the given instruction. /// Create a machine mem operand from the given instruction.
MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const; MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;

View File

@ -238,42 +238,6 @@ void FastISel::flushLocalValueMap() {
SavedInsertPt = FuncInfo.InsertPt; SavedInsertPt = FuncInfo.InsertPt;
} }
bool FastISel::hasTrivialKill(const Value *V) {
// Don't consider constants or arguments to have trivial kills.
const Instruction *I = dyn_cast<Instruction>(V);
if (!I)
return false;
// No-op casts are trivially coalesced by fast-isel.
if (const auto *Cast = dyn_cast<CastInst>(I))
if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
return false;
// Even the value might have only one use in the LLVM IR, it is possible that
// FastISel might fold the use into another instruction and now there is more
// than one use at the Machine Instruction level.
Register Reg = lookUpRegForValue(V);
if (Reg && !MRI.use_empty(Reg))
return false;
// GEPs with all zero indices are trivially coalesced by fast-isel.
if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
return false;
// Casts and extractvalues may be trivially coalesced by fast-isel.
if (I->getOpcode() == Instruction::BitCast ||
I->getOpcode() == Instruction::PtrToInt ||
I->getOpcode() == Instruction::IntToPtr ||
I->getOpcode() == Instruction::ExtractValue)
return false;
// Only instructions with a single use in the same basic block are considered
// to have trivial kills.
return I->hasOneUse() &&
cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}
Register FastISel::getRegForValue(const Value *V) { Register FastISel::getRegForValue(const Value *V) {
EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true); EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel. // Don't handle non-simple values in FastISel.
@ -346,8 +310,8 @@ Register FastISel::materializeConstant(const Value *V, MVT VT) {
Register IntegerReg = Register IntegerReg =
getRegForValue(ConstantInt::get(V->getContext(), SIntVal)); getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
if (IntegerReg) if (IntegerReg)
Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg, Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
/*Op0IsKill=*/false); IntegerReg);
} }
} }
} else if (const auto *Op = dyn_cast<Operator>(V)) { } else if (const auto *Op = dyn_cast<Operator>(V)) {
@ -419,27 +383,22 @@ void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
} }
} }
std::pair<Register, bool> FastISel::getRegForGEPIndex(const Value *Idx) { Register FastISel::getRegForGEPIndex(const Value *Idx) {
Register IdxN = getRegForValue(Idx); Register IdxN = getRegForValue(Idx);
if (!IdxN) if (!IdxN)
// Unhandled operand. Halt "fast" selection and bail. // Unhandled operand. Halt "fast" selection and bail.
return std::pair<Register, bool>(Register(), false); return Register();
bool IdxNIsKill = hasTrivialKill(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend it. // If the index is smaller or larger than intptr_t, truncate or extend it.
MVT PtrVT = TLI.getPointerTy(DL); MVT PtrVT = TLI.getPointerTy(DL);
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false); EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT)) { if (IdxVT.bitsLT(PtrVT)) {
IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN, IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
IdxNIsKill);
IdxNIsKill = true;
} else if (IdxVT.bitsGT(PtrVT)) { } else if (IdxVT.bitsGT(PtrVT)) {
IdxN = IdxN =
fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill); fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
IdxNIsKill = true;
} }
return std::pair<Register, bool>(IdxN, IdxNIsKill); return IdxN;
} }
void FastISel::recomputeInsertPt() { void FastISel::recomputeInsertPt() {
@ -517,11 +476,10 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
Register Op1 = getRegForValue(I->getOperand(1)); Register Op1 = getRegForValue(I->getOperand(1));
if (!Op1) if (!Op1)
return false; return false;
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
Register ResultReg = Register ResultReg =
fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill, fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
CI->getZExtValue(), VT.getSimpleVT()); VT.getSimpleVT());
if (!ResultReg) if (!ResultReg)
return false; return false;
@ -533,7 +491,6 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
Register Op0 = getRegForValue(I->getOperand(0)); Register Op0 = getRegForValue(I->getOperand(0));
if (!Op0) // Unhandled operand. Halt "fast" selection and bail. if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// Check if the second operand is a constant and handle it appropriately. // Check if the second operand is a constant and handle it appropriately.
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) { if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
@ -553,8 +510,8 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
ISDOpcode = ISD::AND; ISDOpcode = ISD::AND;
} }
Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
Op0IsKill, Imm, VT.getSimpleVT()); VT.getSimpleVT());
if (!ResultReg) if (!ResultReg)
return false; return false;
@ -566,11 +523,10 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
Register Op1 = getRegForValue(I->getOperand(1)); Register Op1 = getRegForValue(I->getOperand(1));
if (!Op1) // Unhandled operand. Halt "fast" selection and bail. if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
// Now we have both operands in registers. Emit the instruction. // Now we have both operands in registers. Emit the instruction.
Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(), Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill); ISDOpcode, Op0, Op1);
if (!ResultReg) if (!ResultReg)
// Target-specific code wasn't able to find a machine opcode for // Target-specific code wasn't able to find a machine opcode for
// the given ISD opcode and type. Halt "fast" selection and bail. // the given ISD opcode and type. Halt "fast" selection and bail.
@ -591,8 +547,6 @@ bool FastISel::selectGetElementPtr(const User *I) {
if (isa<VectorType>(I->getType())) if (isa<VectorType>(I->getType()))
return false; return false;
bool NIsKill = hasTrivialKill(I->getOperand(0));
// Keep a running tab of the total offset to coalesce multiple N = N + Offset // Keep a running tab of the total offset to coalesce multiple N = N + Offset
// into a single N = N + TotalOffset. // into a single N = N + TotalOffset.
uint64_t TotalOffs = 0; uint64_t TotalOffs = 0;
@ -608,10 +562,9 @@ bool FastISel::selectGetElementPtr(const User *I) {
// N = N + Offset // N = N + Offset
TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field); TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
if (TotalOffs >= MaxOffs) { if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail. if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
NIsKill = true;
TotalOffs = 0; TotalOffs = 0;
} }
} }
@ -626,43 +579,38 @@ bool FastISel::selectGetElementPtr(const User *I) {
uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue(); uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
TotalOffs += DL.getTypeAllocSize(Ty) * IdxN; TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
if (TotalOffs >= MaxOffs) { if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail. if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
NIsKill = true;
TotalOffs = 0; TotalOffs = 0;
} }
continue; continue;
} }
if (TotalOffs) { if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail. if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
NIsKill = true;
TotalOffs = 0; TotalOffs = 0;
} }
// N = N + Idx * ElementSize; // N = N + Idx * ElementSize;
uint64_t ElementSize = DL.getTypeAllocSize(Ty); uint64_t ElementSize = DL.getTypeAllocSize(Ty);
std::pair<Register, bool> Pair = getRegForGEPIndex(Idx); Register IdxN = getRegForGEPIndex(Idx);
Register IdxN = Pair.first;
bool IdxNIsKill = Pair.second;
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail. if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
if (ElementSize != 1) { if (ElementSize != 1) {
IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT); IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail. if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
IdxNIsKill = true;
} }
N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill); N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
if (!N) // Unhandled operand. Halt "fast" selection and bail. if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
} }
} }
if (TotalOffs) { if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail. if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
} }
@ -1425,10 +1373,8 @@ bool FastISel::selectCast(const User *I, unsigned Opcode) {
// Unhandled operand. Halt "fast" selection and bail. // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
Opcode, InputReg, InputRegIsKill); Opcode, InputReg);
if (!ResultReg) if (!ResultReg)
return false; return false;
@ -1459,7 +1405,6 @@ bool FastISel::selectBitCast(const User *I) {
Register Op0 = getRegForValue(I->getOperand(0)); Register Op0 = getRegForValue(I->getOperand(0));
if (!Op0) // Unhandled operand. Halt "fast" selection and bail. if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// First, try to perform the bitcast by inserting a reg-reg copy. // First, try to perform the bitcast by inserting a reg-reg copy.
Register ResultReg; Register ResultReg;
@ -1476,7 +1421,7 @@ bool FastISel::selectBitCast(const User *I) {
// If the reg-reg copy failed, select a BITCAST opcode. // If the reg-reg copy failed, select a BITCAST opcode.
if (!ResultReg) if (!ResultReg)
ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill); ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
if (!ResultReg) if (!ResultReg)
return false; return false;
@ -1652,12 +1597,11 @@ bool FastISel::selectFNeg(const User *I, const Value *In) {
Register OpReg = getRegForValue(In); Register OpReg = getRegForValue(In);
if (!OpReg) if (!OpReg)
return false; return false;
bool OpRegIsKill = hasTrivialKill(In);
// If the target has ISD::FNEG, use it. // If the target has ISD::FNEG, use it.
EVT VT = TLI.getValueType(DL, I->getType()); EVT VT = TLI.getValueType(DL, I->getType());
Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG, Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg, OpRegIsKill); OpReg);
if (ResultReg) { if (ResultReg) {
updateValueMap(I, ResultReg); updateValueMap(I, ResultReg);
return true; return true;
@ -1672,18 +1616,18 @@ bool FastISel::selectFNeg(const User *I, const Value *In) {
return false; return false;
Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(), Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::BITCAST, OpReg, OpRegIsKill); ISD::BITCAST, OpReg);
if (!IntReg) if (!IntReg)
return false; return false;
Register IntResultReg = fastEmit_ri_( Register IntResultReg = fastEmit_ri_(
IntVT.getSimpleVT(), ISD::XOR, IntReg, /*Op0IsKill=*/true, IntVT.getSimpleVT(), ISD::XOR, IntReg,
UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT()); UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
if (!IntResultReg) if (!IntResultReg)
return false; return false;
ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST, ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
IntResultReg, /*Op0IsKill=*/true); IntResultReg);
if (!ResultReg) if (!ResultReg)
return false; return false;
@ -1883,14 +1827,12 @@ bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; } unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/, unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
bool /*Op0IsKill*/) {
return 0; return 0;
} }
unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/, unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, unsigned /*Op1*/, unsigned /*Op1*/) {
bool /*Op1IsKill*/) {
return 0; return 0;
} }
@ -1904,7 +1846,7 @@ unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
} }
unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/, unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, uint64_t /*Imm*/) { uint64_t /*Imm*/) {
return 0; return 0;
} }
@ -1913,7 +1855,7 @@ unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
/// If that fails, it materializes the immediate into a register and try /// If that fails, it materializes the immediate into a register and try
/// fastEmit_rr instead. /// fastEmit_rr instead.
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm, MVT ImmType) { uint64_t Imm, MVT ImmType) {
// If this is a multiply by a power of two, emit this as a shift left. // If this is a multiply by a power of two, emit this as a shift left.
if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) { if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
Opcode = ISD::SHL; Opcode = ISD::SHL;
@ -1931,11 +1873,10 @@ Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
return 0; return 0;
// First check if immediate type is legal. If not, we can't use the ri form. // First check if immediate type is legal. If not, we can't use the ri form.
Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm); Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
if (ResultReg) if (ResultReg)
return ResultReg; return ResultReg;
Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm); Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
bool IsImmKill = true;
if (!MaterialReg) { if (!MaterialReg) {
// This is a bit ugly/slow, but failing here means falling out of // This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow. // fast-isel, which would be very slow.
@ -1944,15 +1885,8 @@ Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm)); MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
if (!MaterialReg) if (!MaterialReg)
return 0; return 0;
// FIXME: If the materialized register here has no uses yet then this
// will be the first use and we should be able to mark it as killed.
// However, the local value area for materialising constant expressions
// grows down, not up, which means that any constant expressions we generate
// later which also use 'Imm' could be after this instruction and therefore
// after this kill.
IsImmKill = false;
} }
return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill); return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
} }
Register FastISel::createResultReg(const TargetRegisterClass *RC) { Register FastISel::createResultReg(const TargetRegisterClass *RC) {
@ -1986,8 +1920,7 @@ Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
} }
Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode, Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0) {
bool Op0IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
@ -1995,10 +1928,10 @@ Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)); .addReg(Op0);
else { else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill)); .addReg(Op0);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
} }
@ -2008,8 +1941,7 @@ Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode, Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, unsigned Op1) {
bool Op1IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
@ -2018,12 +1950,12 @@ Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)); .addReg(Op1);
else { else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)); .addReg(Op1);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
} }
@ -2032,9 +1964,7 @@ Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode, Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, unsigned Op1, unsigned Op2) {
bool Op1IsKill, unsigned Op2,
bool Op2IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
@ -2044,14 +1974,14 @@ Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)) .addReg(Op1)
.addReg(Op2, getKillRegState(Op2IsKill)); .addReg(Op2);
else { else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)) .addReg(Op1)
.addReg(Op2, getKillRegState(Op2IsKill)); .addReg(Op2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
} }
@ -2060,7 +1990,7 @@ Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode, Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm) { uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
@ -2068,11 +1998,11 @@ Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addImm(Imm); .addImm(Imm);
else { else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addImm(Imm); .addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
@ -2082,8 +2012,7 @@ Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode, Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1, uint64_t Imm1, uint64_t Imm2) {
uint64_t Imm2) {
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
@ -2091,12 +2020,12 @@ Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addImm(Imm1) .addImm(Imm1)
.addImm(Imm2); .addImm(Imm2);
else { else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addImm(Imm1) .addImm(Imm1)
.addImm(Imm2); .addImm(Imm2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@ -2126,8 +2055,7 @@ Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode, Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, unsigned Op1, uint64_t Imm) {
bool Op1IsKill, uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
@ -2136,13 +2064,13 @@ Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)) .addReg(Op1)
.addImm(Imm); .addImm(Imm);
else { else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)) .addReg(Op1)
.addImm(Imm); .addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
@ -2167,21 +2095,21 @@ Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
} }
Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
bool Op0IsKill, uint32_t Idx) { uint32_t Idx) {
Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(Register::isVirtualRegister(Op0) && assert(Register::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs"); "Cannot yet extract from physregs");
const TargetRegisterClass *RC = MRI.getRegClass(Op0); const TargetRegisterClass *RC = MRI.getRegClass(Op0);
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx)); MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx); ResultReg).addReg(Op0, 0, Idx);
return ResultReg; return ResultReg;
} }
/// Emit MachineInstrs to compute the value of Op with all but the least /// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero. /// significant bit set to zero.
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) { Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1); return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
} }
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks. /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.

File diff suppressed because it is too large Load Diff

View File

@ -136,16 +136,13 @@ class ARMFastISel final : public FastISel {
// Code from FastISel.cpp. // Code from FastISel.cpp.
unsigned fastEmitInst_r(unsigned MachineInstOpcode, unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC, unsigned Op0);
unsigned Op0, bool Op0IsKill);
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, unsigned Op1);
unsigned Op1, bool Op1IsKill);
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, uint64_t Imm);
uint64_t Imm);
unsigned fastEmitInst_i(unsigned MachineInstOpcode, unsigned fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
uint64_t Imm); uint64_t Imm);
@ -299,7 +296,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode, unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill) { unsigned Op0) {
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
@ -308,10 +305,10 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
Op0 = constrainOperandRegClass(II, Op0, 1); Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) { if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
ResultReg).addReg(Op0, Op0IsKill * RegState::Kill)); ResultReg).addReg(Op0));
} else { } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)); .addReg(Op0));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg) TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0])); .addReg(II.ImplicitDefs[0]));
@ -321,8 +318,7 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, unsigned Op1) {
unsigned Op1, bool Op1IsKill) {
unsigned ResultReg = createResultReg(RC); unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
@ -334,12 +330,12 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) { if (II.getNumDefs() >= 1) {
AddOptionalDefs( AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op0)
.addReg(Op1, Op1IsKill * RegState::Kill)); .addReg(Op1));
} else { } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op0)
.addReg(Op1, Op1IsKill * RegState::Kill)); .addReg(Op1));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg) TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0])); .addReg(II.ImplicitDefs[0]));
@ -349,8 +345,7 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode, unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, uint64_t Imm) {
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC); unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
@ -360,11 +355,11 @@ unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) { if (II.getNumDefs() >= 1) {
AddOptionalDefs( AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op0)
.addImm(Imm)); .addImm(Imm));
} else { } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op0)
.addImm(Imm)); .addImm(Imm));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg) TII.get(TargetOpcode::COPY), ResultReg)
@ -851,7 +846,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
// get the reg+offset into a register. // get the reg+offset into a register.
if (needsLowering) { if (needsLowering) {
Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
/*Op0IsKill*/false, Addr.Offset, MVT::i32); Addr.Offset, MVT::i32);
Addr.Offset = 0; Addr.Offset = 0;
} }
} }
@ -1967,8 +1962,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
break; break;
} }
case CCValAssign::BCvt: { case CCValAssign::BCvt: {
unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg);
/*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!"); assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC; Arg = BC;
ArgVT = VA.getLocVT(); ArgVT = VA.getLocVT();

View File

@ -228,14 +228,13 @@ private:
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, unsigned Op1);
unsigned Op1, bool Op1IsKill);
// for some reason, this default is not generated by tablegen // for some reason, this default is not generated by tablegen
// so we explicitly generate it here. // so we explicitly generate it here.
unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC, unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, uint64_t imm1, unsigned Op0, uint64_t imm1, uint64_t imm2,
uint64_t imm2, unsigned Op3, bool Op3IsKill) { unsigned Op3) {
return 0; return 0;
} }
@ -2122,8 +2121,7 @@ void MipsFastISel::simplifyAddress(Address &Addr) {
unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, unsigned Op1) {
unsigned Op1, bool Op1IsKill) {
// We treat the MUL instruction in a special way because it clobbers // We treat the MUL instruction in a special way because it clobbers
// the HI0 & LO0 registers. The TableGen definition of this instruction can // the HI0 & LO0 registers. The TableGen definition of this instruction can
// mark these registers only as implicitly defined. As a result, the // mark these registers only as implicitly defined. As a result, the
@ -2136,15 +2134,14 @@ unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)) .addReg(Op1)
.addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead) .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
.addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead); .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
return ResultReg; return ResultReg;
} }
return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1, return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op1);
Op1IsKill);
} }
namespace llvm { namespace llvm {

View File

@ -112,15 +112,12 @@ class PPCFastISel final : public FastISel {
unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override; unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, uint64_t Imm);
uint64_t Imm);
unsigned fastEmitInst_r(unsigned MachineInstOpcode, unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC, unsigned Op0);
unsigned Op0, bool Op0IsKill);
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, unsigned Op1);
unsigned Op1, bool Op1IsKill);
bool fastLowerCall(CallLoweringInfo &CLI) override; bool fastLowerCall(CallLoweringInfo &CLI) override;
@ -2426,7 +2423,7 @@ unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
// where those regs have another meaning. // where those regs have another meaning.
unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode, unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0,
uint64_t Imm) { uint64_t Imm) {
if (MachineInstOpcode == PPC::ADDI) if (MachineInstOpcode == PPC::ADDI)
MRI.setRegClass(Op0, &PPC::GPRC_and_GPRC_NOR0RegClass); MRI.setRegClass(Op0, &PPC::GPRC_and_GPRC_NOR0RegClass);
@ -2437,8 +2434,7 @@ unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass : (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC)); (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC, return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC, Op0, Imm);
Op0, Op0IsKill, Imm);
} }
// Override for instructions with one register operand to avoid use of // Override for instructions with one register operand to avoid use of
@ -2446,12 +2442,12 @@ unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
// we must be conservative. // we must be conservative.
unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode, unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass* RC, const TargetRegisterClass* RC,
unsigned Op0, bool Op0IsKill) { unsigned Op0) {
const TargetRegisterClass *UseRC = const TargetRegisterClass *UseRC =
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass : (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC)); (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill); return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0);
} }
// Override for instructions with two register operands to avoid use // Override for instructions with two register operands to avoid use
@ -2459,14 +2455,12 @@ unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
// so we must be conservative. // so we must be conservative.
unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass* RC, const TargetRegisterClass* RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, unsigned Op1) {
unsigned Op1, bool Op1IsKill) {
const TargetRegisterClass *UseRC = const TargetRegisterClass *UseRC =
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass : (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC)); (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill, return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op1);
Op1, Op1IsKill);
} }
namespace llvm { namespace llvm {

View File

@ -1167,7 +1167,7 @@ bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
} }
Register Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(), Register Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(),
In, I->getOperand(0)->hasOneUse()); In);
if (!Reg) if (!Reg)
return false; return false;
MachineBasicBlock::iterator Iter = FuncInfo.InsertPt; MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;

View File

@ -89,8 +89,7 @@ private:
bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM, bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
MachineMemOperand *MMO = nullptr, bool Aligned = false); MachineMemOperand *MMO = nullptr, bool Aligned = false);
bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, bool X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
X86AddressMode &AM,
MachineMemOperand *MMO = nullptr, bool Aligned = false); MachineMemOperand *MMO = nullptr, bool Aligned = false);
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
@ -176,9 +175,7 @@ private:
unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode, unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0, const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op1, unsigned Op2, unsigned Op3);
unsigned Op2, bool Op2IsKill, unsigned Op3,
bool Op3IsKill);
}; };
} // end anonymous namespace. } // end anonymous namespace.
@ -487,8 +484,7 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
/// type VT. The address is either pre-computed, consisted of a base ptr, Ptr /// type VT. The address is either pre-computed, consisted of a base ptr, Ptr
/// and a displacement offset, or a GlobalAddress, /// and a displacement offset, or a GlobalAddress,
/// i.e. V. Return true if it is possible. /// i.e. V. Return true if it is possible.
bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
X86AddressMode &AM,
MachineMemOperand *MMO, bool Aligned) { MachineMemOperand *MMO, bool Aligned) {
bool HasSSE1 = Subtarget->hasSSE1(); bool HasSSE1 = Subtarget->hasSSE1();
bool HasSSE2 = Subtarget->hasSSE2(); bool HasSSE2 = Subtarget->hasSSE2();
@ -508,7 +504,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
Register AndResult = createResultReg(&X86::GR8RegClass); Register AndResult = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::AND8ri), AndResult) TII.get(X86::AND8ri), AndResult)
.addReg(ValReg, getKillRegState(ValIsKill)).addImm(1); .addReg(ValReg).addImm(1);
ValReg = AndResult; ValReg = AndResult;
LLVM_FALLTHROUGH; // handle i1 as i8. LLVM_FALLTHROUGH; // handle i1 as i8.
} }
@ -654,7 +650,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1); ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
MachineInstrBuilder MIB = MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill)); addFullAddress(MIB, AM).addReg(ValReg);
if (MMO) if (MMO)
MIB->addMemOperand(*FuncInfo.MF, MMO); MIB->addMemOperand(*FuncInfo.MF, MMO);
@ -702,8 +698,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
if (ValReg == 0) if (ValReg == 0)
return false; return false;
bool ValKill = hasTrivialKill(Val); return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned);
return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
} }
/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
@ -712,8 +707,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned Src, EVT SrcVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg) { unsigned &ResultReg) {
unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
Src, /*TODO: Kill=*/false);
if (RR == 0) if (RR == 0)
return false; return false;
@ -945,7 +939,7 @@ redo_gep:
(S == 1 || S == 2 || S == 4 || S == 8)) { (S == 1 || S == 2 || S == 4 || S == 8)) {
// Scaled-index addressing. // Scaled-index addressing.
Scale = S; Scale = S;
IndexReg = getRegForGEPIndex(Op).first; IndexReg = getRegForGEPIndex(Op);
if (IndexReg == 0) if (IndexReg == 0)
return false; return false;
break; break;
@ -1262,14 +1256,13 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
if (Outs[0].Flags.isSExt()) if (Outs[0].Flags.isSExt())
return false; return false;
// TODO // TODO
SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*Op0IsKill=*/false); SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg);
SrcVT = MVT::i8; SrcVT = MVT::i8;
} }
unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND : unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
ISD::SIGN_EXTEND; ISD::SIGN_EXTEND;
// TODO // TODO
SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg, SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg);
/*Op0IsKill=*/false);
} }
// Make the copy. // Make the copy.
@ -1467,8 +1460,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
ResultReg = createResultReg(&X86::GR32RegClass); ResultReg = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
ResultReg); ResultReg);
ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit);
/*Op0IsKill=*/true, X86::sub_8bit);
if (!ResultReg) if (!ResultReg)
return false; return false;
break; break;
@ -1558,7 +1550,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
if (SrcVT == MVT::i1) { if (SrcVT == MVT::i1) {
// Set the high bits to zero. // Set the high bits to zero.
ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
SrcVT = MVT::i8; SrcVT = MVT::i8;
if (ResultReg == 0) if (ResultReg == 0)
@ -1591,11 +1583,10 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8),
Result32).addReg(ResultReg); Result32).addReg(ResultReg);
ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
/*Op0IsKill=*/true, X86::sub_16bit);
} else if (DstVT != MVT::i8) { } else if (DstVT != MVT::i8) {
ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND, ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
ResultReg, /*Op0IsKill=*/true); ResultReg);
if (ResultReg == 0) if (ResultReg == 0)
return false; return false;
} }
@ -1617,8 +1608,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
if (SrcVT == MVT::i1) { if (SrcVT == MVT::i1) {
// Set the high bits to zero. // Set the high bits to zero.
Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg, Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
/*TODO: Kill=*/false);
if (ZExtReg == 0) if (ZExtReg == 0)
return false; return false;
@ -1637,11 +1627,10 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8),
Result32).addReg(ResultReg); Result32).addReg(ResultReg);
ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
/*Op0IsKill=*/true, X86::sub_16bit);
} else if (DstVT != MVT::i8) { } else if (DstVT != MVT::i8) {
ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND, ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
ResultReg, /*Op0IsKill=*/true); ResultReg);
if (ResultReg == 0) if (ResultReg == 0)
return false; return false;
} }
@ -1793,8 +1782,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), OpReg) TII.get(TargetOpcode::COPY), OpReg)
.addReg(KOpReg); .addReg(KOpReg);
OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Op0IsKill=*/true, OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit);
X86::sub_8bit);
} }
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
.addReg(OpReg) .addReg(OpReg)
@ -2025,7 +2013,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
// Now reference the 8-bit subreg of the result. // Now reference the 8-bit subreg of the result.
ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg, ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
/*Op0IsKill=*/true, X86::sub_8bit); X86::sub_8bit);
} }
// Copy the result out of the physreg if we haven't already. // Copy the result out of the physreg if we haven't already.
if (!ResultReg) { if (!ResultReg) {
@ -2130,7 +2118,6 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
Register CondReg = getRegForValue(Cond); Register CondReg = getRegForValue(Cond);
if (CondReg == 0) if (CondReg == 0)
return false; return false;
bool CondIsKill = hasTrivialKill(Cond);
// In case OpReg is a K register, COPY to a GPR // In case OpReg is a K register, COPY to a GPR
if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
@ -2138,12 +2125,11 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
CondReg = createResultReg(&X86::GR32RegClass); CondReg = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), CondReg) TII.get(TargetOpcode::COPY), CondReg)
.addReg(KCondReg, getKillRegState(CondIsKill)); .addReg(KCondReg);
CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true, CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
X86::sub_8bit);
} }
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
.addReg(CondReg, getKillRegState(CondIsKill)) .addReg(CondReg)
.addImm(1); .addImm(1);
} }
@ -2151,18 +2137,13 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
const Value *RHS = I->getOperand(2); const Value *RHS = I->getOperand(2);
Register RHSReg = getRegForValue(RHS); Register RHSReg = getRegForValue(RHS);
bool RHSIsKill = hasTrivialKill(RHS);
Register LHSReg = getRegForValue(LHS); Register LHSReg = getRegForValue(LHS);
bool LHSIsKill = hasTrivialKill(LHS);
if (!LHSReg || !RHSReg) if (!LHSReg || !RHSReg)
return false; return false;
const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo(); const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8); unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8);
Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
LHSReg, LHSIsKill, CC);
updateValueMap(I, ResultReg); updateValueMap(I, ResultReg);
return true; return true;
} }
@ -2211,17 +2192,9 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
const Value *RHS = I->getOperand(2); const Value *RHS = I->getOperand(2);
Register LHSReg = getRegForValue(LHS); Register LHSReg = getRegForValue(LHS);
bool LHSIsKill = hasTrivialKill(LHS);
Register RHSReg = getRegForValue(RHS); Register RHSReg = getRegForValue(RHS);
bool RHSIsKill = hasTrivialKill(RHS);
Register CmpLHSReg = getRegForValue(CmpLHS); Register CmpLHSReg = getRegForValue(CmpLHS);
bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
Register CmpRHSReg = getRegForValue(CmpRHS); Register CmpRHSReg = getRegForValue(CmpRHS);
bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg) if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
return false; return false;
@ -2235,8 +2208,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
unsigned CmpOpcode = unsigned CmpOpcode =
(RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr; (RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill, Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,
CmpRHSReg, CmpRHSIsKill, CC); CC);
// Need an IMPLICIT_DEF for the input that is used to generate the upper // Need an IMPLICIT_DEF for the input that is used to generate the upper
// bits of the result register since its not based on any of the inputs. // bits of the result register since its not based on any of the inputs.
@ -2248,9 +2221,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
// LHS in the input. The mask input comes from the compare. // LHS in the input. The mask input comes from the compare.
unsigned MovOpcode = unsigned MovOpcode =
(RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk; (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, RHSIsKill, unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
CmpReg, true, ImplicitDefReg, true, ImplicitDefReg, LHSReg);
LHSReg, LHSIsKill);
ResultReg = createResultReg(RC); ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@ -2269,10 +2241,10 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
unsigned BlendOpcode = unsigned BlendOpcode =
(RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr; (RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill, Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpRHSReg,
CmpRHSReg, CmpRHSIsKill, CC); CC);
Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill, Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg,
LHSReg, LHSIsKill, CmpReg, true); CmpReg);
ResultReg = createResultReg(RC); ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg); TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
@ -2291,14 +2263,10 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
} }
const TargetRegisterClass *VR128 = &X86::VR128RegClass; const TargetRegisterClass *VR128 = &X86::VR128RegClass;
Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill, Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpRHSReg, CC);
CmpRHSReg, CmpRHSIsKill, CC); Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, LHSReg);
Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg);
/*Op0IsKill=*/false, LHSReg, LHSIsKill); Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg);
Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg,
/*Op0IsKill=*/true, RHSReg, RHSIsKill);
Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*Op0IsKill=*/true,
AndReg, /*Op1IsKill=*/true);
ResultReg = createResultReg(RC); ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg); TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
@ -2348,7 +2316,6 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
Register CondReg = getRegForValue(Cond); Register CondReg = getRegForValue(Cond);
if (CondReg == 0) if (CondReg == 0)
return false; return false;
bool CondIsKill = hasTrivialKill(Cond);
// In case OpReg is a K register, COPY to a GPR // In case OpReg is a K register, COPY to a GPR
if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
@ -2356,12 +2323,11 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
CondReg = createResultReg(&X86::GR32RegClass); CondReg = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), CondReg) TII.get(TargetOpcode::COPY), CondReg)
.addReg(KCondReg, getKillRegState(CondIsKill)); .addReg(KCondReg);
CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true, CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
X86::sub_8bit);
} }
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
.addReg(CondReg, getKillRegState(CondIsKill)) .addReg(CondReg)
.addImm(1); .addImm(1);
} }
@ -2369,18 +2335,14 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
const Value *RHS = I->getOperand(2); const Value *RHS = I->getOperand(2);
Register LHSReg = getRegForValue(LHS); Register LHSReg = getRegForValue(LHS);
bool LHSIsKill = hasTrivialKill(LHS);
Register RHSReg = getRegForValue(RHS); Register RHSReg = getRegForValue(RHS);
bool RHSIsKill = hasTrivialKill(RHS);
if (!LHSReg || !RHSReg) if (!LHSReg || !RHSReg)
return false; return false;
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
Register ResultReg = Register ResultReg =
fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC); fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
updateValueMap(I, ResultReg); updateValueMap(I, ResultReg);
return true; return true;
} }
@ -2404,12 +2366,11 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
Register OpReg = getRegForValue(Opnd); Register OpReg = getRegForValue(Opnd);
if (OpReg == 0) if (OpReg == 0)
return false; return false;
bool OpIsKill = hasTrivialKill(Opnd);
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg) TII.get(TargetOpcode::COPY), ResultReg)
.addReg(OpReg, getKillRegState(OpIsKill)); .addReg(OpReg);
updateValueMap(I, ResultReg); updateValueMap(I, ResultReg);
return true; return true;
} }
@ -2479,8 +2440,7 @@ bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
Register ImplicitDefReg = createResultReg(RC); Register ImplicitDefReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
Register ResultReg = Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg);
fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false);
updateValueMap(I, ResultReg); updateValueMap(I, ResultReg);
return true; return true;
} }
@ -2577,8 +2537,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
} }
// Issue an extract_subreg. // Issue an extract_subreg.
Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, InputReg,
InputReg, false,
X86::sub_8bit); X86::sub_8bit);
if (!ResultReg) if (!ResultReg)
return false; return false;
@ -2614,7 +2573,7 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
unsigned Reg; unsigned Reg;
bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg); bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
RV &= X86FastEmitStore(VT, Reg, /*ValIsKill=*/true, DestAM); RV &= X86FastEmitStore(VT, Reg, DestAM);
assert(RV && "Failed to emit load or store??"); assert(RV && "Failed to emit load or store??");
unsigned Size = VT.getSizeInBits()/8; unsigned Size = VT.getSizeInBits()/8;
@ -2662,7 +2621,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// controlled by MXCSR. // controlled by MXCSR.
unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr
: X86::VCVTPS2PHrr; : X86::VCVTPS2PHrr;
InputReg = fastEmitInst_ri(Opc, RC, InputReg, false, 4); InputReg = fastEmitInst_ri(Opc, RC, InputReg, 4);
// Move the lower 32-bits of ResultReg to another register of class GR32. // Move the lower 32-bits of ResultReg to another register of class GR32.
Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr
@ -2673,20 +2632,19 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// The result value is in the lower 16-bits of ResultReg. // The result value is in the lower 16-bits of ResultReg.
unsigned RegIdx = X86::sub_16bit; unsigned RegIdx = X86::sub_16bit;
ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx); ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, RegIdx);
} else { } else {
assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!"); assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
// Explicitly zero-extend the input to 32-bit. // Explicitly zero-extend the input to 32-bit.
InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg, InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg);
/*Op0IsKill=*/false);
// The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr. // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR, InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
InputReg, /*Op0IsKill=*/true); InputReg);
unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr
: X86::VCVTPH2PSrr; : X86::VCVTPH2PSrr;
InputReg = fastEmitInst_r(Opc, RC, InputReg, /*Op0IsKill=*/true); InputReg = fastEmitInst_r(Opc, RC, InputReg);
// The result value is in the lower 32-bits of ResultReg. // The result value is in the lower 32-bits of ResultReg.
// Emit an explicit copy from register class VR128 to register class FR32. // Emit an explicit copy from register class VR128 to register class FR32.
@ -2937,7 +2895,6 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
Register LHSReg = getRegForValue(LHS); Register LHSReg = getRegForValue(LHS);
if (LHSReg == 0) if (LHSReg == 0)
return false; return false;
bool LHSIsKill = hasTrivialKill(LHS);
unsigned ResultReg = 0; unsigned ResultReg = 0;
// Check if we have an immediate version. // Check if we have an immediate version.
@ -2954,21 +2911,17 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
bool IsDec = BaseOpc == ISD::SUB; bool IsDec = BaseOpc == ISD::SUB;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg) TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill)); .addReg(LHSReg);
} else } else
ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill, ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue());
CI->getZExtValue());
} }
unsigned RHSReg; unsigned RHSReg;
bool RHSIsKill;
if (!ResultReg) { if (!ResultReg) {
RHSReg = getRegForValue(RHS); RHSReg = getRegForValue(RHS);
if (RHSReg == 0) if (RHSReg == 0)
return false; return false;
RHSIsKill = hasTrivialKill(RHS); ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg);
ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
RHSIsKill);
} }
// FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
@ -2981,9 +2934,9 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// the X86::MUL*r instruction. // the X86::MUL*r instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8]) TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
.addReg(LHSReg, getKillRegState(LHSIsKill)); .addReg(LHSReg);
ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8], ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), RHSReg, RHSIsKill); TLI.getRegClassFor(VT), RHSReg);
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) { } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
static const uint16_t MULOpc[] = static const uint16_t MULOpc[] =
{ X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr }; { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
@ -2992,13 +2945,11 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// X86::IMUL8r instruction. // X86::IMUL8r instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), X86::AL) TII.get(TargetOpcode::COPY), X86::AL)
.addReg(LHSReg, getKillRegState(LHSIsKill)); .addReg(LHSReg);
ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg, ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);
RHSIsKill);
} else } else
ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8], ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), LHSReg, LHSIsKill, TLI.getRegClassFor(VT), LHSReg, RHSReg);
RHSReg, RHSIsKill);
} }
if (!ResultReg) if (!ResultReg)
@ -3309,8 +3260,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (!isTypeLegal(PrevVal->getType(), VT)) if (!isTypeLegal(PrevVal->getType(), VT))
return false; return false;
ResultReg = ResultReg = fastEmit_ri(VT, VT, ISD::AND, ResultReg, 1);
fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
} else { } else {
if (!isTypeLegal(Val->getType(), VT) || if (!isTypeLegal(Val->getType(), VT) ||
(VT.isVector() && VT.getVectorElementType() == MVT::i1)) (VT.isVector() && VT.getVectorElementType() == MVT::i1))
@ -3378,7 +3328,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Handle zero-extension from i1 to i8, which is common. // Handle zero-extension from i1 to i8, which is common.
if (ArgVT == MVT::i1) { if (ArgVT == MVT::i1) {
// Set the high bits to zero. // Set the high bits to zero.
ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false); ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg);
ArgVT = MVT::i8; ArgVT = MVT::i8;
if (ArgReg == 0) if (ArgReg == 0)
@ -3408,8 +3358,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
break; break;
} }
case CCValAssign::BCvt: { case CCValAssign::BCvt: {
ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg, ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg);
/*TODO: Kill=*/false);
assert(ArgReg && "Failed to emit a bitcast!"); assert(ArgReg && "Failed to emit a bitcast!");
ArgVT = VA.getLocVT(); ArgVT = VA.getLocVT();
break; break;
@ -3462,8 +3411,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO)) if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
return false; return false;
} else { } else {
bool ValIsKill = hasTrivialKill(ArgVal); if (!X86FastEmitStore(ArgVT, ArgReg, AM, MMO))
if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
return false; return false;
} }
} }
@ -3727,11 +3675,9 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
default: llvm_unreachable("Unexpected value type"); default: llvm_unreachable("Unexpected value type");
case MVT::i1: case MVT::i1:
case MVT::i8: case MVT::i8:
return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Op0IsKill=*/true, return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit);
X86::sub_8bit);
case MVT::i16: case MVT::i16:
return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Op0IsKill=*/true, return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit);
X86::sub_16bit);
case MVT::i32: case MVT::i32:
return SrcReg; return SrcReg;
case MVT::i64: { case MVT::i64: {
@ -4001,10 +3947,8 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode, unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op0, unsigned Op1,
unsigned Op1, bool Op1IsKill, unsigned Op2, unsigned Op3) {
unsigned Op2, bool Op2IsKill,
unsigned Op3, bool Op3IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode); const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC); Register ResultReg = createResultReg(RC);
@ -4015,16 +3959,16 @@ unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)) .addReg(Op1)
.addReg(Op2, getKillRegState(Op2IsKill)) .addReg(Op2)
.addReg(Op3, getKillRegState(Op3IsKill)); .addReg(Op3);
else { else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill)) .addReg(Op0)
.addReg(Op1, getKillRegState(Op1IsKill)) .addReg(Op1)
.addReg(Op2, getKillRegState(Op2IsKill)) .addReg(Op2)
.addReg(Op3, getKillRegState(Op3IsKill)); .addReg(Op3);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
} }

View File

@ -6,7 +6,7 @@
; CHECK-SSA: [[QUOTREG:%[0-9]+]]:gpr32 = SDIVWr ; CHECK-SSA: [[QUOTREG:%[0-9]+]]:gpr32 = SDIVWr
; CHECK-SSA-NOT: [[QUOTREG]] = ; CHECK-SSA-NOT: [[QUOTREG]] =
; CHECK-SSA: {{%[0-9]+}}:gpr32 = MSUBWrrr killed [[QUOTREG]] ; CHECK-SSA: {{%[0-9]+}}:gpr32 = MSUBWrrr [[QUOTREG]]
; CHECK-SSA-LABEL: Machine code for function t2 ; CHECK-SSA-LABEL: Machine code for function t2

View File

@ -294,7 +294,7 @@ struct OperandsSignature {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) { for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
OS << LS; OS << LS;
if (Operands[i].isReg()) { if (Operands[i].isReg()) {
OS << "unsigned Op" << i << ", bool Op" << i << "IsKill"; OS << "unsigned Op" << i;
} else if (Operands[i].isImm()) { } else if (Operands[i].isImm()) {
OS << "uint64_t imm" << i; OS << "uint64_t imm" << i;
} else if (Operands[i].isFP()) { } else if (Operands[i].isFP()) {
@ -316,7 +316,7 @@ struct OperandsSignature {
OS << LS; OS << LS;
if (Operands[i].isReg()) { if (Operands[i].isReg()) {
OS << "Op" << i << ", Op" << i << "IsKill"; OS << "Op" << i;
} else if (Operands[i].isImm()) { } else if (Operands[i].isImm()) {
OS << "imm" << i; OS << "imm" << i;
} else if (Operands[i].isFP()) { } else if (Operands[i].isFP()) {
@ -332,7 +332,7 @@ struct OperandsSignature {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) { for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
OS << LS; OS << LS;
if (Operands[i].isReg()) { if (Operands[i].isReg()) {
OS << "Op" << i << ", Op" << i << "IsKill"; OS << "Op" << i;
} else if (Operands[i].isImm()) { } else if (Operands[i].isImm()) {
OS << "imm" << i; OS << "imm" << i;
} else if (Operands[i].isFP()) { } else if (Operands[i].isFP()) {
@ -673,7 +673,7 @@ void FastISelMap::emitInstructionCode(raw_ostream &OS,
OS << ");\n"; OS << ");\n";
} else { } else {
OS << "extractsubreg(" << RetVTName OS << "extractsubreg(" << RetVTName
<< ", Op0, Op0IsKill, " << Memo.SubRegNo << ");\n"; << ", Op0, " << Memo.SubRegNo << ");\n";
} }
if (!PredicateCheck.empty()) { if (!PredicateCheck.empty()) {