forked from OSchip/llvm-project
Fix a bunch of places where operator-> was used directly on the return from dyn_cast.
Inspired by r331508, I did a grep and found these. Mostly this just changes dyn_cast to cast. In some cases a dyn_cast result was also being converted to bool; those I changed to isa. llvm-svn: 331577
This commit is contained in:
parent
00d83601b4
commit
781aa181ab
|
@ -16933,14 +16933,14 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
|
|||
const ConstantFPSDNode *Zero = nullptr;
|
||||
|
||||
if (TheSelect->getOpcode() == ISD::SELECT_CC) {
|
||||
CC = dyn_cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
|
||||
CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
|
||||
CmpLHS = TheSelect->getOperand(0);
|
||||
Zero = isConstOrConstSplatFP(TheSelect->getOperand(1));
|
||||
} else {
|
||||
// SELECT or VSELECT
|
||||
SDValue Cmp = TheSelect->getOperand(0);
|
||||
if (Cmp.getOpcode() == ISD::SETCC) {
|
||||
CC = dyn_cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
|
||||
CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
|
||||
CmpLHS = Cmp.getOperand(0);
|
||||
Zero = isConstOrConstSplatFP(Cmp.getOperand(1));
|
||||
}
|
||||
|
|
|
@ -3726,7 +3726,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
|
|||
if (Tmp2.isUndef() ||
|
||||
(Tmp2.getOpcode() == ISD::AND &&
|
||||
isa<ConstantSDNode>(Tmp2.getOperand(1)) &&
|
||||
dyn_cast<ConstantSDNode>(Tmp2.getOperand(1))->getZExtValue() == 1))
|
||||
cast<ConstantSDNode>(Tmp2.getOperand(1))->getZExtValue() == 1))
|
||||
Tmp3 = Tmp2;
|
||||
else
|
||||
Tmp3 = DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
|
||||
|
|
|
@ -1369,7 +1369,7 @@ static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo) {
|
|||
}
|
||||
|
||||
auto DLoc = isa<Instruction>(SwiftErrorVal)
|
||||
? dyn_cast<Instruction>(SwiftErrorVal)->getDebugLoc()
|
||||
? cast<Instruction>(SwiftErrorVal)->getDebugLoc()
|
||||
: DebugLoc();
|
||||
const auto *TII = FuncInfo->MF->getSubtarget().getInstrInfo();
|
||||
|
||||
|
|
|
@ -1393,11 +1393,11 @@ void Function::setSectionPrefix(StringRef Prefix) {
|
|||
|
||||
Optional<StringRef> Function::getSectionPrefix() const {
|
||||
if (MDNode *MD = getMetadata(LLVMContext::MD_section_prefix)) {
|
||||
assert(dyn_cast<MDString>(MD->getOperand(0))
|
||||
assert(cast<MDString>(MD->getOperand(0))
|
||||
->getString()
|
||||
.equals("function_section_prefix") &&
|
||||
"Metadata not match");
|
||||
return dyn_cast<MDString>(MD->getOperand(1))->getString();
|
||||
return cast<MDString>(MD->getOperand(1))->getString();
|
||||
}
|
||||
return None;
|
||||
}
|
||||
|
|
|
@ -8101,8 +8101,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
|
|||
// vectors to integer vectors.
|
||||
if (EltTy->isPointerTy()) {
|
||||
Type *IntTy = DL.getIntPtrType(EltTy);
|
||||
unsigned NumOpElts =
|
||||
dyn_cast<VectorType>(Op0->getType())->getVectorNumElements();
|
||||
unsigned NumOpElts = Op0->getType()->getVectorNumElements();
|
||||
|
||||
// Convert to the corresponding integer vector.
|
||||
Type *IntVecTy = VectorType::get(IntTy, NumOpElts);
|
||||
|
|
|
@ -755,7 +755,7 @@ bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
|
|||
// Create relocations for unconditional branches to function symbols with
|
||||
// different execution mode in ELF binaries.
|
||||
if (Sym && Sym->isELF()) {
|
||||
unsigned Type = dyn_cast<MCSymbolELF>(Sym)->getType();
|
||||
unsigned Type = cast<MCSymbolELF>(Sym)->getType();
|
||||
if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
|
||||
if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
|
||||
return true;
|
||||
|
|
|
@ -475,8 +475,8 @@ public:
|
|||
} else if (isa<MCBinaryExpr>(getImm())) {
|
||||
#ifndef NDEBUG
|
||||
const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(getImm());
|
||||
assert(dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()) &&
|
||||
dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS())->getKind() ==
|
||||
assert(isa<LanaiMCExpr>(BinaryExpr->getLHS()) &&
|
||||
cast<LanaiMCExpr>(BinaryExpr->getLHS())->getKind() ==
|
||||
LanaiMCExpr::VK_Lanai_ABS_LO);
|
||||
#endif
|
||||
Inst.addOperand(MCOperand::createExpr(getImm()));
|
||||
|
@ -505,8 +505,8 @@ public:
|
|||
} else if (isa<MCBinaryExpr>(getImm())) {
|
||||
#ifndef NDEBUG
|
||||
const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(getImm());
|
||||
assert(dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()) &&
|
||||
dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS())->getKind() ==
|
||||
assert(isa<LanaiMCExpr>(BinaryExpr->getLHS()) &&
|
||||
cast<LanaiMCExpr>(BinaryExpr->getLHS())->getKind() ==
|
||||
LanaiMCExpr::VK_Lanai_ABS_HI);
|
||||
#endif
|
||||
Inst.addOperand(MCOperand::createExpr(getImm()));
|
||||
|
|
|
@ -316,7 +316,7 @@ void LanaiDAGToDAGISel::Select(SDNode *Node) {
|
|||
void LanaiDAGToDAGISel::selectFrameIndex(SDNode *Node) {
|
||||
SDLoc DL(Node);
|
||||
SDValue Imm = CurDAG->getTargetConstant(0, DL, MVT::i32);
|
||||
int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
|
||||
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
|
||||
EVT VT = Node->getValueType(0);
|
||||
SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
|
||||
unsigned Opc = Lanai::ADD_I_LO;
|
||||
|
|
|
@ -93,7 +93,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
|
|||
if (Opcode == ISD::FrameIndex) {
|
||||
SDLoc DL(Node);
|
||||
SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
|
||||
int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
|
||||
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
|
||||
EVT VT = Node->getValueType(0);
|
||||
SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
|
||||
ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
|
||||
|
|
|
@ -4257,9 +4257,9 @@ static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
|
|||
if (!Op.isUndef()) {
|
||||
uint64_t Value;
|
||||
if (Op.getOpcode() == ISD::Constant)
|
||||
Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue();
|
||||
Value = cast<ConstantSDNode>(Op)->getZExtValue();
|
||||
else if (Op.getOpcode() == ISD::ConstantFP)
|
||||
Value = (dyn_cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
|
||||
Value = (cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
|
||||
.getZExtValue());
|
||||
else
|
||||
return false;
|
||||
|
@ -4642,7 +4642,7 @@ SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
|
|||
Op1.getOpcode() != ISD::BITCAST &&
|
||||
Op1.getOpcode() != ISD::ConstantFP &&
|
||||
Op2.getOpcode() == ISD::Constant) {
|
||||
uint64_t Index = dyn_cast<ConstantSDNode>(Op2)->getZExtValue();
|
||||
uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
|
||||
unsigned Mask = VT.getVectorNumElements() - 1;
|
||||
if (Index <= Mask)
|
||||
return Op;
|
||||
|
|
|
@ -737,7 +737,7 @@ int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondT
|
|||
unsigned PredicateExtraCost = 0;
|
||||
if (I != nullptr) {
|
||||
// Some predicates cost one or two extra instructions.
|
||||
switch (dyn_cast<CmpInst>(I)->getPredicate()) {
|
||||
switch (cast<CmpInst>(I)->getPredicate()) {
|
||||
case CmpInst::Predicate::ICMP_NE:
|
||||
case CmpInst::Predicate::ICMP_UGE:
|
||||
case CmpInst::Predicate::ICMP_ULE:
|
||||
|
|
|
@ -723,7 +723,7 @@ static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
|
|||
}
|
||||
|
||||
auto *Op = NewInsts[GEP->getOperand(0)];
|
||||
if (isa<ConstantInt>(Op) && dyn_cast<ConstantInt>(Op)->isZero())
|
||||
if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
|
||||
NewInsts[GEP] = Index;
|
||||
else
|
||||
NewInsts[GEP] = Builder.CreateNSWAdd(
|
||||
|
|
|
@ -2496,7 +2496,7 @@ bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
|
|||
// Static allocas (constant size in the entry block) are handled by
|
||||
// prologue/epilogue insertion so they're free anyway. We definitely don't
|
||||
// want to make them non-constant.
|
||||
return !dyn_cast<AllocaInst>(I)->isStaticAlloca();
|
||||
return !cast<AllocaInst>(I)->isStaticAlloca();
|
||||
case Instruction::GetElementPtr:
|
||||
if (OpIdx == 0)
|
||||
return true;
|
||||
|
|
|
@ -2355,7 +2355,7 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
|
|||
}
|
||||
case Instruction::Load: {
|
||||
// Cost of wide load - cost of scalar loads.
|
||||
unsigned alignment = dyn_cast<LoadInst>(VL0)->getAlignment();
|
||||
unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
|
||||
if (NeedToShuffleReuses) {
|
||||
ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) *
|
||||
TTI->getMemoryOpCost(Instruction::Load, ScalarTy,
|
||||
|
@ -2374,7 +2374,7 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
|
|||
}
|
||||
case Instruction::Store: {
|
||||
// We know that we can merge the stores. Calculate the cost.
|
||||
unsigned alignment = dyn_cast<StoreInst>(VL0)->getAlignment();
|
||||
unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
|
||||
if (NeedToShuffleReuses) {
|
||||
ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) *
|
||||
TTI->getMemoryOpCost(Instruction::Store, ScalarTy,
|
||||
|
@ -5987,9 +5987,8 @@ static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
|
|||
// reduction phi. Vectorizing such cases has been reported to cause
|
||||
// miscompiles. See PR25787.
|
||||
auto DominatedReduxValue = [&](Value *R) {
|
||||
return (
|
||||
dyn_cast<Instruction>(R) &&
|
||||
DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent()));
|
||||
return isa<Instruction>(R) &&
|
||||
DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
|
||||
};
|
||||
|
||||
Value *Rdx = nullptr;
|
||||
|
|
|
@ -473,8 +473,7 @@ TEST(ConstantsTest, BitcastToGEP) {
|
|||
GlobalValue::ExternalLinkage, nullptr);
|
||||
auto *PtrTy = PointerType::get(i32, 0);
|
||||
auto *C = ConstantExpr::getBitCast(G, PtrTy);
|
||||
ASSERT_EQ(dyn_cast<ConstantExpr>(C)->getOpcode(),
|
||||
Instruction::BitCast);
|
||||
ASSERT_EQ(cast<ConstantExpr>(C)->getOpcode(), Instruction::BitCast);
|
||||
}
|
||||
|
||||
} // end anonymous namespace
|
||||
|
|
Loading…
Reference in New Issue