diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index e6109a703e07..b924a23c8f3f 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -725,9 +725,10 @@ public:
 
   // Debugging methods.
   void dump() const;
-  void print(raw_ostream &OS, const SlotIndexes* = nullptr) const;
+  void print(raw_ostream &OS, const SlotIndexes * = nullptr,
+             bool IsVerbose = true) const;
   void print(raw_ostream &OS, ModuleSlotTracker &MST,
-             const SlotIndexes* = nullptr) const;
+             const SlotIndexes * = nullptr, bool IsVerbose = true) const;
 
   // Printing method used by LoopInfo.
   void printAsOperand(raw_ostream &OS, bool PrintType = true) const;
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 3c1c1bb14f42..56a42921979d 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1233,16 +1233,20 @@ public:
   bool hasComplexRegisterTies() const;
 
   /// Print this MI to \p OS.
+  /// Don't print information that can be inferred from other instructions if
+  /// \p IsVerbose is false. It is usually true when only a fragment of the
+  /// function is printed.
   /// Only print the defs and the opcode if \p SkipOpers is true.
   /// Otherwise, also print operands if \p SkipDebugLoc is true.
   /// Otherwise, also print the debug loc, with a terminating newline.
   /// \p TII is used to print the opcode name. If it's not present, but the
   /// MI is in a function, the opcode will be printed using the function's TII.
-  void print(raw_ostream &OS, bool SkipOpers = false, bool SkipDebugLoc = false,
-             const TargetInstrInfo *TII = nullptr) const;
-  void print(raw_ostream &OS, ModuleSlotTracker &MST, bool SkipOpers = false,
+  void print(raw_ostream &OS, bool IsVerbose = true, bool SkipOpers = false,
              bool SkipDebugLoc = false,
              const TargetInstrInfo *TII = nullptr) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsVerbose = true,
+             bool SkipOpers = false, bool SkipDebugLoc = false,
+             const TargetInstrInfo *TII = nullptr) const;
   void dump() const;
 
   /// @}
diff --git a/llvm/include/llvm/CodeGen/MachineOperand.h b/llvm/include/llvm/CodeGen/MachineOperand.h
index ebd130556c46..145702aec472 100644
--- a/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -270,6 +270,9 @@ public:
   /// \param PrintDef - whether we want to print `def` on an operand which
   /// isDef. Sometimes, if the operand is printed before '=', we don't print
   /// `def`.
+  /// \param IsVerbose - whether we want a verbose output of the MO. This
+  /// prints extra information that can be easily inferred when printing the
+  /// whole function, but not when printing only a fragment of it.
   /// \param ShouldPrintRegisterTies - whether we want to print register ties.
   /// Sometimes they are easily determined by the instruction's descriptor
   /// (MachineInstr::hasComplexRegiterTies can determine if it's needed).
@@ -280,7 +283,7 @@ public:
   /// information from it's parent.
   /// \param IntrinsicInfo - same as \p TRI.
   void print(raw_ostream &os, ModuleSlotTracker &MST, LLT TypeToPrint,
-             bool PrintDef, bool ShouldPrintRegisterTies,
+             bool PrintDef, bool IsVerbose, bool ShouldPrintRegisterTies,
              unsigned TiedOperandIdx, const TargetRegisterInfo *TRI,
              const TargetIntrinsicInfo *IntrinsicInfo) const;
 
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 5e7a2585b4e5..5b22762f001f 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -768,8 +768,8 @@ void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx,
     if (ShouldPrintRegisterTies && Op.isReg() && Op.isTied() && !Op.isDef())
       TiedOperandIdx = Op.getParent()->findTiedOperandIdx(OpIdx);
     const TargetIntrinsicInfo *TII = MI.getMF()->getTarget().getIntrinsicInfo();
-    Op.print(OS, MST, TypeToPrint, PrintDef, ShouldPrintRegisterTies,
-             TiedOperandIdx, TRI, TII);
+    Op.print(OS, MST, TypeToPrint, PrintDef, /*IsVerbose=*/false,
+             ShouldPrintRegisterTies, TiedOperandIdx, TRI, TII);
     break;
   }
   case MachineOperand::MO_FrameIndex:
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 209abf34d885..9c0bec0f2d19 100644
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -259,8 +259,8 @@ std::string MachineBasicBlock::getFullName() const {
   return Name;
 }
 
-void MachineBasicBlock::print(raw_ostream &OS, const SlotIndexes *Indexes)
-    const {
+void MachineBasicBlock::print(raw_ostream &OS, const SlotIndexes *Indexes,
+                              bool IsVerbose) const {
   const MachineFunction *MF = getParent();
   if (!MF) {
     OS << "Can't print out MachineBasicBlock because parent MachineFunction"
@@ -270,11 +270,12 @@ void MachineBasicBlock::print(raw_ostream &OS, const SlotIndexes *Indexes)
   const Function &F = MF->getFunction();
   const Module *M = F.getParent();
   ModuleSlotTracker MST(M);
-  print(OS, MST, Indexes);
+  print(OS, MST, Indexes, IsVerbose);
 }
 
 void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
-                              const SlotIndexes *Indexes) const {
+                              const SlotIndexes *Indexes,
+                              bool IsVerbose) const {
   const MachineFunction *MF = getParent();
   if (!MF) {
     OS << "Can't print out MachineBasicBlock because parent MachineFunction"
@@ -330,7 +331,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
     OS << '\t';
     if (I.isInsideBundle())
       OS << " * ";
-    I.print(OS, MST);
+    I.print(OS, MST, IsVerbose);
   }
 
   // Print the successors of this block according to the CFG.
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index bc8eb1429d92..da286d79be94 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -522,7 +522,9 @@ void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
   MST.incorporateFunction(getFunction());
   for (const auto &BB : *this) {
     OS << '\n';
-    BB.print(OS, MST, Indexes);
+    // If we print the whole function, don't print any verbose information,
+    // since that information is already present.
+    BB.print(OS, MST, Indexes, /*IsVerbose=*/false);
   }
 
   OS << "\n# End machine code for function " << getName() << ".\n\n";
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 2a0ebc607c77..97e24e8e0ba1 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1229,8 +1229,8 @@ LLVM_DUMP_METHOD void MachineInstr::dump() const {
 }
 #endif
 
-void MachineInstr::print(raw_ostream &OS, bool SkipOpers, bool SkipDebugLoc,
-                         const TargetInstrInfo *TII) const {
+void MachineInstr::print(raw_ostream &OS, bool IsVerbose, bool SkipOpers,
+                         bool SkipDebugLoc, const TargetInstrInfo *TII) const {
   const Module *M = nullptr;
   const Function *F = nullptr;
   if (const MachineFunction *MF = getMFIfAvailable(*this)) {
@@ -1241,11 +1241,11 @@ void MachineInstr::print(raw_ostream &OS, bool SkipOpers, bool SkipDebugLoc,
   ModuleSlotTracker MST(M);
   if (F)
     MST.incorporateFunction(*F);
-  print(OS, MST, SkipOpers, SkipDebugLoc, TII);
+  print(OS, MST, IsVerbose, SkipOpers, SkipDebugLoc, TII);
 }
 
 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
-                         bool SkipOpers, bool SkipDebugLoc,
+                         bool IsVerbose, bool SkipOpers, bool SkipDebugLoc,
                          const TargetInstrInfo *TII) const {
   // We can be a bit tidier if we know the MachineFunction.
   const MachineFunction *MF = nullptr;
@@ -1281,8 +1281,8 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
 
     LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
     unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
-    MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, ShouldPrintRegisterTies,
-             TiedOperandIdx, TRI, IntrinsicInfo);
+    MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsVerbose,
+             ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
     ++StartOp;
   }
 
@@ -1314,7 +1314,7 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
     const unsigned OpIdx = InlineAsm::MIOp_AsmString;
     LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
     unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
-    getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true,
+    getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsVerbose,
                             ShouldPrintRegisterTies, TiedOperandIdx, TRI,
                             IntrinsicInfo);
 
@@ -1353,7 +1353,7 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
       else {
         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
         unsigned TiedOperandIdx = getTiedOperandIdx(i);
-        MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true,
+        MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsVerbose,
                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
       }
     } else if (i == AsmDescOp && MO.isImm()) {
@@ -1420,7 +1420,7 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
       if (MO.isImm() && isOperandSubregIdx(i))
         MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
       else
-        MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true,
+        MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsVerbose,
                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
     }
   }
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index 9d0c93eb27d6..13af5e117888 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -641,13 +641,13 @@ void MachineOperand::print(raw_ostream &OS, const TargetRegisterInfo *TRI,
                            const TargetIntrinsicInfo *IntrinsicInfo) const {
   tryToGetTargetInfo(*this, TRI, IntrinsicInfo);
   ModuleSlotTracker DummyMST(nullptr);
-  print(OS, DummyMST, LLT{}, /*PrintDef=*/false,
+  print(OS, DummyMST, LLT{}, /*PrintDef=*/false, /*IsVerbose=*/true,
        /*ShouldPrintRegisterTies=*/true,
        /*TiedOperandIdx=*/0, TRI, IntrinsicInfo);
 }
 
 void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
-                           LLT TypeToPrint, bool PrintDef,
+                           LLT TypeToPrint, bool PrintDef, bool IsVerbose,
                            bool ShouldPrintRegisterTies, unsigned TiedOperandIdx,
                            const TargetRegisterInfo *TRI,
                            const TargetIntrinsicInfo *IntrinsicInfo) const {
@@ -687,7 +687,7 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
     if (TargetRegisterInfo::isVirtualRegister(Reg)) {
       if (const MachineFunction *MF = getMFIfAvailable(*this)) {
        const MachineRegisterInfo &MRI = MF->getRegInfo();
-        if (!PrintDef || MRI.def_empty(Reg)) {
+        if (IsVerbose || !PrintDef || MRI.def_empty(Reg)) {
           OS << ':';
           OS << printRegClassOrBank(Reg, MRI, TRI);
         }
diff --git a/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp b/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
index ca4452218da1..661537b07e80 100644
--- a/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
+++ b/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
@@ -27,7 +27,7 @@ DiagnosticInfoMIROptimization::MachineArgument::MachineArgument(
   Key = MKey;
 
   raw_string_ostream OS(Val);
-  MI.print(OS, /*SkipOpers=*/false, /*SkipDebugLoc=*/true);
+  MI.print(OS, /*IsVerbose=*/true, /*SkipOpers=*/false, /*SkipDebugLoc=*/true);
 }
 
 Optional<uint64_t>
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 61a47da674fa..ebbacd8b59cf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -46,7 +46,7 @@ define [1 x double] @constant() {
 ; The key problem here is that we may fail to create an MBB referenced by a
 ; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
 ; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6(s32), %2(p0); mem:ST4[%addr] (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6:gpr(s32), %2:gpr(p0); mem:ST4[%addr] (in function: pending_phis)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
 ; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
 define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -66,7 +66,7 @@ false:
 }
 
 ; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(s42), %0(p0); mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s42), %0:_(p0); mem:ST6[%addr](align=8) (in function: odd_type)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
 define void @odd_type(i42* %addr) {
@@ -75,7 +75,7 @@ define void @odd_type(i42* %addr) {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(<7 x s32>), %0(p0); mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<7 x s32>), %0:_(p0); mem:ST28[%addr](align=32) (in function: odd_vector)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
 define void @odd_vector(<7 x i32>* %addr) {
@@ -94,7 +94,7 @@ define i128 @sequence_sizes([8 x i8] %in) {
 }
 
 ; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0(p0); mem:LD8[%addr] (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0:gpr(p0); mem:LD8[%addr] (in function: atomic_ops)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
 ; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
 define i64 @atomic_ops(i64* %addr) {
@@ -142,7 +142,7 @@ define fp128 @test_quad_dump() {
   ret fp128 0xL00000000000000004000000000000000
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(p0) = G_EXTRACT_VECTOR_ELT %1(<2 x p0>), %2(s32) (in function: vector_of_pointers_extractelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(p0) = G_EXTRACT_VECTOR_ELT %1:_(<2 x p0>), %2:_(s32) (in function: vector_of_pointers_extractelement)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
 ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
 @var = global <2 x i16*> zeroinitializer
@@ -159,7 +159,7 @@ end:
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0(<2 x p0>), %4(p0); mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(<2 x p0>), %4:_(p0); mem:ST16[undef] (in function: vector_of_pointers_insertelement)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
 ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
 define void @vector_of_pointers_insertelement() {
@@ -175,7 +175,7 @@ end:
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(s96), %3(p0); mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s96), %3:_(p0); mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
 %struct96 = type { float, float, float }
@@ -216,7 +216,7 @@ define void @nonpow2_load_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3(s96), %0(p0); mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0); mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
 define void @nonpow2_store_narrowing(i96* %c) {
@@ -226,7 +226,7 @@ define void @nonpow2_store_narrowing(i96* %c) {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0(s96), %1(p0); mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0); mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
 define void @nonpow2_constant_narrowing() {
@@ -236,8 +236,8 @@ define void @nonpow2_constant_narrowing() {
 
 ; Currently can't handle vector lengths that aren't an exact multiple of
 ; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1(<7 x s64>), %3(s64) (in function: nonpow2_vector_add_fewerelements)
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1:_(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1:_(<7 x s64>), %3:_(s64) (in function: nonpow2_vector_add_fewerelements)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
 define void @nonpow2_vector_add_fewerelements() {
diff --git a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
index c6bdbe4f0322..5d6c5a7b2cad 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
@@ -5,10 +5,10 @@
 ; CHECK-LABEL: stp_i64_scale:%bb.0
 ; CHECK:Cluster ld/st SU(4) - SU(3)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4): STRXui %1, %0, 1
-; CHECK:SU(3): STRXui %1, %0, 2
-; CHECK:SU(2): STRXui %1, %0, 3
-; CHECK:SU(5): STRXui %1, %0, 4
+; CHECK:SU(4): STRXui %1:gpr64, %0:gpr64common, 1
+; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2
+; CHECK:SU(2): STRXui %1:gpr64, %0:gpr64common, 3
+; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4
 define i64 @stp_i64_scale(i64* nocapture %P, i64 %v) {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
@@ -26,10 +26,10 @@ entry:
 ; CHECK-LABEL: stp_i32_scale:%bb.0
 ; CHECK:Cluster ld/st SU(4) - SU(3)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4): STRWui %1, %0, 1
-; CHECK:SU(3): STRWui %1, %0, 2
-; CHECK:SU(2): STRWui %1, %0, 3
-; CHECK:SU(5): STRWui %1, %0, 4
+; CHECK:SU(4): STRWui %1:gpr32, %0:gpr64common, 1
+; CHECK:SU(3): STRWui %1:gpr32, %0:gpr64common, 2
+; CHECK:SU(2): STRWui %1:gpr32, %0:gpr64common, 3
+; CHECK:SU(5): STRWui %1:gpr32, %0:gpr64common, 4
 define i32 @stp_i32_scale(i32* nocapture %P, i32 %v) {
 entry:
   %arrayidx = getelementptr inbounds i32, i32* %P, i32 3
@@ -47,10 +47,10 @@ entry:
 ; CHECK-LABEL:stp_i64_unscale:%bb.0 entry
 ; CHECK:Cluster ld/st SU(5) - SU(2)
 ; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5): STURXi %1, %0, -32
-; CHECK:SU(2): STURXi %1, %0, -24
-; CHECK:SU(4): STURXi %1, %0, -16
-; CHECK:SU(3): STURXi %1, %0, -8
+; CHECK:SU(5): STURXi %1:gpr64, %0:gpr64common, -32
+; CHECK:SU(2): STURXi %1:gpr64, %0:gpr64common, -24
+; CHECK:SU(4): STURXi %1:gpr64, %0:gpr64common, -16
+; CHECK:SU(3): STURXi %1:gpr64, %0:gpr64common, -8
 define void @stp_i64_unscale(i64* nocapture %P, i64 %v) #0 {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 -3
@@ -68,10 +68,10 @@ entry:
 ; CHECK-LABEL:stp_i32_unscale:%bb.0 entry
 ; CHECK:Cluster ld/st SU(5) - SU(2)
 ; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5): STURWi %1, %0, -16
-; CHECK:SU(2): STURWi %1, %0, -12
-; CHECK:SU(4): STURWi %1, %0, -8
-; CHECK:SU(3): STURWi %1, %0, -4
+; CHECK:SU(5): STURWi %1:gpr32, %0:gpr64common, -16
+; CHECK:SU(2): STURWi %1:gpr32, %0:gpr64common, -12
+; CHECK:SU(4): STURWi %1:gpr32, %0:gpr64common, -8
+; CHECK:SU(3): STURWi %1:gpr32, %0:gpr64common, -4
 define void @stp_i32_unscale(i32* nocapture %P, i32 %v) #0 {
 entry:
   %arrayidx = getelementptr inbounds i32, i32* %P, i32 -3
@@ -89,10 +89,10 @@ entry:
 ; CHECK-LABEL:stp_double:%bb.0
 ; CHECK:Cluster ld/st SU(3) - SU(4)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3): STRDui %1, %0, 1
-; CHECK:SU(4): STRDui %1, %0, 2
-; CHECK:SU(2): STRDui %1, %0, 3
-; CHECK:SU(5): STRDui %1, %0, 4
+; CHECK:SU(3): STRDui %1:fpr64, %0:gpr64common, 1
+; CHECK:SU(4): STRDui %1:fpr64, %0:gpr64common, 2
+; CHECK:SU(2): STRDui %1:fpr64, %0:gpr64common, 3
+; CHECK:SU(5): STRDui %1:fpr64, %0:gpr64common, 4
 define void @stp_double(double* nocapture %P, double %v) {
 entry:
   %arrayidx = getelementptr inbounds double, double* %P, i64 3
@@ -110,10 +110,10 @@ entry:
 ; CHECK-LABEL:stp_float:%bb.0
 ; CHECK:Cluster ld/st SU(3) - SU(4)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3): STRSui %1, %0, 1
-; CHECK:SU(4): STRSui %1, %0, 2
-; CHECK:SU(2): STRSui %1, %0, 3
-; CHECK:SU(5): STRSui %1, %0, 4
+; CHECK:SU(3): STRSui %1:fpr32, %0:gpr64common, 1
+; CHECK:SU(4): STRSui %1:fpr32, %0:gpr64common, 2
+; CHECK:SU(2): STRSui %1:fpr32, %0:gpr64common, 3
+; CHECK:SU(5): STRSui %1:fpr32, %0:gpr64common, 4
 define void @stp_float(float* nocapture %P, float %v) {
 entry:
   %arrayidx = getelementptr inbounds float, float* %P, i64 3
@@ -130,10 +130,10 @@ entry:
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: stp_volatile:%bb.0
 ; CHECK-NOT: Cluster ld/st
-; CHECK:SU(2): STRXui %1, %0, 3; mem:Volatile
-; CHECK:SU(3): STRXui %1, %0, 2; mem:Volatile
-; CHECK:SU(4): STRXui %1, %0, 1; mem:Volatile
-; CHECK:SU(5): STRXui %1, %0, 4; mem:Volatile
+; CHECK:SU(2): STRXui %1:gpr64, %0:gpr64common, 3; mem:Volatile
+; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2; mem:Volatile
+; CHECK:SU(4): STRXui %1:gpr64, %0:gpr64common, 1; mem:Volatile
+; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4; mem:Volatile
 define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
index bbb699bbb468..8af6b8220470 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
@@ -6,7 +6,7 @@
 ;
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: shiftable
-; CHECK: SU(2): %2:gpr64common = SUBXri %1, 20, 0
+; CHECK: SU(2): %2:gpr64common = SUBXri %1:gpr64common, 20, 0
 ; CHECK: Successors:
 ; CHECK-NEXT: SU(4): Data Latency=1 Reg=%2
 ; CHECK-NEXT: SU(3): Data Latency=2 Reg=%2
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index 28169ff209c3..88d6a68ee014 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -5,14 +5,14 @@
 ;
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: misched_bug:%bb.0 entry
-; CHECK: SU(2): %2:gpr32 = LDRWui %0, 1; mem:LD4[%ptr1_plus1]
+; CHECK: SU(2): %2:gpr32 = LDRWui %0:gpr64common, 1; mem:LD4[%ptr1_plus1]
 ; CHECK: Successors:
 ; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2
 ; CHECK-NEXT: SU(4): Ord Latency=0
-; CHECK: SU(3): STRWui %wzr, %0, 0; mem:ST4[%ptr1]
+; CHECK: SU(3): STRWui %wzr, %0:gpr64common, 0; mem:ST4[%ptr1]
 ; CHECK: Successors:
 ; CHECK: SU(4): Ord Latency=0
-; CHECK: SU(4): STRWui %wzr, %1, 0; mem:ST4[%ptr2]
+; CHECK: SU(4): STRWui %wzr, %1:gpr64common, 0; mem:ST4[%ptr2]
 ; CHECK: SU(5): %w0 = COPY %2
 ; CHECK: ** ScheduleDAGMI::schedule picking next node
 define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
diff --git a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index d7eafde19161..8b8f3f0771ff 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -42,57 +42,57 @@
 # CHECK_SWIFT: Latency : 2
 # CHECK_R52: Latency : 2
 #
-# CHECK: SU(3): %3:rgpr = t2LDRi12 %2, 0, 14, %noreg; mem:LD4[@g1](dereferenceable)
+# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, %noreg; mem:LD4[@g1](dereferenceable)
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 3
 # CHECK_R52: Latency : 4
 #
-# CHECK : SU(6): %6 = t2ADDrr %3, %3, 14, %noreg, %noreg
+# CHECK : SU(6): %6 = t2ADDrr %3:rgpr, %3:rgpr, 14, %noreg, %noreg
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 1
 # CHECK_R52: Latency : 3

-# CHECK: SU(7): %7:rgpr = t2SDIV %6, %5, 14, %noreg
+# CHECK: SU(7): %7:rgpr = t2SDIV %6:rgpr, %5:rgpr, 14, %noreg
 # CHECK_A9: Latency : 0
 # CHECK_SWIFT: Latency : 14
 # CHECK_R52: Latency : 8
 #
-# CHECK: SU(8): t2STRi12 %7, %2, 0, 14, %noreg; mem:ST4[@g1]
+# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, %noreg; mem:ST4[@g1]
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 0
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(9): %8:rgpr = t2SMULBB %1, %1, 14, %noreg
+# CHECK: SU(9): %8:rgpr = t2SMULBB %1:rgpr, %1:rgpr, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(10): %9:rgpr = t2SMLABB %0, %0, %8, 14, %noreg
+# CHECK: SU(10): %9:rgpr = t2SMLABB %0:rgpr, %0:rgpr, %8:rgpr, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(11): %10:rgpr = t2UXTH %9, 0, 14, %noreg
+# CHECK: SU(11): %10:rgpr = t2UXTH %9:rgpr, 0, 14, %noreg
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 1
 # CHECK_R52: Latency : 3
 #
-# CHECK: SU(12): %11:rgpr = t2MUL %10, %7, 14, %noreg
+# CHECK: SU(12): %11:rgpr = t2MUL %10:rgpr, %7:rgpr, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(13): %12:rgpr = t2MLA %11, %11, %11, 14, %noreg
+# CHECK: SU(13): %12:rgpr = t2MLA %11:rgpr, %11:rgpr, %11:rgpr, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12, %12, 14, %noreg
+# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12:rgpr, %12:rgpr, 14, %noreg
 # CHECK_A9: Latency : 3
 # CHECK_SWIFT: Latency : 5
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12, %12, %19, %20, 14, %noreg
+# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12:rgpr, %12:rgpr, %19:rgpr, %20:rgpr, 14, %noreg
 # CHECK_A9: Latency : 3
 # CHECK_SWIFT: Latency : 7
 # CHECK_R52: Latency : 4
diff --git a/llvm/test/CodeGen/ARM/misched-int-basic.mir b/llvm/test/CodeGen/ARM/misched-int-basic.mir
index b77fd0f93b22..0428ea99c803 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic.mir
@@ -28,37 +28,37 @@
 }

 # CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %2:gpr = SMULBB %1, %1, 14, %noreg
+# CHECK: SU(2): %2:gpr = SMULBB %1:gpr, %1:gpr, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(3): %3:gprnopc = SMLABB %0, %0, %2, 14, %noreg
+# CHECK: SU(3): %3:gprnopc = SMLABB %0:gprnopc, %0:gprnopc, %2:gpr, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(4): %4:gprnopc = UXTH %3, 0, 14, %noreg
+# CHECK: SU(4): %4:gprnopc = UXTH %3:gprnopc, 0, 14, %noreg
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 1
 # CHECK_R52: Latency : 3
 #
-# CHECK: SU(5): %5:gprnopc = MUL %4, %4, 14, %noreg, %noreg
+# CHECK: SU(5): %5:gprnopc = MUL %4:gprnopc, %4:gprnopc, 14, %noreg, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(6): %6:gprnopc = MLA %5, %5, %5, 14, %noreg, %noreg
+# CHECK: SU(6): %6:gprnopc = MLA %5:gprnopc, %5:gprnopc, %5:gprnopc, 14, %noreg, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(7): %7:gprnopc, %8:gprnopc = UMULL %6, %6, 14, %noreg, %noreg
+# CHECK: SU(7): %7:gprnopc, %8:gprnopc = UMULL %6:gprnopc, %6:gprnopc, 14, %noreg, %noreg
 # CHECK_A9: Latency : 3
 # CHECK_SWIFT: Latency : 5
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6, %6, %13, %14, 14, %noreg, %noreg
+# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6:gprnopc, %6:gprnopc, %13:gpr, %14:gprnopc, 14, %noreg, %noreg
 # CHECK_SWIFT: Latency : 7
 # CHECK_A9: Latency : 3
 # CHECK_R52: Latency : 4
diff --git a/llvm/test/CodeGen/ARM/single-issue-r52.mir b/llvm/test/CodeGen/ARM/single-issue-r52.mir
index ce5eb32e8d5a..22751592ff7e 100644
--- a/llvm/test/CodeGen/ARM/single-issue-r52.mir
+++ b/llvm/test/CodeGen/ARM/single-issue-r52.mir
@@ -20,13 +20,13 @@

 # CHECK: ********** MI Scheduling **********
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0, 8, 14, %noreg; mem:LD32[%A](align=8)
+# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, %noreg; mem:LD32[%A](align=8)
 # CHECK: Latency : 8
 # CHECK: Single Issue : true;
-# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0, %1.dsub_1, 14, %noreg
+# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0:qqpr, %1.dsub_1:qqpr, 14, %noreg
 # CHECK: Latency : 5
 # CHECK: Single Issue : false;
-# CHECK: SU(3): %5:gpr, %6:gpr = VMOVRRD %4, 14, %noreg
+# CHECK: SU(3): %5:gpr, %6:gpr = VMOVRRD %4:dpr, 14, %noreg
 # CHECK: Latency : 4
 # CHECK: Single Issue : false;

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
index f4d359a20651..959d45a01129 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
 ; the fallback path.

 ; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(s80), %0(p0); mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0); mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
 ; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
 define void @test_x86_fp80_dump(x86_fp80* %ptr){
diff --git a/llvm/test/CodeGen/X86/misched-copy.ll b/llvm/test/CodeGen/X86/misched-copy.ll
index 690e1de8f508..9456bf6cfac6 100644
--- a/llvm/test/CodeGen/X86/misched-copy.ll
+++ b/llvm/test/CodeGen/X86/misched-copy.ll
@@ -10,7 +10,7 @@
 ;
 ; CHECK: *** Final schedule for %bb.1 ***
 ; CHECK: %eax = COPY
-; CHECK-NEXT: MUL32r %{{[0-9]+}}, implicit-def %eax, implicit-def %edx, implicit-def dead %eflags, implicit %eax
+; CHECK-NEXT: MUL32r %{{[0-9]+}}:gr32, implicit-def %eax, implicit-def %edx, implicit-def dead %eflags, implicit %eax
 ; CHECK-NEXT: COPY %e{{[ad]}}x
 ; CHECK-NEXT: COPY %e{{[ad]}}x
 ; CHECK: DIVSSrm
diff --git a/llvm/unittests/CodeGen/MachineOperandTest.cpp b/llvm/unittests/CodeGen/MachineOperandTest.cpp
index 6861dbfe383d..8f35406f1600 100644
--- a/llvm/unittests/CodeGen/MachineOperandTest.cpp
+++ b/llvm/unittests/CodeGen/MachineOperandTest.cpp
@@ -118,7 +118,6 @@ TEST(MachineOperandTest, PrintSubRegIndex) {
   // TRI and IntrinsicInfo we can print the operand as a subreg index.
   std::string str;
   raw_string_ostream OS(str);
-  ModuleSlotTracker DummyMST(nullptr);
   MachineOperand::printSubRegIdx(OS, MO.getImm(), nullptr);
   ASSERT_TRUE(OS.str() == "%subreg.3");
 }
@@ -296,7 +295,7 @@ TEST(MachineOperandTest, PrintMetadata) {
   LLVMContext Ctx;
   Module M("MachineOperandMDNodeTest", Ctx);
   NamedMDNode *MD = M.getOrInsertNamedMetadata("namedmd");
-  ModuleSlotTracker DummyMST(&M);
+  ModuleSlotTracker MST(&M);
   Metadata *MDS = MDString::get(Ctx, "foo");
   MDNode *Node = MDNode::get(Ctx, MDS);
   MD->addOperand(Node);
@@ -312,7 +311,8 @@ TEST(MachineOperandTest, PrintMetadata) {
   std::string str;
   // Print a MachineOperand containing a metadata node.
   raw_string_ostream OS(str);
-  MO.print(OS, DummyMST, LLT{}, false, false, 0, /*TRI=*/nullptr,
+  MO.print(OS, MST, LLT{}, /*PrintDef=*/false, /*IsVerbose=*/false,
+           /*ShouldPrintRegisterTies=*/false, 0, /*TRI=*/nullptr,
            /*IntrinsicInfo=*/nullptr);
   ASSERT_TRUE(OS.str() == "!0");
 }
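
Usage note (illustrative only, not part of the patch): the sketch below shows how a caller might pass the new IsVerbose flag when dumping a single MachineInstr outside of MachineFunction::print. The helper name printOneInstr is hypothetical; only the MachineInstr::print signature comes from the change above.

// Sketch: print one MachineInstr with verbose operand info (register
// classes/banks on uses), as you would when showing only a fragment of a
// function. printOneInstr is a hypothetical helper for illustration.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/raw_ostream.h"

static void printOneInstr(const llvm::MachineInstr &MI) {
  // IsVerbose defaults to true, so MI.print(llvm::errs()) behaves the same.
  // MachineFunction::print passes /*IsVerbose=*/false instead, because the
  // register classes are already visible elsewhere in the full dump.
  MI.print(llvm::errs(), /*IsVerbose=*/true, /*SkipOpers=*/false,
           /*SkipDebugLoc=*/false, /*TII=*/nullptr);
}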