From 3fd39d3694d32efa44242c099e923a7f4d982095 Mon Sep 17 00:00:00 2001
From: Paul Robinson
Date: Mon, 30 Nov 2020 11:42:08 -0800
Subject: [PATCH] [FastISel] NFC: Clean up unnecessary bookkeeping

Now that we flush the local value map for every instruction, we don't
need any extra flushes for specific cases. Also, LastFlushPoint is not
used for anything. Follow-ups to #dc35368 (D91734).

Differential Revision: https://reviews.llvm.org/D92338
---
 llvm/include/llvm/CodeGen/FastISel.h       |  4 ---
 llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 31 ----------------------
 2 files changed, 35 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/FastISel.h b/llvm/include/llvm/CodeGen/FastISel.h
index 96f809e11b37..a086d057384a 100644
--- a/llvm/include/llvm/CodeGen/FastISel.h
+++ b/llvm/include/llvm/CodeGen/FastISel.h
@@ -224,10 +224,6 @@ protected:
   /// makes sense (for example, on function calls)
   MachineInstr *EmitStartPt;
 
-  /// Last local value flush point. On a subsequent flush, no local value will
-  /// sink past this point.
-  MachineBasicBlock::iterator LastFlushPoint;
-
 public:
   virtual ~FastISel();
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 7615861149c6..5ef91f72dee7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -215,7 +215,6 @@ void FastISel::flushLocalValueMap() {
     LastLocalValue = EmitStartPt;
   recomputeInsertPt();
   SavedInsertPt = FuncInfo.InsertPt;
-  LastFlushPoint = FuncInfo.InsertPt;
 }
 
 bool FastISel::hasTrivialKill(const Value *V) {
@@ -437,8 +436,6 @@ void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
   assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
          "Invalid iterator!");
   while (I != E) {
-    if (LastFlushPoint == I)
-      LastFlushPoint = E;
     if (SavedInsertPt == I)
       SavedInsertPt = E;
     if (EmitStartPt == I)
@@ -1189,11 +1186,6 @@ bool FastISel::selectCall(const User *I) {
 
   // Handle simple inline asms.
   if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
-    // If the inline asm has side effects, then make sure that no local value
-    // lives across by flushing the local value map.
-    if (IA->hasSideEffects())
-      flushLocalValueMap();
-
     // Don't attempt to handle constraints.
     if (!IA->getConstraintString().empty())
       return false;
@@ -1223,15 +1215,6 @@ bool FastISel::selectCall(const User *I) {
   if (const auto *II = dyn_cast<IntrinsicInst>(Call))
     return selectIntrinsicCall(II);
 
-  // Usually, it does not make sense to initialize a value,
-  // make an unrelated function call and use the value, because
-  // it tends to be spilled on the stack. So, we move the pointer
-  // to the last local value to the beginning of the block, so that
-  // all the values which have already been materialized,
-  // appear after the call. It also makes sense to skip intrinsics
-  // since they tend to be inlined.
-  flushLocalValueMap();
-
   return lowerCall(Call);
 }
 
@@ -1388,20 +1371,6 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
     return selectXRayCustomEvent(II);
   case Intrinsic::xray_typedevent:
     return selectXRayTypedEvent(II);
-
-  case Intrinsic::memcpy:
-  case Intrinsic::memcpy_element_unordered_atomic:
-  case Intrinsic::memcpy_inline:
-  case Intrinsic::memmove:
-  case Intrinsic::memmove_element_unordered_atomic:
-  case Intrinsic::memset:
-  case Intrinsic::memset_element_unordered_atomic:
-    // Flush the local value map just like we do for regular calls,
-    // to avoid excessive spills and reloads.
-    // These intrinsics mostly turn into library calls at O0; and
-    // even memcpy_inline should be treated like one for this purpose.
-    flushLocalValueMap();
-    break;
   }
 
   return fastLowerIntrinsicCall(II);