[PBQP] Cautiously update edge costs in the solver
The NodeMetadata are maintained incrementally. When an edge between two nodes has its cost updated, in the course of graph reduction for example, the NodeMetadata first need the old edge cost removed and then the new edge cost added. Only once the NodeMetadata have been fully updated does it become safe to consider promoting the nodes to the ConservativelyAllocatable or OptimallyReducible sets. Previously, this promotion occurred right after removing the old cost, which broke the assumption that a ConservativelyAllocatable node should not be spilled.

This patch also adds asserts to:
- enforce the invariant that a node's reduction state can not be downgraded,
- ensure that only nodes which are not provably allocatable or are optimally reducible can be spilled.

llvm-svn: 228816
commit de79026d5e (parent 9fd8cdc009)
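The mechanics behind the fix can be distilled into a small, self-contained sketch. The code below is not the LLVM sources: SimpleNodeMetadata, EdgeCost and updateEdgeCost are hypothetical stand-ins that only mimic the shape of the real NodeMetadata interface, but they show why a node may only be promoted after the old edge cost has been removed and the new one added, and how the two new asserts (monotonic reduction state, spillable-only spilling) guard that invariant.

// A minimal sketch, assuming a single node and one incident edge; all names
// here are hypothetical and only mimic the real NodeMetadata interface.
#include <cassert>
#include <cstdio>

enum ReductionState {
  Unprocessed,
  NotProvablyAllocatable,
  ConservativelyAllocatable,
  OptimallyReducible
};

// The cost an edge contributes to a node: how many register options it denies.
struct EdgeCost {
  unsigned DeniedOptions;
};

class SimpleNodeMetadata {
  ReductionState RS = Unprocessed;
  unsigned NumOptions;        // registers allowed for this node
  unsigned DeniedOptions = 0; // options ruled out by incident edges

public:
  explicit SimpleNodeMetadata(unsigned NumOptions) : NumOptions(NumOptions) {}

  // Mirrors the new assert: a node's reduction state may only progress up.
  void setReductionState(ReductionState NewRS) {
    assert(NewRS >= RS && "A node's reduction state can not be downgraded");
    RS = NewRS;
  }
  ReductionState getReductionState() const { return RS; }

  // Mirrors the new isSpillable() predicate.
  bool isSpillable() const {
    return RS == NotProvablyAllocatable || RS == OptimallyReducible;
  }

  void handleAddEdge(const EdgeCost &C) { DeniedOptions += C.DeniedOptions; }
  void handleRemoveEdge(const EdgeCost &C) { DeniedOptions -= C.DeniedOptions; }

  bool isConservativelyAllocatable() const { return DeniedOptions < NumOptions; }
};

// The safe ordering: remove the old cost, add the new one, and only then
// consider promotion. Promoting right after the removal (the old behaviour)
// would look at a transiently low DeniedOptions and could mark the node
// ConservativelyAllocatable even though the new cost still denies everything.
void updateEdgeCost(SimpleNodeMetadata &NMd, const EdgeCost &Old,
                    const EdgeCost &New) {
  NMd.handleRemoveEdge(Old);
  NMd.handleAddEdge(New);
  if (NMd.isConservativelyAllocatable())
    NMd.setReductionState(ConservativelyAllocatable);
}

int main() {
  SimpleNodeMetadata NMd(/*NumOptions=*/4);
  NMd.setReductionState(NotProvablyAllocatable);
  NMd.handleAddEdge({/*DeniedOptions=*/4}); // every option is currently denied

  // Replace the edge cost with one that still denies every option. With the
  // fixed ordering the node stays NotProvablyAllocatable and remains spillable.
  updateEdgeCost(NMd, /*Old=*/{4}, /*New=*/{4});
  std::printf("spillable: %d\n", NMd.isSpillable()); // prints 1
  return 0;
}

With the old ordering, DeniedOptions would transiently drop to zero between the remove and the add, the node would be promoted to ConservativelyAllocatable on that stale value, and could later end up being spilled anyway, which is exactly the broken assumption the commit message describes.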
@@ -507,14 +507,14 @@ namespace PBQP {
     return getNode(NId).getAdjEdgeIds().size();
   }

-  /// @brief Set an edge's cost matrix.
+  /// @brief Update an edge's cost matrix.
   /// @param EId Edge id.
   /// @param Costs New cost matrix.
   template <typename OtherMatrixT>
-  void setEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
+  void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
     MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
     if (Solver)
-      Solver->handleSetEdgeCosts(EId, *AllocatedCosts);
+      Solver->handleUpdateCosts(EId, *AllocatedCosts);
     getEdge(EId).Costs = AllocatedCosts;
   }

@@ -132,9 +132,9 @@ namespace PBQP {
       } else {
         const Matrix &YZECosts = G.getEdgeCosts(YZEId);
         if (YNId == G.getEdgeNode1Id(YZEId)) {
-          G.setEdgeCosts(YZEId, Delta + YZECosts);
+          G.updateEdgeCosts(YZEId, Delta + YZECosts);
         } else {
-          G.setEdgeCosts(YZEId, Delta.transpose() + YZECosts);
+          G.updateEdgeCosts(YZEId, Delta.transpose() + YZECosts);
         }
       }

@@ -180,10 +180,15 @@ class NodeMetadata {
 public:
   typedef RegAlloc::AllowedRegVector AllowedRegVector;

-  typedef enum { Unprocessed,
-                 OptimallyReducible,
-                 ConservativelyAllocatable,
-                 NotProvablyAllocatable } ReductionState;
+  // The node's reduction state. The order in this enum is important,
+  // as it is assumed nodes can only progress up (i.e. towards being
+  // optimally reducible) when reducing the graph.
+  typedef enum {
+    Unprocessed,
+    NotProvablyAllocatable,
+    ConservativelyAllocatable,
+    OptimallyReducible
+  } ReductionState;

   NodeMetadata()
     : RS(Unprocessed), NumOpts(0), DeniedOpts(0), OptUnsafeEdges(nullptr),
@@ -248,7 +253,13 @@ public:
   }

   ReductionState getReductionState() const { return RS; }
-  void setReductionState(ReductionState RS) { this->RS = RS; }
+  void setReductionState(ReductionState RS) {
+    assert(RS >= this->RS && "A node's reduction state can not be downgraded");
+    this->RS = RS;
+  }
+  bool isSpillable() const {
+    return RS == NotProvablyAllocatable || RS == OptimallyReducible;
+  }

   void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
     DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
@@ -333,6 +344,42 @@ public:
     NodeMetadata& NMd = G.getNodeMetadata(NId);
     const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
     NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
+    promote(NId, NMd);
+  }
+
+  void handleReconnectEdge(EdgeId EId, NodeId NId) {
+    NodeMetadata& NMd = G.getNodeMetadata(NId);
+    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
+    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
+  }
+
+  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
+    NodeId N1Id = G.getEdgeNode1Id(EId);
+    NodeId N2Id = G.getEdgeNode2Id(EId);
+    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
+    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
+    bool Transpose = N1Id != G.getEdgeNode1Id(EId);
+
+    // Metadata are computed incrementally. First, update them
+    // by removing the old cost.
+    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
+    N1Md.handleRemoveEdge(OldMMd, Transpose);
+    N2Md.handleRemoveEdge(OldMMd, !Transpose);
+
+    // And update now the metadata with the new cost.
+    const MatrixMetadata& MMd = NewCosts.getMetadata();
+    N1Md.handleAddEdge(MMd, Transpose);
+    N2Md.handleAddEdge(MMd, !Transpose);
+
+    // As the metadata may have changed with the update, the nodes may have
+    // become ConservativelyAllocatable or OptimallyReducible.
+    promote(N1Id, N1Md);
+    promote(N2Id, N2Md);
+  }
+
+private:
+
+  void promote(NodeId NId, NodeMetadata& NMd) {
     if (G.getNodeDegree(NId) == 3) {
       // This node is becoming optimally reducible.
       moveToOptimallyReducibleNodes(NId);
@@ -344,26 +391,6 @@ public:
     }
   }

-  void handleReconnectEdge(EdgeId EId, NodeId NId) {
-    NodeMetadata& NMd = G.getNodeMetadata(NId);
-    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
-    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
-  }
-
-  void handleSetEdgeCosts(EdgeId EId, const Matrix& NewCosts) {
-    handleRemoveEdge(EId);
-
-    NodeId N1Id = G.getEdgeNode1Id(EId);
-    NodeId N2Id = G.getEdgeNode2Id(EId);
-    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
-    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
-    const MatrixMetadata& MMd = NewCosts.getMetadata();
-    N1Md.handleAddEdge(MMd, N1Id != G.getEdgeNode1Id(EId));
-    N2Md.handleAddEdge(MMd, N2Id != G.getEdgeNode1Id(EId));
-  }
-
-private:
-
   void removeFromCurrentSet(NodeId NId) {
     switch (G.getNodeMetadata(NId).getReductionState()) {
     case NodeMetadata::Unprocessed: break;
@@ -401,7 +401,7 @@ public:
       }
       PBQPRAGraph::RawMatrix Costs(G.getEdgeCosts(EId));
       addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
-      G.setEdgeCosts(EId, std::move(Costs));
+      G.updateEdgeCosts(EId, std::move(Costs));
     }
   }
 }
@@ -621,6 +621,8 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
       assert(PReg != 0 && "Invalid preg selected.");
       VRM.assignVirt2Phys(VReg, PReg);
     } else {
+      assert(G.getNodeMetadata(NId).isSpillable() &&
+             "Spilling a node which can not be spilled.");
       // Spill VReg. If this introduces new intervals we'll need another round
       // of allocation.
       SmallVector<unsigned, 8> NewVRegs;
@@ -235,7 +235,7 @@ bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd,
       costs[i + 1][j + 1] = sameParityMax + 1.0;
     }
   }
-  G.setEdgeCosts(edge, std::move(costs));
+  G.updateEdgeCosts(edge, std::move(costs));

   return true;
 }
@@ -312,7 +312,7 @@ void A57ChainingConstraint::addInterChainConstraint(PBQPRAGraph &G, unsigned Rd,
           costs[i + 1][j + 1] = sameParityMax + 1.0;
         }
       }
-      G.setEdgeCosts(edge, std::move(costs));
+      G.updateEdgeCosts(edge, std::move(costs));
     }
   }
 }