Fix several const-correctness issues, resolving some -Wcast-qual warnings.

llvm-svn: 54349
Dan Gohman 2008-08-05 14:45:15 +00:00
parent 0e5546fa61
commit e955c481fd
4 changed files with 17 additions and 16 deletions
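
For context on the warning itself: -Wcast-qual fires whenever a cast silently drops a const or volatile qualifier, which is what the C-style casts removed below, such as (SUnit *) and (void*), were doing. The fix applied throughout is to propagate const through signatures and containers rather than cast it away. A minimal standalone sketch of the before and after, using made-up names rather than anything from the LLVM tree:

  struct Node { int Value; };

  // Read-only helper declared without const: a caller that only has a
  // 'const Node *' must cast the qualifier away to use it, and GCC flags
  // that cast under -Wcast-qual.
  bool isLiveBad(Node *N) { return N->Value != 0; }

  // Const-correct version: the qualifier is carried through instead.
  bool isLiveGood(const Node *N) { return N->Value != 0; }

  bool demo(const Node *N) {
    // return isLiveBad((Node *)N);   // compiles, but warns with -Wcast-qual
    return isLiveGood(N);             // no cast, no warning
  }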


@@ -88,7 +88,7 @@ public:
void Schedule();
/// IsReachable - Checks if SU is reachable from TargetSU.
-bool IsReachable(SUnit *SU, SUnit *TargetSU);
+bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
/// willCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
@@ -155,7 +155,7 @@ private:
/// DFS - make a DFS traversal and mark all nodes affected by the
/// edge insertion. These nodes will later get new topological indexes
/// by means of the Shift method.
-void DFS(SUnit *SU, int UpperBound, bool& HasLoop);
+void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
/// Shift - reassign topological indexes for the nodes in the DAG
/// to preserve the topological ordering.
@@ -395,7 +395,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
}
/// IsReachable - Checks if SU is reachable from TargetSU.
-bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
+bool ScheduleDAGRRList::IsReachable(const SUnit *SU, const SUnit *TargetSU) {
// If insertion of the edge SU->TargetSU would create a cycle
// then there is a path from TargetSU to SU.
int UpperBound, LowerBound;
@@ -543,8 +543,8 @@ bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
-void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
-std::vector<SUnit*> WorkList;
+void ScheduleDAGRRList::DFS(const SUnit *SU, int UpperBound, bool& HasLoop) {
+std::vector<const SUnit*> WorkList;
WorkList.reserve(SUnits.size());
WorkList.push_back(SU);
@@ -1403,7 +1403,7 @@ namespace {
class VISIBILITY_HIDDEN BURegReductionPriorityQueue
: public RegReductionPriorityQueue<bu_ls_rr_sort> {
// SUnits - The SUnits for the current graph.
-const std::vector<SUnit> *SUnits;
+std::vector<SUnit> *SUnits;
// SethiUllmanNumbers - The SethiUllman number for each node.
std::vector<unsigned> SethiUllmanNumbers;
@@ -1692,11 +1692,11 @@ BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
-static bool hasCopyToRegUse(SUnit *SU) {
+static bool hasCopyToRegUse(const SUnit *SU) {
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl) continue;
-SUnit *SuccSU = I->Dep;
+const SUnit *SuccSU = I->Dep;
if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
return true;
}
@@ -1705,7 +1705,7 @@ static bool hasCopyToRegUse(SUnit *SU) {
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
-static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
+static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
SDNode *N = SuccSU->Node;
@@ -1739,7 +1739,7 @@ static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
/// commutable, favor the one that's not commutable.
void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
-SUnit *SU = (SUnit *)&((*SUnits)[i]);
+SUnit *SU = &(*SUnits)[i];
if (!SU->isTwoAddress)
continue;
@@ -1819,7 +1819,7 @@ static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
unsigned Sum = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
-SUnit *SuccSU = I->Dep;
+const SUnit *SuccSU = I->Dep;
for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
EE = SuccSU->Preds.end(); II != EE; ++II) {
SUnit *PredSU = II->Dep;
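
A knock-on effect visible in the hunks above: once IsReachable and DFS take const SUnit*, every structure that holds the visited nodes has to carry the qualifier as well (hence the WorkList element type change), and locals such as SuccSU follow suit. A compressed sketch of that propagation pattern, using invented names rather than the scheduler's real types:

  #include <vector>

  struct Unit { std::vector<Unit*> Succs; };

  // Taking the root as pointer-to-const forces the worklist to store
  // 'const Unit *' too; a plain 'Unit *' worklist would need a cast that
  // -Wcast-qual rejects. (Assumes an acyclic graph, for brevity.)
  void visitAll(const Unit *Root) {
    std::vector<const Unit*> WorkList;
    WorkList.push_back(Root);
    while (!WorkList.empty()) {
      const Unit *U = WorkList.back();
      WorkList.pop_back();
      for (std::vector<Unit*>::const_iterator I = U->Succs.begin(),
             E = U->Succs.end(); I != E; ++I)
        WorkList.push_back(*I);   // Unit* converts to const Unit* implicitly
    }
  }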


@@ -58,13 +58,13 @@ bool SmallPtrSetImpl::insert_imp(const void * Ptr) {
Grow();
// Okay, we know we have space. Find a hash bucket.
-void **Bucket = const_cast<void**>(FindBucketFor((void*)Ptr));
+const void **Bucket = const_cast<const void**>(FindBucketFor(Ptr));
if (*Bucket == Ptr) return false; // Already inserted, good.
// Otherwise, insert it!
if (*Bucket == getTombstoneMarker())
--NumTombstones;
-*Bucket = (void*)Ptr;
+*Bucket = Ptr;
++NumElements; // Track density.
return true;
}
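
The SmallPtrSet change follows the same theme: the set stores opaque pointers that it never writes through, so the bucket slots can hold const void* directly and both (void*) casts go away. A much simplified illustration of that idea, a toy table rather than SmallPtrSet's real layout:

  #include <cstddef>

  // Toy fixed-size table of opaque keys (no probing, no growth).
  struct TinyPtrTable {
    static const std::size_t Size = 8;
    const void *Buckets[Size];   // slots are pointer-to-const...

    TinyPtrTable() { for (std::size_t i = 0; i != Size; ++i) Buckets[i] = 0; }

    bool insert(const void *Ptr) {
      const void **Bucket = &Buckets[reinterpret_cast<std::size_t>(Ptr) % Size];
      if (*Bucket == Ptr) return false;   // already inserted
      *Bucket = Ptr;                      // ...so storing needs no (void*) cast
      return true;
    }
  };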


@@ -1070,7 +1070,8 @@ private:
}
typename MapTy::iterator I =
-Map.find(MapKey((TypeClass*)CP->getRawType(), getValType(CP)));
+Map.find(MapKey(static_cast<const TypeClass*>(CP->getRawType()),
+getValType(CP)));
if (I == Map.end() || I->second != CP) {
// FIXME: This should not use a linear scan. If this gets to be a
// performance problem, someone should look at this.


@@ -84,9 +84,9 @@ void Type::destroy() const {
// Now call the destructor for the subclass directly because we're going
// to delete this as an array of char.
if (isa<FunctionType>(this))
-((FunctionType*)this)->FunctionType::~FunctionType();
+static_cast<const FunctionType*>(this)->FunctionType::~FunctionType();
else
-((StructType*)this)->StructType::~StructType();
+static_cast<const StructType*>(this)->StructType::~StructType();
// Finally, remove the memory as an array deallocation of the chars it was
// constructed from.
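
The Type::destroy() and Constants.cpp hunks are the same fix in a different guise: inside a const member function, this has pointer-to-const type, and the old C-style downcasts quietly stripped the qualifier on the way to the derived type. static_cast to a pointer-to-const derived class performs the same downcast without dropping const, and explicitly invoking a destructor through it is still legal, since destructors may be called on const objects. A reduced illustration with hypothetical class names:

  struct Shape {
    bool IsCircle;
    void destroy() const;   // destroys *this as its actual subclass
  };
  struct Circle : Shape { ~Circle() {} };
  struct Square : Shape { ~Square() {} };

  void Shape::destroy() const {
    // 'this' is 'const Shape *' here. '(Circle*)this' would discard the
    // qualifier (-Wcast-qual); static_cast to a pointer-to-const keeps it,
    // and the explicit destructor call is still permitted.
    if (IsCircle)
      static_cast<const Circle*>(this)->Circle::~Circle();
    else
      static_cast<const Square*>(this)->Square::~Square();
  }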