Fix const-correctness issues with the SrcValue handling in the
memory intrinsic expansion code.

llvm-svn: 49666
Dan Gohman 2008-04-14 17:55:48 +00:00
parent d80edddccd
commit 2505d86783
7 changed files with 28 additions and 28 deletions
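
The change is mechanical: every SrcValue parameter in the memcpy/memmove/memset
expansion path goes from Value* to const Value*, since this code only reads
those pointers to describe the memory being accessed. As a rough illustration
of why the qualifier matters for callers, here is a minimal standalone sketch
(toy Value type and hypothetical buildMemcpy helper for illustration only,
not LLVM's API): with the read-only parameters const-qualified, a caller
holding only const Value* pointers can use the function without a const_cast.

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Toy stand-ins for illustration only; these are not LLVM's types or APIs.
    struct Value {
      std::string Name;
    };

    // Taking const Value* documents that the callee never modifies the operand;
    // like the expansion code, it only reads the pointer to describe memory.
    void buildMemcpy(const Value *DstSV, uint64_t DstOff,
                     const Value *SrcSV, uint64_t SrcOff) {
      std::cout << "copy to " << DstSV->Name << "+" << DstOff
                << " from " << SrcSV->Name << "+" << SrcOff << "\n";
    }

    int main() {
      // A caller that only holds const pointers can call the API directly;
      // with non-const parameters it would need a const_cast.
      const Value Dst{"dst"};
      const Value Src{"src"};
      buildMemcpy(&Dst, 0, &Src, 0);
      return 0;
    }

With plain Value* parameters, the same call would compile only with a
const_cast; const-qualifying pointer parameters that are never written
through removes that need.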


@@ -326,17 +326,17 @@ public:
   SDOperand getMemcpy(SDOperand Chain, SDOperand Dst, SDOperand Src,
                       SDOperand Size, unsigned Align,
                       bool AlwaysInline,
-                      Value *DstSV, uint64_t DstOff,
-                      Value *SrcSV, uint64_t SrcOff);
+                      const Value *DstSV, uint64_t DstOff,
+                      const Value *SrcSV, uint64_t SrcOff);
   SDOperand getMemmove(SDOperand Chain, SDOperand Dst, SDOperand Src,
                        SDOperand Size, unsigned Align,
-                       Value *DstSV, uint64_t DstOff,
-                       Value *SrcSV, uint64_t SrcOff);
+                       const Value *DstSV, uint64_t DstOff,
+                       const Value *SrcSV, uint64_t SrcOff);
   SDOperand getMemset(SDOperand Chain, SDOperand Dst, SDOperand Src,
                       SDOperand Size, unsigned Align,
-                      Value *DstSV, uint64_t DstOff);
+                      const Value *DstSV, uint64_t DstOff);
   /// getSetCC - Helper function to make it easier to build SetCC's if you just
   /// have an ISD::CondCode instead of an SDOperand.


@@ -967,8 +967,8 @@ public:
                           SDOperand Op1, SDOperand Op2,
                           SDOperand Op3, unsigned Align,
                           bool AlwaysInline,
-                          Value *DstSV, uint64_t DstOff,
-                          Value *SrcSV, uint64_t SrcOff) {
+                          const Value *DstSV, uint64_t DstOff,
+                          const Value *SrcSV, uint64_t SrcOff) {
     return SDOperand();
   }
@@ -983,8 +983,8 @@ public:
                           SDOperand Chain,
                           SDOperand Op1, SDOperand Op2,
                           SDOperand Op3, unsigned Align,
-                          Value *DstSV, uint64_t DstOff,
-                          Value *SrcSV, uint64_t SrcOff) {
+                          const Value *DstSV, uint64_t DstOff,
+                          const Value *SrcSV, uint64_t SrcOff) {
     return SDOperand();
   }
@@ -999,7 +999,7 @@ public:
                           SDOperand Chain,
                           SDOperand Op1, SDOperand Op2,
                           SDOperand Op3, unsigned Align,
-                          Value *DstSV, uint64_t DstOff) {
+                          const Value *DstSV, uint64_t DstOff) {
     return SDOperand();
   }


@@ -2500,8 +2500,8 @@ static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG,
                                   SDOperand Src, uint64_t Size,
                                   unsigned Align,
                                   bool AlwaysInline,
-                                  Value *DstSV, uint64_t DstOff,
-                                  Value *SrcSV, uint64_t SrcOff) {
+                                  const Value *DstSV, uint64_t DstOff,
+                                  const Value *SrcSV, uint64_t SrcOff) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   // Expand memcpy to a series of store ops if the size operand falls below
@@ -2573,7 +2573,7 @@ static SDOperand getMemsetStores(SelectionDAG &DAG,
                                   SDOperand Chain, SDOperand Dst,
                                   SDOperand Src, uint64_t Size,
                                   unsigned Align,
-                                  Value *DstSV, uint64_t DstOff) {
+                                  const Value *DstSV, uint64_t DstOff) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   // Expand memset to a series of load/store ops if the size operand
@@ -2604,8 +2604,8 @@ static SDOperand getMemsetStores(SelectionDAG &DAG,
 SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst,
                                   SDOperand Src, SDOperand Size,
                                   unsigned Align, bool AlwaysInline,
-                                  Value *DstSV, uint64_t DstOff,
-                                  Value *SrcSV, uint64_t SrcOff) {
+                                  const Value *DstSV, uint64_t DstOff,
+                                  const Value *SrcSV, uint64_t SrcOff) {
   // Check to see if we should lower the memcpy to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -2658,8 +2658,8 @@ SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst,
 SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst,
                                    SDOperand Src, SDOperand Size,
                                    unsigned Align,
-                                   Value *DstSV, uint64_t DstOff,
-                                   Value *SrcSV, uint64_t SrcOff) {
+                                   const Value *DstSV, uint64_t DstOff,
+                                   const Value *SrcSV, uint64_t SrcOff) {
   // TODO: Optimize small memmove cases with simple loads and stores,
   // ensuring that all loads precede all stores. This can cause severe
@@ -2691,7 +2691,7 @@ SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst,
 SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst,
                                   SDOperand Src, SDOperand Size,
                                   unsigned Align,
-                                  Value *DstSV, uint64_t DstOff) {
+                                  const Value *DstSV, uint64_t DstOff) {
   // Check to see if we should lower the memset to stores first.
   // For cases within the target-specified limits, this is the best choice.


@@ -1247,8 +1247,8 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                                   SDOperand Dst, SDOperand Src,
                                   SDOperand Size, unsigned Align,
                                   bool AlwaysInline,
-                                  Value *DstSV, uint64_t DstOff,
-                                  Value *SrcSV, uint64_t SrcOff){
+                                  const Value *DstSV, uint64_t DstOff,
+                                  const Value *SrcSV, uint64_t SrcOff){
   // Do repeated 4-byte loads and stores. To be improved.
   // This requires 4-byte alignment.
   if ((Align & 3) != 0)


@@ -149,8 +149,8 @@ namespace llvm {
                               SDOperand Dst, SDOperand Src,
                               SDOperand Size, unsigned Align,
                               bool AlwaysInline,
-                              Value *DstSV, uint64_t DstOff,
-                              Value *SrcSV, uint64_t SrcOff);
+                              const Value *DstSV, uint64_t DstOff,
+                              const Value *SrcSV, uint64_t SrcOff);
   };
 }


@@ -4664,7 +4664,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG,
                                   SDOperand Chain,
                                   SDOperand Dst, SDOperand Src,
                                   SDOperand Size, unsigned Align,
-                                  Value *DstSV, uint64_t DstOff) {
+                                  const Value *DstSV, uint64_t DstOff) {
   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
   /// If not DWORD aligned or size is more than the threshold, call the library.
@@ -4804,8 +4804,8 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                                   SDOperand Dst, SDOperand Src,
                                   SDOperand Size, unsigned Align,
                                   bool AlwaysInline,
-                                  Value *DstSV, uint64_t DstOff,
-                                  Value *SrcSV, uint64_t SrcOff){
+                                  const Value *DstSV, uint64_t DstOff,
+                                  const Value *SrcSV, uint64_t SrcOff){
   // This requires the copy size to be a constant, preferrably
   // within a subtarget-specific limit.


@@ -550,14 +550,14 @@ namespace llvm {
                               SDOperand Chain,
                               SDOperand Dst, SDOperand Src,
                               SDOperand Size, unsigned Align,
-                              Value *DstSV, uint64_t DstOff);
+                              const Value *DstSV, uint64_t DstOff);
     SDOperand EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                               SDOperand Chain,
                               SDOperand Dst, SDOperand Src,
                               SDOperand Size, unsigned Align,
                               bool AlwaysInline,
-                              Value *DstSV, uint64_t DstOff,
-                              Value *SrcSV, uint64_t SrcOff);
+                              const Value *DstSV, uint64_t DstOff,
+                              const Value *SrcSV, uint64_t SrcOff);
   };
 }