forked from mindspore-Ecosystem/mindspore
!377 Tensor end() iterator performance problem
Merge pull request !377 from h.farahat/end_iterator
commit 7874e96b9b
@@ -85,6 +85,7 @@ Tensor &Tensor::operator=(Tensor &&other) noexcept {
     shape_ = other.shape();
     type_ = other.type();
     data_ = other.StartAddr();
+    data_end_ = other.data_end_;
     data_allocator_ = std::move(other.data_allocator_);
     other.Invalidate();
   }
@@ -208,11 +209,13 @@ Tensor::~Tensor() {
     if (data_allocator_ != nullptr) {
       data_allocator_->deallocate(data_);
       data_ = nullptr;
+      data_end_ = nullptr;
     } else {
       // If we didn't have an allocator, but data_ is not null then it must
       // be a stand-alone tensor that used malloc directly.
       free(data_);
       data_ = nullptr;
+      data_end_ = nullptr;
     }
   }
 }
@@ -338,8 +341,10 @@ unsigned char *Tensor::StartAddr() {
     // on the shape and type and allocate it.
     if (data_allocator_ != nullptr) {
       data_ = data_allocator_->allocate(this->SizeInBytes());
+      data_end_ = data_ + SizeInBytes();
     } else {
       data_ = static_cast<unsigned char *>(malloc(this->SizeInBytes()));
+      data_end_ = data_ + SizeInBytes();
       if (data_ == nullptr) {
         return nullptr;
       }
@@ -362,6 +367,7 @@ void Tensor::Invalidate() {
   shape_ = TensorShape::CreateUnknownRankShape();
   type_ = DataType(DataType::DE_UNKNOWN);
   data_ = nullptr;
+  data_end_ = nullptr;
   data_allocator_ = nullptr;
 }

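Taken together, the tensor.cc hunks above enforce a single invariant: every place that assigns, moves, or releases data_ also updates data_end_ in the same step. The sketch below is not MindSpore code; it is a minimal stand-alone illustration of that invariant under the assumption of a plain malloc/free-backed buffer, with made-up names (RawBuffer and its members).

#include <cstddef>
#include <cstdlib>

// Minimal sketch of the invariant the patch maintains: data_ and data_end_
// always change together. All names here are illustrative, not MindSpore API.
class RawBuffer {
 public:
  explicit RawBuffer(std::size_t size_in_bytes) {
    data_ = static_cast<unsigned char *>(std::malloc(size_in_bytes));
    // Mirrors StartAddr(): cache the end pointer as soon as data_ is set.
    data_end_ = (data_ != nullptr) ? data_ + size_in_bytes : nullptr;
  }

  // Mirrors Tensor::operator=(Tensor &&): take data_ and data_end_ together,
  // then invalidate the source so its destructor does not free the buffer.
  RawBuffer(RawBuffer &&other) noexcept : data_(other.data_), data_end_(other.data_end_) {
    other.data_ = nullptr;
    other.data_end_ = nullptr;
  }

  // Mirrors Tensor::~Tensor(): release the memory and reset both pointers.
  ~RawBuffer() {
    std::free(data_);
    data_ = nullptr;
    data_end_ = nullptr;
  }

  RawBuffer(const RawBuffer &) = delete;
  RawBuffer &operator=(const RawBuffer &) = delete;

  unsigned char *begin() const { return data_; }
  // The cached pointer makes end() a plain load, with no size recomputation.
  unsigned char *end() const { return data_end_; }

 private:
  unsigned char *data_ = nullptr;
  unsigned char *data_end_ = nullptr;
};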
@@ -363,7 +363,7 @@ class Tensor {
   // @return TensorIterator
   template <typename T>
   TensorIterator<T> end() {
-    return TensorIterator<T>(data_ + SizeInBytes());
+    return TensorIterator<T>(data_end_);
   }

  protected:
@@ -402,6 +402,8 @@ class Tensor {
   unsigned char *data_;
   // An allocator for data_
   CharAllocPtr data_allocator_;
+  // pointer to the end of the physical data
+  unsigned char *data_end_ = nullptr;
 };
 }  // namespace dataset
 }  // namespace mindspore
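Why this helps: before the change, end() rebuilt its pointer with data_ + SizeInBytes() on every call, while the patched version returns the cached data_end_. The toy comparison below is an illustration only, not the MindSpore implementation: ToyTensor, end_old(), and end_new() are invented names, and SizeInBytes() here merely stands in for recomputing the byte count from shape and element size.

#include <cstddef>
#include <numeric>
#include <vector>

// Toy before/after of the end() change. Only the idea (cached end pointer
// versus recomputed one) comes from the patch; the rest is made up.
struct ToyTensor {
  std::vector<int> shape;             // e.g. {batch, height, width, channels}
  std::size_t type_size = 1;          // bytes per element
  unsigned char *data = nullptr;
  unsigned char *data_end = nullptr;  // kept in sync with data, as in the patch

  // Stand-in for Tensor::SizeInBytes(): derived from shape and type each call.
  std::size_t SizeInBytes() const {
    return std::accumulate(shape.begin(), shape.end(), type_size,
                           [](std::size_t acc, int dim) { return acc * static_cast<std::size_t>(dim); });
  }

  // Pre-patch behaviour: every call pays for SizeInBytes().
  unsigned char *end_old() { return data + SizeInBytes(); }

  // Post-patch behaviour: a single cached load.
  unsigned char *end_new() { return data_end; }
};

In a tight loop such as for (auto it = t.begin<T>(); it != t.end<T>(); ++it) (assuming a matching begin<T>() exists alongside the end<T>() shown above), end<T>() is evaluated on every comparison unless the compiler can prove it loop-invariant, so returning the cached pointer removes that per-iteration cost.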