waring_fix_1.6
commit 1496a575c4
parent 181addec81
@@ -414,9 +414,13 @@ std::shared_ptr<SchemaObj> SchemaCharIF(const std::vector<char> &schema_file) {

 // Function to create a Batch dataset
 BatchDataset::BatchDataset(const std::shared_ptr<Dataset> &input, int32_t batch_size, bool drop_remainder) {
-  // Default values
-  auto ds = std::make_shared<BatchNode>(input->IRNode(), batch_size, drop_remainder);
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    // Default values
+    auto ds = std::make_shared<BatchNode>(input->IRNode(), batch_size, drop_remainder);
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 #ifndef ENABLE_ANDROID
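The recurring change in this commit is the null-input guard shown above: each `*Dataset` constructor used to call `input->IRNode()` unconditionally, which dereferences a null `std::shared_ptr` when `input` is empty. A minimal, self-contained sketch of the pattern, using toy stand-in types rather than MindSpore's real classes:

```cpp
// Toy stand-ins for Dataset/BatchNode; not MindSpore's real classes. Demonstrates the
// guard added throughout this commit: calling a member through a null shared_ptr
// (input->IRNode()) is undefined behavior, so the constructor now checks first.
#include <iostream>
#include <memory>

struct Node {
  int Get() const { return 42; }
};

struct Wrapper {
  explicit Wrapper(const std::shared_ptr<Node> &input) {
    if (input == nullptr) {
      node_ = nullptr;  // leave the handle empty; later validation can report this
    } else {
      node_ = input;  // safe to use input-> from here on
    }
  }
  std::shared_ptr<Node> node_;
};

int main() {
  Wrapper ok(std::make_shared<Node>());
  Wrapper empty(nullptr);  // before the guard, this path dereferenced a null pointer
  std::cout << (ok.node_ ? ok.node_->Get() : -1) << " "
            << (empty.node_ ? empty.node_->Get() : -1) << "\n";
}
```

With the guard, a null input simply produces an empty `ir_node_` instead of a crash.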
@@ -445,12 +449,15 @@ BucketBatchByLengthDataset::BucketBatchByLengthDataset(
     }
     map.insert({p.first, {TensorShape(p.second.first), rt}});
   }
-  auto ds = std::make_shared<BucketBatchByLengthNode>(input->IRNode(), VectorCharToString(column_names),
-                                                      bucket_boundaries, bucket_batch_sizes, c_func,
-                                                      PadInfoCharToString(map), pad_to_bucket_boundary, drop_remainder);
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<BucketBatchByLengthNode>(
+      input->IRNode(), VectorCharToString(column_names), bucket_boundaries, bucket_batch_sizes, c_func,
+      PadInfoCharToString(map), pad_to_bucket_boundary, drop_remainder);
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 ConcatDataset::ConcatDataset(const std::vector<std::shared_ptr<Dataset>> &datasets) {
@@ -472,9 +479,13 @@ FilterDataset::FilterDataset(const std::shared_ptr<Dataset> &input,
   if (predicate) {
     c_func = std::make_shared<CFuncOp>(std::bind(FuncPtrConverter, predicate, std::placeholders::_1));
   }
-  auto ds = std::make_shared<FilterNode>(input->IRNode(), c_func, VectorCharToString(input_columns));
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<FilterNode>(input->IRNode(), c_func, VectorCharToString(input_columns));
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }
 #endif
@@ -485,53 +496,81 @@ MapDataset::MapDataset(const std::shared_ptr<Dataset> &input,
                        const std::vector<std::vector<char>> &project_columns,
                        const std::shared_ptr<DatasetCache> &cache,
                        const std::vector<std::shared_ptr<DSCallback>> &callbacks) {
-  auto ds = std::make_shared<MapNode>(input->IRNode(), operations, VectorCharToString(input_columns),
-                                      VectorCharToString(output_columns), VectorCharToString(project_columns), cache,
-                                      callbacks);
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<MapNode>(input->IRNode(), operations, VectorCharToString(input_columns),
+                                        VectorCharToString(output_columns), VectorCharToString(project_columns), cache,
+                                        callbacks);
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 ProjectDataset::ProjectDataset(const std::shared_ptr<Dataset> &input, const std::vector<std::vector<char>> &columns) {
-  auto ds = std::make_shared<ProjectNode>(input->IRNode(), VectorCharToString(columns));
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<ProjectNode>(input->IRNode(), VectorCharToString(columns));
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 #ifndef ENABLE_ANDROID
 RenameDataset::RenameDataset(const std::shared_ptr<Dataset> &input, const std::vector<std::vector<char>> &input_columns,
                              const std::vector<std::vector<char>> &output_columns) {
-  auto ds = std::make_shared<RenameNode>(input->IRNode(), VectorCharToString(input_columns),
-                                         VectorCharToString(output_columns));
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<RenameNode>(input->IRNode(), VectorCharToString(input_columns),
+                                           VectorCharToString(output_columns));
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }
 #endif

 RepeatDataset::RepeatDataset(const std::shared_ptr<Dataset> &input, int32_t count) {
-  auto ds = std::make_shared<RepeatNode>(input->IRNode(), count);
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<RepeatNode>(input->IRNode(), count);
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 ShuffleDataset::ShuffleDataset(const std::shared_ptr<Dataset> &input, int32_t buffer_size) {
-  // Pass in reshuffle_each_epoch with true
-  auto ds = std::make_shared<ShuffleNode>(input->IRNode(), buffer_size, true);
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    // Pass in reshuffle_each_epoch with true
+    auto ds = std::make_shared<ShuffleNode>(input->IRNode(), buffer_size, true);
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 #ifndef ENABLE_ANDROID
 SkipDataset::SkipDataset(const std::shared_ptr<Dataset> &input, int32_t count) {
-  auto ds = std::make_shared<SkipNode>(input->IRNode(), count);
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<SkipNode>(input->IRNode(), count);
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 TakeDataset::TakeDataset(const std::shared_ptr<Dataset> &input, int32_t count) {
-  auto ds = std::make_shared<TakeNode>(input->IRNode(), count);
-
-  ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  if (input == nullptr) {
+    ir_node_ = nullptr;
+  } else {
+    auto ds = std::make_shared<TakeNode>(input->IRNode(), count);
+
+    ir_node_ = std::static_pointer_cast<DatasetNode>(ds);
+  }
 }

 ZipDataset::ZipDataset(const std::vector<std::shared_ptr<Dataset>> &datasets) {
@@ -513,7 +513,7 @@ Status Tensor::GetItemPtr(T **ptr, const std::vector<dsize_t> &index) const {
     dsize_t flat_idx;
     RETURN_IF_NOT_OK(shape_.ToFlatIndex(index, &flat_idx));
     *ptr = reinterpret_cast<T *>(data_ + flat_idx * type_.SizeInBytes());
-    RETURN_UNEXPECTED_IF_NULL(ptr);
+    RETURN_UNEXPECTED_IF_NULL(*ptr);

     return Status::OK();
   } else {
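A sketch of why the check moved from `ptr` to `*ptr`: the out-parameter `ptr` is the address of the caller's variable and cannot be null at that point, so the old check was a no-op; the computed data pointer `*ptr` is what may legitimately be null. Toy function below, not the real `Tensor` API:

```cpp
// Toy version of an out-parameter getter. `ptr` is the address of the caller's
// variable, so checking it after assignment never fails; the meaningful check is
// on the computed pointer *ptr.
#include <cassert>
#include <cstddef>
#include <cstdint>

void GetItem(uint8_t *base, std::size_t offset, uint8_t **ptr) {
  *ptr = (base != nullptr) ? base + offset : nullptr;
  // assert(ptr != nullptr);  // old check: always true here
  assert(*ptr != nullptr);  // new check: validates the actual result
}

int main() {
  uint8_t buf[8] = {};
  uint8_t *item = nullptr;
  GetItem(buf, 4, &item);
  return (item == buf + 4) ? 0 : 1;
}
```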
@@ -650,7 +650,6 @@ Status Tensor::GetBufferInfo(Tensor *t, py::buffer_info *out) {
                          t->Rank(),             /* Number of dimensions */
                          t->shape().AsVector(), /* Buffer dimensions */
                          t->Strides());
-  RETURN_UNEXPECTED_IF_NULL(out);
   return Status::OK();
 }
 #endif
@@ -857,7 +856,6 @@ Status Tensor::GetDataAsNumpyStrings(py::array *data) {
   (void)std::transform(strides.begin(), strides.end(), strides.begin(),
                        [&max_value](const auto &s) { return s * max_value; });
   *data = py::array(py::dtype("S" + std::to_string(max_value)), shape_.AsVector(), strides, tmp_data);
-  RETURN_UNEXPECTED_IF_NULL(data);
   data_allocator_->deallocate(reinterpret_cast<uchar *>(tmp_data));
   return Status::OK();
 }
@@ -804,13 +804,13 @@ inline Status Tensor::CreateFromVector<std::string>(const std::vector<std::strin
       return (*out)->Reshape(shape);
     }
   }
-  auto length_sum = [](dsize_t sum, const std::string &s) { return s.length() + sum; };
+  auto length_sum = [](size_t sum, const std::string &s) { return s.length() + sum; };
   dsize_t total_length = std::accumulate(items.begin(), items.end(), 0, length_sum);

   // total bytes needed = offset array + strings
   // offset array needs to store one offset var per element + 1 extra to get the length of the last string.
   // strings will be null-terminated --> need 1 extra byte per element
-  dsize_t num_bytes = (kOffsetSize + 1) * (*out)->shape_.NumOfElements() + kOffsetSize + total_length;
+  size_t num_bytes = (kOffsetSize + 1) * (*out)->shape_.NumOfElements() + kOffsetSize + total_length;

   RETURN_IF_NOT_OK((*out)->AllocateBuffer(num_bytes));
   auto offset_arr = reinterpret_cast<offset_t *>((*out)->data_);
@@ -164,12 +164,12 @@ class TensorRow {

   std::vector<std::string> getPath() const { return path_; }

-  void setPath(std::vector<std::string> path) { path_ = path; }
+  void setPath(const std::vector<std::string> &path) { path_ = path; }

   const vector_type &getRow() const { return row_; }

-  int64_t SizeInBytes() const {
-    size_t sz = 0;
+  dsize_t SizeInBytes() const {
+    dsize_t sz = 0;
     for (auto &it : row_) {
       sz += it->SizeInBytes();
     }
@@ -189,9 +189,11 @@ class TensorRow {

   void resize(size_type size) { row_.resize(size); }

-  bool empty() { return row_.empty(); }
+  const bool empty() { return row_.empty(); }

-  void insert(iterator position, iterator first, iterator last) { row_.insert(position, first, last); }
+  void insert(const_iterator position, const_iterator first, const_iterator last) {
+    row_.insert(position, first, last);
+  }

   // Wrapper functions to support vector element access
   reference at(size_type index) { return row_.at(index); }
@@ -230,7 +232,7 @@ class TensorRow {

   bool skip() const { return (static_cast<uint32_t>(tensor_row_flag_) & static_cast<uint32_t>(kFlagSkip)); }

-  TensorRowFlags Flags() { return tensor_row_flag_; }
+  const TensorRowFlags Flags() { return tensor_row_flag_; }

   explicit TensorRow(TensorRowFlags);
@@ -21,7 +21,7 @@

 namespace mindspore {
 namespace dataset {
-inline dataset::DataType MSTypeToDEType(TypeId data_type) {
+inline dataset::DataType MSTypeToDEType(const TypeId data_type) {
   switch (data_type) {
     case kNumberTypeBool:
       return dataset::DataType(dataset::DataType::DE_BOOL);
@@ -137,7 +137,7 @@ class Connector {
   // Resets the internal index tracking of the queue so that it can be used again with new inputs,
   // starting from the beginning.
   void Reset() {
-    for (int i = 0; i < queues_.size(); ++i) {
+    for (size_t i = 0; i < queues_.size(); ++i) {
       queues_[i]->Reset();
     }
     expect_consumer_ = 0;
@@ -158,16 +158,16 @@ class Connector {
   }

   // Get current size of connector.
-  int32_t size() const {
-    int32_t size = 0;
+  size_t size() const {
+    size_t size = 0;
     for (size_t i = 0; i < queues_.size(); ++i) {
       size += queues_[i]->size();
     }
     return size;
   }

-  int32_t capacity() const {
-    int32_t capacity = 0;
+  size_t capacity() const {
+    size_t capacity = 0;
     for (size_t i = 0; i < queues_.size(); ++i) {
       capacity += queues_[i]->capacity();
     }
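Several hunks in this commit (here, and in `USPSOp::CalculateNumRowsPerShard` below) replace a signed loop index or accumulator with `size_t`, since comparing `int` against an unsigned `.size()` triggers `-Wsign-compare`. A sketch of the warning and fix, assuming only the standard library:

```cpp
// Minimal sketch of the -Wsign-compare fix applied throughout this commit.
// Compile with -Wall: the commented-out loop draws
// "comparison of integer expressions of different signedness".
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> queues{1, 2, 3};
  // for (int i = 0; i < queues.size(); ++i) { ... }  // warns: int vs size_t
  for (size_t i = 0; i < queues.size(); ++i) {  // warning-free
    std::cout << queues[i] << "\n";
  }
}
```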
@@ -41,7 +41,7 @@ class TreeConsumer {
   TreeConsumer();

   /// \brief Destructor
-  ~TreeConsumer() = default;
+  virtual ~TreeConsumer() = default;
   /// Initializes the consumer, this involves constructing and preparing the tree.
   /// \param d The dataset node that represent the root of the IR tree.
   /// \return Status error code.
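The `virtual ~TreeConsumer() = default;` change (repeated below for `MapJob`, `SamplerRT`, and `Sampler`) addresses deletion through a base-class pointer: without a virtual destructor that is undefined behavior, flagged by `-Wdelete-non-virtual-dtor`. A toy sketch, not the real hierarchy:

```cpp
// Toy classes, not the real TreeConsumer hierarchy: deleting a derived object through
// a base-class pointer is undefined behavior unless the base destructor is virtual.
#include <iostream>
#include <memory>

struct Base {
  virtual ~Base() { std::cout << "~Base\n"; }  // drop `virtual` and ~Derived is skipped
};

struct Derived : Base {
  ~Derived() override { std::cout << "~Derived\n"; }
};

int main() {
  std::unique_ptr<Base> p = std::make_unique<Derived>();
  p.reset();  // correctly runs ~Derived then ~Base because the destructor is virtual
}
```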
@@ -157,7 +157,7 @@ class DataSchema {

   /// \brief getter
   /// \return The number of columns in the schema
-  int32_t NumColumns() const { return col_descs_.size(); }
+  size_t NumColumns() const { return col_descs_.size(); }

   bool Empty() const { return NumColumns() == 0; }
@@ -129,7 +129,6 @@ class DeviceQueueOp : public PipelineOp {
   // Description: Print info when first batch send successful in sink_mode
   void PrintEndInfoWhenFirstBatch(bool *first_push_flag);
-
  private:
 #ifdef ENABLE_TDTQUE
   void WaitContinueSignal() const;
   Status SendDataToAscend();
@@ -35,7 +35,7 @@ class MapJob {
   MapJob() = default;

   // Destructor
-  ~MapJob() = default;
+  virtual ~MapJob() = default;

   Status AddOperation(std::shared_ptr<TensorOp> operation) {
     ops_.push_back(operation);
@@ -685,7 +685,7 @@ Status CsvOp::ComputeColMap() {
   for (auto &csv_file : csv_files_list_) {
     Status rc = ColMapAnalyse(csv_file);

-    /* Process exception if ERROR in column name solving*/
+    /* Process exception if ERROR in column name solving */
     if (!rc.IsOk()) {
       MS_LOG(ERROR) << "Invalid file, failed to get column name list from csv file: " + csv_file;
       RETURN_STATUS_UNEXPECTED("Invalid file, failed to get column name list from csv file: " + csv_file);
@@ -787,7 +787,7 @@ bool CsvOp::ColumnNameValidate() {
     getline(handle, line);
     std::vector<std::string> col_names = split(line, field_delim_);

-    /* Analyse the column name and draw a conclusion*/
+    /* Analyse the column name and draw a conclusion */
     if (record.empty()) {  // Case the first file
       record = col_names;
       match_file = csv_file;
@@ -139,7 +139,7 @@ Status DIV2KOp::GetDIV2KLRDirRealName(const std::string &hr_dir_key, const std::
   if (lr_it == DatasetPramMap.end()) {
     std::string out_str = "{\n";
     std::for_each(DatasetPramMap.begin(), DatasetPramMap.end(),
-                  [&out_str](std::pair<std::string, std::string> item) -> void {
+                  [&out_str](const std::pair<std::string, std::string> &item) -> void {
                     out_str += ("\t" + item.first + ": " + item.second + ",\n");
                   });
     out_str += "\n}";
@@ -251,6 +251,7 @@ Status ImageFolderOp::CountRowsAndClasses(const std::string &path, const std::se
   }
   std::queue<std::string> folder_paths;
   std::shared_ptr<Path::DirIterator> dir_itr = Path::DirIterator::OpenDirectory(&dir);
+  RETURN_UNEXPECTED_IF_NULL(dir_itr);
   std::unordered_set<std::string> folder_names;
   while (dir_itr->HasNext()) {
     Path subdir = dir_itr->Next();
@@ -69,7 +69,7 @@ class SamplerRT {
   SamplerRT(const SamplerRT &s) : SamplerRT(s.num_samples_, s.samples_per_tensor_) {}

   // default destructor
-  ~SamplerRT() = default;
+  virtual ~SamplerRT() = default;

   // Get a list of sample ids.
   // @note It is Sampler responsibility to make sure that the id is not out of bound.
@@ -278,7 +278,7 @@ Status USPSOp::CalculateNumRowsPerShard() {
   }
   if (num_rows_ == 0) {
     std::stringstream ss;
-    for (int i = 0; i < data_files_list_.size(); ++i) {
+    for (size_t i = 0; i < data_files_list_.size(); ++i) {
       ss << " " << data_files_list_[i];
     }
     std::string file_list = ss.str();
@@ -62,8 +62,8 @@ Status GraphLoader::GetNodesAndEdges() {
     RETURN_IF_NOT_OK(edge_ptr->GetNode(&p));
     auto src_itr = n_id_map->find(p.first->id()), dst_itr = n_id_map->find(p.second->id());

-    CHECK_FAIL_RETURN_UNEXPECTED(src_itr != n_id_map->end(), "invalid src_id:" + std::to_string(src_itr->first));
-    CHECK_FAIL_RETURN_UNEXPECTED(dst_itr != n_id_map->end(), "invalid src_id:" + std::to_string(dst_itr->first));
+    CHECK_FAIL_RETURN_UNEXPECTED(src_itr != n_id_map->end(), "invalid src_id.");
+    CHECK_FAIL_RETURN_UNEXPECTED(dst_itr != n_id_map->end(), "invalid src_id.");

     RETURN_IF_NOT_OK(edge_ptr->SetNode({src_itr->second, dst_itr->second}));
     RETURN_IF_NOT_OK(src_itr->second->AddNeighbor(dst_itr->second, edge_ptr->weight()));
@@ -21,7 +21,6 @@

 namespace mindspore {
 namespace dataset {
-
 const std::unordered_map<DataTypePb, DataType::Type> g_pb2datatype_map{
   {DataTypePb::DE_PB_UNKNOWN, DataType::DE_UNKNOWN}, {DataTypePb::DE_PB_BOOL, DataType::DE_BOOL},
   {DataTypePb::DE_PB_INT8, DataType::DE_INT8},       {DataTypePb::DE_PB_UINT8, DataType::DE_UINT8},
@@ -79,6 +78,5 @@ Status PbToTensor(const TensorPb *tensor_pb, std::shared_ptr<Tensor> *tensor) {
   *tensor = std::move(tensor_out);
   return Status::OK();
 }
-
 }  // namespace dataset
 }  // namespace mindspore
@@ -67,7 +67,7 @@ Status SkipNode::GetDatasetSize(const std::shared_ptr<DatasetSizeGetter> &size_g
   int64_t num_rows;
   RETURN_IF_NOT_OK(children_[0]->GetDatasetSize(size_getter, estimate, &num_rows));
   *dataset_size = 0;
-  if (skip_count_ >= 0 && skip_count_ < num_rows) {
+  if (skip_count_ < num_rows) {
     *dataset_size = num_rows - skip_count_;
   }
   dataset_size_ = *dataset_size;
@@ -74,7 +74,7 @@ class MS_API Sampler : std::enable_shared_from_this<Sampler> {
   Sampler() = default;

   /// \brief Destructor
-  ~Sampler() = default;
+  virtual ~Sampler() = default;

   /// \brief A virtual function to add a child sampler.
   /// \param[in] child The child sampler to be added as a children of this sampler.
@@ -112,7 +112,7 @@ class MS_API SliceOption {
   explicit SliceOption(const std::vector<dsize_t> &indices) : indices_(indices) {}

   /// \param[in] slice Slice the generated indices from the slice object along the dimension.
-  explicit SliceOption(Slice slice) : slice_(slice) {}
+  explicit SliceOption(const Slice &slice) : slice_(slice) {}

   SliceOption(SliceOption const &slice) = default;
@@ -29,7 +29,7 @@ namespace mindspore {
 namespace dataset {
 class CFuncOp : public TensorOp {
  public:
-  explicit CFuncOp(std::function<TensorRow(TensorRow)> func) : c_func_ptr_(func) {}
+  explicit CFuncOp(const std::function<TensorRow(TensorRow)> &func) : c_func_ptr_(func) {}

   ~CFuncOp() override = default;
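This hunk, like the `setPath`, `SliceOption`, `GetLiteCVDataType`, and `Normalize` changes elsewhere in the commit, switches a by-value parameter to `const` reference so heavy arguments (`std::function`, `std::vector`) are not copied on every call. A sketch with illustrative names only, not from the MindSpore code base:

```cpp
// SumByValue copies its argument on every call (~8 MB here); SumByConstRef binds
// directly to the caller's vector with no copy.
#include <iostream>
#include <vector>

long SumByValue(std::vector<long> v) {
  long s = 0;
  for (long x : v) s += x;
  return s;
}

long SumByConstRef(const std::vector<long> &v) {
  long s = 0;
  for (long x : v) s += x;
  return s;
}

int main() {
  std::vector<long> big(1'000'000, 1);
  std::cout << SumByValue(big) << " " << SumByConstRef(big) << "\n";
}
```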
@@ -178,7 +178,6 @@ Status BoundingBox::UpdateBBoxesForCrop(TensorPtr *bbox_list, size_t *bbox_count
   // create new tensor and copy over bboxes still valid to the image
   // bboxes outside of new cropped region are ignored - empty tensor returned in case of none
   *bbox_count = correct_ind.size();
-  CHECK_FAIL_RETURN_UNEXPECTED(*bbox_count >= 0, "BoundingBox: correct_ind.size() is smaller than zero.");
   bbox_float temp = 0.0;
   for (auto slice : correct_ind) {  // for every index in the loop
     for (dsize_t ix = 0; ix < bboxDim; ix++) {
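The deleted check above could never fire: `*bbox_count` has an unsigned type (`size_t`), so `*bbox_count >= 0` is always true, and compilers report it as a tautological comparison. A one-file sketch of the warning:

```cpp
// Compile with -Wextra (or clang's -Wtautological-compare) to see:
// "comparison of unsigned expression >= 0 is always true".
#include <cstddef>
#include <iostream>

int main() {
  std::size_t count = 0;
  if (count >= 0) {  // tautology: unsigned values cannot be negative
    std::cout << "always taken\n";
  }
}
```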
@@ -140,7 +140,7 @@ class DvppCommon {
                                         uint32_t &decSize);
   static APP_ERROR GetPngDecodeDataSize(const void *data, uint32_t dataSize, acldvppPixelFormat format,
                                         uint32_t &decSize);
-  static APP_ERROR GetJpegEncodeStrideSize(std::shared_ptr<DvppDataInfo> &input);
+  static APP_ERROR GetJpegEncodeStrideSize(std::shared_ptr<DvppDataInfo> &inputImage);
   static APP_ERROR SetEncodeLevel(uint32_t level, acldvppJpegeConfig &jpegeConfig);
   static APP_ERROR GetVideoDecodeStrideSize(uint32_t width, uint32_t height, acldvppPixelFormat format,
                                             uint32_t &widthStride, uint32_t &heightStride);
@@ -150,7 +150,7 @@ class DvppCommon {
   // The following interfaces can be called only when the DvppCommon object is initialized with Init
   APP_ERROR VpcResize(DvppDataInfo &input, DvppDataInfo &output, bool withSynchronize,
                       VpcProcessType processType = VPC_PT_DEFAULT);
-  APP_ERROR VpcCrop(const DvppCropInputInfo &input, const DvppDataInfo &output, bool withSynchronize);
+  APP_ERROR VpcCrop(const DvppCropInputInfo &cropInput, const DvppDataInfo &output, bool withSynchronize);
   APP_ERROR JpegDecode(DvppDataInfo &input, DvppDataInfo &output, bool withSynchronize);

   APP_ERROR PngDecode(DvppDataInfo &input, DvppDataInfo &output, bool withSynchronize);
@@ -162,7 +162,7 @@ class DvppCommon {

   // These functions started with "Combine" encapsulate the DVPP process together, malloc DVPP memory,
   // transfer pictures from host to device, and then execute the DVPP operation.
-  // The caller needs to pay attention to the release of the memory alloced in these functions.
+  // The caller needs to pay attention to the release of the memory allocated in these functions.
   // You can call the ReleaseDvppBuffer function to release memory after use completely.
   APP_ERROR CombineResizeProcess(DvppDataInfo &input, DvppDataInfo &output, bool withSynchronize,
                                  VpcProcessType processType = VPC_PT_DEFAULT);
@@ -77,7 +77,7 @@ enum {
   APP_ERR_COMM_OPEN_FAIL = APP_ERR_COMM_BASE + 19,          // Device, file or resource open failed
   APP_ERR_COMM_READ_FAIL = APP_ERR_COMM_BASE + 20,          // Device, file or resource read failed
   APP_ERR_COMM_WRITE_FAIL = APP_ERR_COMM_BASE + 21,         // Device, file or resource write failed
-  APP_ERR_COMM_DESTORY_FAIL = APP_ERR_COMM_BASE + 22,       // Device, file or resource destory failed
+  APP_ERR_COMM_DESTORY_FAIL = APP_ERR_COMM_BASE + 22,       // Device, file or resource destroy failed
   APP_ERR_COMM_EXIT = APP_ERR_COMM_BASE + 23,               // End of data stream, stop the application
   APP_ERR_COMM_CONNECTION_CLOSE = APP_ERR_COMM_BASE + 24,   // Out of connection, Communication shutdown
   APP_ERR_COMM_CONNECTION_FAILURE = APP_ERR_COMM_BASE + 25, // connection fail
@@ -89,8 +89,8 @@ enum {
   APP_ERR_DVPP_CROP_FAIL = APP_ERR_DVPP_BASE + 1,           // DVPP: crop fail
   APP_ERR_DVPP_RESIZE_FAIL = APP_ERR_DVPP_BASE + 2,         // DVPP: resize fail
   APP_ERR_DVPP_CROP_RESIZE_FAIL = APP_ERR_DVPP_BASE + 3,    // DVPP: corp and resize fail
-  APP_ERR_DVPP_CONVERT_FROMAT_FAIL = APP_ERR_DVPP_BASE + 4, // DVPP: convert image fromat fail
-  APP_ERR_DVPP_VPC_FAIL = APP_ERR_DVPP_BASE + 5,            // DVPP: VPC(crop, resize, convert fromat) fail
+  APP_ERR_DVPP_CONVERT_FROMAT_FAIL = APP_ERR_DVPP_BASE + 4, // DVPP: convert image format fail
+  APP_ERR_DVPP_VPC_FAIL = APP_ERR_DVPP_BASE + 5,            // DVPP: VPC(crop, resize, convert format) fail
   APP_ERR_DVPP_JPEG_DECODE_FAIL = APP_ERR_DVPP_BASE + 6,    // DVPP: decode jpeg or jpg fail
   APP_ERR_DVPP_JPEG_ENCODE_FAIL = APP_ERR_DVPP_BASE + 7,    // DVPP: encode jpeg or jpg fail
   APP_ERR_DVPP_PNG_DECODE_FAIL = APP_ERR_DVPP_BASE + 8,     // DVPP: encode png fail
@@ -128,7 +128,7 @@ enum {
   // define the error code of blocking queue
   APP_ERR_QUEUE_BASE = 5000,
   APP_ERR_QUEUE_EMPTY = APP_ERR_QUEUE_BASE + 1,  // Queue: empty queue
-  APP_ERR_QUEUE_STOPED = APP_ERR_QUEUE_BASE + 2, // Queue: queue stoped
+  APP_ERR_QUEUE_STOPED = APP_ERR_QUEUE_BASE + 2, // Queue: queue stopped
   APP_ERROR_QUEUE_FULL = APP_ERR_QUEUE_BASE + 3, // Queue: full queue

   // define the idrecognition web error code
@@ -235,7 +235,7 @@ const std::string APP_ERR_INFER_LOG_STRING[] = {
 const std::string APP_ERR_QUEUE_LOG_STRING[] = {
   [0] = "Placeholder",
   [1] = "empty queue",
-  [2] = "queue stoped",
+  [2] = "queue stopped",
   [3] = "full queue",
 };
@@ -1022,7 +1022,7 @@ APP_ERROR MDAclProcess::device_memory_release() {

 std::vector<uint32_t> MDAclProcess::Get_Primary_Shape() {
   std::vector<uint32_t> pri_shape;
-  if (!dvppCommon_) {
+  if (dvppCommon_) {
     pri_shape.emplace_back(dvppCommon_->GetDecodedImage()->heightStride);
     pri_shape.emplace_back(dvppCommon_->GetDecodedImage()->widthStride);
   }
@@ -593,7 +593,6 @@ bool InitFromPixel(const unsigned char *data, LPixelType pixel_type, LDataType d
   } else {
     return false;
   }
-  return true;
 }

 bool ConvertTo(const LiteMat &src, LiteMat &dst, double scale) {
@@ -694,7 +693,6 @@ bool Crop(const LiteMat &src, LiteMat &dst, int x, int y, int w, int h) {
   } else {
     return false;
   }
-  return true;
 }

 static bool CheckZero(const std::vector<float> &vs) {
@@ -936,7 +934,6 @@ bool Split(const LiteMat &src, std::vector<LiteMat> &mv) {
   } else {
     return false;
   }
-  return false;
 }

 template <typename T>
@@ -1285,7 +1282,6 @@ bool Transpose(const LiteMat &src, LiteMat &dst) {
   } else {
     return false;
   }
-  return true;
 }

 template <typename T>
@@ -1483,7 +1479,7 @@ void JacobiSVD(LiteMat &A, LiteMat &_W, LiteMat &V) {
 }

 template <typename T>
-void SVBkSb(int m, int n, int nb, LiteMat w, LiteMat u, LiteMat v, const LiteMat src2, LiteMat dst) {
+void SVBkSb(int m, int n, LiteMat w, LiteMat u, LiteMat v, const LiteMat src2, LiteMat dst) {
   T eps = DBL_EPSILON * 2;
   double thresgold = 0;
   int nm = std::min(m, n);
@@ -1522,7 +1518,6 @@ bool GetPerspectiveTransformImpl(const LiteMat &src1, const LiteMat &src2, LiteM
   int m = src1.height_;
   int m_ = m;
   int n = src1.width_;
-  int nb = src2.width_;

   if (m < n) {
     return false;
@@ -1540,7 +1535,7 @@ bool GetPerspectiveTransformImpl(const LiteMat &src1, const LiteMat &src2, LiteM
   JacobiSVD<double>(a, w, v);
   u = a;

-  SVBkSb<double>(m_, n, nb, w, u, v, src2, dst);
+  SVBkSb<double>(m_, n, w, u, v, src2, dst);
   return true;
 }
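The two `SVBkSb` hunks above remove the `nb` parameter, which the function body never read; `-Wunused-parameter` flags exactly this, and the fix drops it from the signature, from the caller's local `nb` declaration, and from the call site. A toy sketch of the same cleanup, with illustrative names:

```cpp
// Before: int Scale(int value, int factor, int unused_nb) { return value * factor; }
// -Wunused-parameter warns about unused_nb, so the parameter is removed everywhere.
#include <iostream>

int Scale(int value, int factor) { return value * factor; }

int main() {
  std::cout << Scale(6, 7) << "\n";  // call sites updated to the shorter signature
}
```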
@@ -230,7 +230,7 @@ Status JpegCropAndDecode(const std::shared_ptr<Tensor> &input, std::shared_ptr<T
   return Status::OK();
 }

-static LDataType GetLiteCVDataType(DataType data_type) {
+static LDataType GetLiteCVDataType(const DataType &data_type) {
   if (data_type == DataType::DE_UINT8) {
     return LDataType::UINT8;
   } else if (data_type == DataType::DE_FLOAT32) {
@@ -326,8 +326,8 @@ Status GetJpegImageInfo(const std::shared_ptr<Tensor> &input, int *img_width, in
   return Status::OK();
 }

-Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, std::vector<float> vec_mean,
-                 std::vector<float> vec_std) {
+Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
+                 const std::vector<float> &vec_mean, const std::vector<float> &vec_std) {
   if (input->Rank() != 3) {
     RETURN_STATUS_UNEXPECTED("Normalize: image shape is not <H,W,C>.");
   }
@@ -82,8 +82,8 @@ Status GetJpegImageInfo(const std::shared_ptr<Tensor> &input, int *img_width, in
 /// \param[in] mean Tensor of shape <3> and type DE_FLOAT32 which are mean of each channel in RGB order
 /// \param[in] std Tensor of shape <3> and type DE_FLOAT32 which are std of each channel in RGB order
 /// \param[out] output Normalized image Tensor of same input shape and type DE_FLOAT32
-Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, std::vector<float> vec_mean,
-                 std::vector<float> vec_std);
+Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
+                 const std::vector<float> &vec_mean, const std::vector<float> &vec_std);

 /// \brief Returns Resized image.
 /// \param[in] input
@@ -120,7 +120,7 @@ void RandomCropOp::GenRandomXY(int *x, int *y, const int32_t &padded_image_w, co

 Status RandomCropOp::Compute(const TensorRow &input, TensorRow *output) {
   IO_CHECK_VECTOR(input, output);
-  if (input.size() != 1) {
+  if (input.size() > 1) {
     for (size_t i = 0; i < input.size() - 1; i++) {
       if (input[i]->Rank() != 2 && input[i]->Rank() != 3) {
         std::string err_msg =
@@ -170,8 +170,7 @@ Status RandomCropOp::OutputShape(const std::vector<TensorShape> &inputs, std::ve
   TensorShape out = TensorShape{crop_height_, crop_width_};
   if (inputs[0].Rank() == 2) {
     (void)outputs.emplace_back(out);
-  }
-  if (inputs[0].Rank() == 3) {
+  } else if (inputs[0].Rank() == 3) {
     (void)outputs.emplace_back(out.AppendDim(inputs[0][2]));
   }
   if (!outputs.empty()) {
@@ -46,9 +46,9 @@ Status SoftDvppDecodeRandomCropResizeJpegOp::GetCropInfo(const std::shared_ptr<T
   int y = 0;
   int crop_heigh = 0;
   int crop_widht = 0;
-  std::unique_ptr<RandomCropAndResizeOp> random_crop_resize(
-    new RandomCropAndResizeOp(target_height_, target_width_, scale_lb_, scale_ub_, aspect_lb_, aspect_ub_,
-                              InterpolationMode::kLinear, max_attempts_));
+  auto random_crop_resize =
+    std::make_unique<RandomCropAndResizeOp>(target_height_, target_width_, scale_lb_, scale_ub_, aspect_lb_,
+                                            aspect_ub_, InterpolationMode::kLinear, max_attempts_);
   RETURN_IF_NOT_OK(random_crop_resize->GetCropBox(img_height, img_width, &x, &y, &crop_heigh, &crop_widht));
   crop_info->left = x;
   crop_info->up = y;
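The hunk above swaps `std::unique_ptr<T>(new T(...))` for `std::make_unique<T>(...)`. Besides being shorter, `make_unique` cannot leak if another argument expression throws, and it avoids naming the type twice. A minimal sketch with a toy type standing in for `RandomCropAndResizeOp`:

```cpp
// Toy type; the real operator takes many crop/resize parameters.
#include <memory>

struct ResizeOp {
  ResizeOp(int h, int w) : height(h), width(w) {}
  int height, width;
};

int main() {
  auto op = std::make_unique<ResizeOp>(224, 224);  // preferred since C++14
  return op->height == 224 ? 0 : 1;
}
```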
@@ -38,6 +38,7 @@ Status JsonHelper::CreateAlbum(const std::string &in_dir, const std::string &out
   // iterate over in dir and create json for all images
   uint64_t index = 0;
   auto dir_it = Path::DirIterator::OpenDirectory(&base_dir);
+  RETURN_UNEXPECTED_IF_NULL(dir_it);
   while (dir_it->HasNext()) {
     Path v = dir_it->Next();
     // check if found file fits image extension