forked from mindspore-Ecosystem/mindspore
!31435 Fix code check warnings
Merge pull request !31435 from xiaotianci/fix_code_check
commit 0a578d39e9
@@ -24,38 +24,39 @@
 namespace mindspore {
 namespace dataset {

 PYBIND_REGISTER(
   SchemaObj, 0, ([](const py::module *m) {
     (void)py::class_<SchemaObj, std::shared_ptr<SchemaObj>>(*m, "SchemaObj", "to create a SchemaObj")
-      .def(py::init([](std::string schema_file) {
+      .def(py::init([](const std::string &schema_file) {
         auto schema = std::make_shared<SchemaObj>(schema_file);
         THROW_IF_ERROR(schema->Init());
         return schema;
       }))
       .def("add_column",
-           [](SchemaObj &self, std::string name, TypeId de_type, std::vector<int32_t> shape) {
+           [](SchemaObj &self, const std::string &name, TypeId de_type, const std::vector<int32_t> &shape) {
             THROW_IF_ERROR(self.add_column(name, static_cast<mindspore::DataType>(de_type), shape));
           })
-      .def("add_column", [](SchemaObj &self, std::string name, std::string de_type,
-                            std::vector<int32_t> shape) { THROW_IF_ERROR(self.add_column(name, de_type, shape)); })
       .def("add_column",
-           [](SchemaObj &self, std::string name, TypeId de_type) {
+           [](SchemaObj &self, const std::string &name, const std::string &de_type, const std::vector<int32_t> &shape) {
+             THROW_IF_ERROR(self.add_column(name, de_type, shape));
+           })
+      .def("add_column",
+           [](SchemaObj &self, const std::string &name, TypeId de_type) {
             THROW_IF_ERROR(self.add_column(name, static_cast<mindspore::DataType>(de_type)));
           })
-      .def("add_column", [](SchemaObj &self, std::string name,
-                            std::string de_type) { THROW_IF_ERROR(self.add_column(name, de_type)); })
+      .def("add_column", [](SchemaObj &self, const std::string &name,
+                            const std::string &de_type) { THROW_IF_ERROR(self.add_column(name, de_type)); })
       .def("parse_columns",
-           [](SchemaObj &self, std::string json_string) { THROW_IF_ERROR(self.ParseColumnString(json_string)); })
+           [](SchemaObj &self, const std::string &json_string) { THROW_IF_ERROR(self.ParseColumnString(json_string)); })
       .def("to_json", &SchemaObj::to_json)
       .def("to_string", &SchemaObj::to_string)
       .def("from_string",
-           [](SchemaObj &self, std::string json_string) { THROW_IF_ERROR(self.FromJSONString(json_string)); })
-      .def("set_dataset_type", [](SchemaObj &self, std::string dataset_type) { self.set_dataset_type(dataset_type); })
+           [](SchemaObj &self, const std::string &json_string) { THROW_IF_ERROR(self.FromJSONString(json_string)); })
+      .def("set_dataset_type",
+           [](SchemaObj &self, const std::string &dataset_type) { self.set_dataset_type(dataset_type); })
       .def("set_num_rows", [](SchemaObj &self, int32_t num_rows) { self.set_num_rows(num_rows); })
       .def("get_num_rows", &SchemaObj::get_num_rows)
-      .def("__deepcopy__", [](py::object &schema, py::dict memo) { return schema; });
+      .def("__deepcopy__", [](const py::object &schema, const py::dict &memo) { return schema; });
   }));

 }  // namespace dataset
 }  // namespace mindspore
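Note: nearly every change in this hunk swaps a pass-by-value std::string parameter for a const reference. A minimal sketch of the difference, using hypothetical code rather than anything from this patch:

#include <string>

// Before: the argument is copied on every call even though it is never modified.
size_t ColumnNameLength(std::string name) { return name.size(); }

// After: same behavior, no copy; the const reference binds to the caller's
// object and promises not to modify it.
size_t ColumnNameLengthRef(const std::string &name) { return name.size(); }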
@@ -29,15 +29,16 @@
 namespace mindspore {
 namespace dataset {

 PYBIND_REGISTER(ShardOperator, 0, ([](const py::module *m) {
                   (void)py::class_<mindrecord::ShardOperator, std::shared_ptr<mindrecord::ShardOperator>>(
                     *m, "ShardOperator")
-                    .def("add_child", [](std::shared_ptr<mindrecord::ShardOperator> self,
-                                         std::shared_ptr<mindrecord::ShardOperator> child) { self->SetChildOp(child); })
-                    .def("set_num_samples", [](std::shared_ptr<mindrecord::ShardOperator> self, int64_t num_samples) {
-                      self->SetNumSamples(num_samples);
-                    });
+                    .def("add_child",
+                         [](const std::shared_ptr<mindrecord::ShardOperator> &self,
+                            const std::shared_ptr<mindrecord::ShardOperator> &child) {
+                           THROW_IF_ERROR(self->SetChildOp(child));
+                         })
+                    .def("set_num_samples", [](const std::shared_ptr<mindrecord::ShardOperator> &self,
+                                               int64_t num_samples) { self->SetNumSamples(num_samples); });
                 }));

 PYBIND_REGISTER(ShardDistributedSample, 1, ([](const py::module *m) {

@@ -51,7 +52,7 @@ PYBIND_REGISTER(
   ShardPkSample, 1, ([](const py::module *m) {
     (void)py::class_<mindrecord::ShardPkSample, mindrecord::ShardOperator, std::shared_ptr<mindrecord::ShardPkSample>>(
       *m, "MindrecordPkSampler")
-      .def(py::init([](int64_t kVal, std::string kColumn, bool shuffle, int64_t num_samples) {
+      .def(py::init([](int64_t kVal, const std::string &kColumn, bool shuffle, int64_t num_samples) {
        if (shuffle == true) {
          return std::make_shared<mindrecord::ShardPkSample>(kColumn, kVal, std::numeric_limits<int64_t>::max(),
                                                             GetSeed(), num_samples);

@@ -95,6 +96,5 @@ PYBIND_REGISTER(ShuffleMode, 1, ([](const py::module *m) {
                   .value("INFILE", ShuffleMode::kInfile)
                   .export_values();
               }));
-
 }  // namespace dataset
 }  // namespace mindspore
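Note: SetChildOp now returns a Status (see the shard_operator.h hunk below), so the binding wraps it in THROW_IF_ERROR to surface failures to Python. A rough sketch of the macro's general shape, using a stand-in Status type; the real definitions live in MindSpore's util headers:

#include <stdexcept>
#include <string>

// Hypothetical minimal Status standing in for mindspore's Status.
struct Status {
  bool ok;
  std::string msg;
  bool IsOk() const { return ok; }
  const std::string &ToString() const { return msg; }
};

// Evaluate the expression once; on failure, throw so pybind11 can translate
// the error into a Python exception.
#define THROW_IF_ERROR(expr)                    \
  do {                                          \
    Status _rc = (expr);                        \
    if (!_rc.IsOk()) {                          \
      throw std::runtime_error(_rc.ToString()); \
    }                                           \
  } while (false)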
@@ -766,7 +766,8 @@ Status FindMaxPerFrame(const std::shared_ptr<Tensor> &input, std::shared_ptr<Ten
   auto channel = input->shape()[0];
   auto num_of_frames = input->shape()[1];
   auto lags = input->shape()[2];
-  int32_t lag_min = static_cast<int32_t>(ceil(static_cast<float>(sample_rate) / freq_high));
+  CHECK_FAIL_RETURN_UNEXPECTED(freq_high != 0, "DetectPitchFrequency: freq_high can not be zero.");
+  auto lag_min = static_cast<int32_t>(ceil(static_cast<float>(sample_rate) / freq_high));
   TensorShape out_shape({channel, num_of_frames});
   // pack batch
   for (auto itr = input->begin<T>(); itr != input->end<T>(); ++itr) {
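Note: the added check validates the divisor before the cast-and-divide instead of letting freq_high == 0 produce inf. The underlying guard-then-divide pattern, as a hedged stand-alone sketch (the real code returns an error Status rather than a sentinel):

#include <cmath>
#include <cstdint>

int32_t MinLag(int32_t sample_rate, int32_t freq_high) {
  if (freq_high == 0) {
    return -1;  // hypothetical sentinel; the patched code fails with a Status instead
  }
  return static_cast<int32_t>(std::ceil(static_cast<float>(sample_rate) / freq_high));
}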
@@ -41,12 +41,12 @@ ConfigManager::ConfigManager()
       sending_batches_(kCfgSendingBatch),
       rank_id_(kCfgDefaultRankId),
       seed_(kCfgDefaultSeed),
+      numa_enable_(false),
       monitor_sampling_interval_(kCfgMonitorSamplingInterval),
       callback_timout_(kCfgCallbackTimeout),
       cache_host_(kCfgDefaultCacheHost),
       cache_port_(kCfgDefaultCachePort),
       num_connections_(kDftNumConnections),
-      numa_enable_(false),
       cache_prefetch_size_(kDftCachePrefetchSize),
       auto_num_workers_(kDftAutoNumWorkers),
       num_cpu_threads_(std::thread::hardware_concurrency()),
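Note: numa_enable_ moves up so the initializer list matches the member declaration order, which is what actually drives initialization order in C++. A toy illustration of the -Wreorder warning this silences (illustrative, not MindSpore code):

// Members are initialized in declaration order (a_, then b_), not in the
// order they appear in the initializer list, so GCC/Clang warn here with
// -Wreorder; a mismatched list can hide use-before-init bugs.
struct Config {
  int a_;
  int b_;
  Config() : b_(2), a_(1) {}  // warning: a_ is initialized before b_
};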
@@ -245,7 +245,7 @@ class ConfigManager {

   // getter function
   // @return - Flag to indicate whether to save AutoTune configuration
-  bool save_autoconfig() { return save_autoconfig_; }
+  bool save_autoconfig() const { return save_autoconfig_; }

   // getter function
   // @return - The final AutoTune configuration JSON filepath
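Note: a short illustration of why the added const qualifier matters (hypothetical class, not the real ConfigManager):

class Flags {
 public:
  bool save_autoconfig() const { return save_autoconfig_; }  // const-qualified getter
 private:
  bool save_autoconfig_ = false;
};

bool Probe(const Flags &f) {
  return f.save_autoconfig();  // compiles only because the getter is const
}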
@@ -276,6 +276,10 @@ class ConfigManager {
   void set_multiprocessing_timeout_interval(uint32_t interval) { multiprocessing_timeout_interval_ = interval; }

  private:
+  // Private helper function that takes a nlohmann json format and populates the settings
+  // @param j - The json nlohmann json info
+  Status FromJson(const nlohmann::json &j);
+
   int32_t num_parallel_workers_;
   int32_t worker_connector_size_;
   int32_t op_connector_size_;
@@ -299,14 +303,11 @@ class ConfigManager {
   bool enable_shared_mem_;
   bool auto_offload_;
   bool enable_autotune_;
-  bool save_autoconfig_;                // True if should save AutoTune configuration
-  std::string autotune_json_filepath_;  // Filepath name of the final AutoTune Configuration JSON file
+  bool save_autoconfig_;  // True if should save AutoTune configuration
   int64_t autotune_interval_;
   bool enable_watchdog_;                       // Watchdog python thread enabled flag
   uint32_t multiprocessing_timeout_interval_;  // Multiprocessing timeout interval in seconds
-  // Private helper function that takes a nlohmann json format and populates the settings
-  // @param j - The json nlohmann json info
-  Status FromJson(const nlohmann::json &j);
+  std::string autotune_json_filepath_;  // Filepath name of the final AutoTune Configuration JSON file
 };
 }  // namespace dataset
 }  // namespace mindspore
@@ -14,8 +14,8 @@
  * limitations under the License.
  */

-#ifndef MINDDATA_PYBINDSUPPORT_H
-#define MINDDATA_PYBINDSUPPORT_H
+#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_PYBIND_SUPPORT_H_
+#define MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_PYBIND_SUPPORT_H_

 #include <string>

@@ -70,7 +70,7 @@ struct npy_format_descriptor<float16> {
     handle ptr = npy_api::get().PyArray_DescrFromType_(kNpyFloat16);
     return reinterpret_borrow<pybind11::dtype>(ptr);
   }
-  virtual ~npy_format_descriptor<float16>() {}
+  virtual ~npy_format_descriptor<float16>() = default;

   static std::string format() {
     // following: https://docs.python.org/3/library/struct.html#format-characters

@@ -85,4 +85,4 @@ struct type_caster<float16> : public npy_scalar_caster<float16> {
 }  // namespace detail
 }  // namespace pybind11

-#endif  // MINDDATA_PYBINDSUPPORT_H
+#endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_PYBIND_SUPPORT_H_
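Note: the guard is renamed to the full-path convention. An illustration of the collision class that short guards invite (hypothetical headers, not from this repository):

// a/pybind_support.h
#ifndef MINDDATA_PYBINDSUPPORT_H
#define MINDDATA_PYBINDSUPPORT_H
void FromA();
#endif

// b/pybind_support.h -- same short guard, so when included after a/, its
// body is silently skipped and FromB() is never seen by the compiler.
#ifndef MINDDATA_PYBINDSUPPORT_H
#define MINDDATA_PYBINDSUPPORT_H
void FromB();
#endif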
@@ -58,7 +58,7 @@ Status StorageManager::DoServiceStart() {
   if (root_.IsDirectory()) {
     // create multiple containers and store their index in a pool
     CHECK_FAIL_RETURN_UNEXPECTED(pool_size_ > 0, "Expect positive pool_size_, but got:" + std::to_string(pool_size_));
-    for (int i = 0; i < pool_size_; i++) {
+    for (auto i = 0; i < pool_size_; i++) {
       RETURN_IF_NOT_OK(AddOneContainer());
     }
   } else {
@@ -82,7 +82,7 @@ Status StorageManager::Write(key_type *key, const std::vector<ReadableSlice> &bu
   value_type out_value;
   bool create_new_container = false;
   int old_container_pos = -1;
-  size_t last_num_container = -1;
+  int last_num_container = -1;
   do {
     SharedLock lock_s(&rw_lock_);
     size_t num_containers = containers_.size();

@@ -105,9 +105,9 @@ Status StorageManager::Write(key_type *key, const std::vector<ReadableSlice> &bu
     RETURN_STATUS_UNEXPECTED("num_containers is zero");
   }
   // Pick a random container from the writable container pool to insert.
-  std::uniform_int_distribution<int> distribution(0, pool_size_ - 1);
-  int pos_in_pool = distribution(mt);
-  int cont_index = writable_containers_pool_.at(pos_in_pool);
+  std::uniform_int_distribution<size_t> distribution(0, pool_size_ - 1);
+  size_t pos_in_pool = distribution(mt);
+  size_t cont_index = writable_containers_pool_.at(pos_in_pool);
   cont = containers_.at(cont_index);
   off64_t offset;
   Status rc = cont->Insert(buf, &offset);
@@ -135,7 +135,7 @@ Status StorageManager::Read(StorageManager::key_type key, WritableSlice *dest, s
   if (r.second) {
     auto &it = r.first;
     value_type v = *it;
-    int container_inx = v.first;
+    size_t container_inx = v.first;
     off_t offset = v.second.first;
     size_t sz = v.second.second;
     if (dest->GetSize() < sz) {

@@ -173,7 +173,7 @@ Status StorageManager::DoServiceStop() noexcept {

 StorageManager::StorageManager(const Path &root) : root_(root), file_id_(0), index_(), pool_size_(1) {}

-StorageManager::StorageManager(const Path &root, int pool_size)
+StorageManager::StorageManager(const Path &root, size_t pool_size)
     : root_(root), file_id_(0), index_(), pool_size_(pool_size) {}

 StorageManager::~StorageManager() { (void)StorageManager::DoServiceStop(); }
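Note: these StorageManager hunks migrate the container pool from int to size_t so indices match the unsigned sizes they are compared against. A small sketch of the -Wsign-compare issue being avoided (illustrative code, not from this patch):

#include <cstddef>
#include <vector>

int CountUpTo(const std::vector<size_t> &pool, size_t limit) {
  int hits = 0;
  // A size_t index matches pool.size()'s type, so there is no
  // signed/unsigned mixing for the compiler to warn about.
  for (size_t i = 0; i < pool.size(); ++i) {
    if (pool[i] < limit) ++hits;
  }
  return hits;
}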
@@ -17,10 +17,12 @@
 #define MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_STORAGE_MANAGER_H_

 #include <unistd.h>
+
 #include <memory>
 #include <string>
+#include <utility>
 #include <vector>

 #include "minddata/dataset/engine/cache/storage_container.h"
 #include "minddata/dataset/util/allocator.h"
 #include "minddata/dataset/util/auto_index.h"

@@ -31,6 +33,7 @@
 #include "minddata/dataset/util/slice.h"

+using ListOfContainers = std::vector<std::shared_ptr<mindspore::dataset::StorageContainer>>;
 namespace mindspore {
 namespace dataset {
 class StorageManager : public Service {
@@ -51,7 +54,7 @@ class StorageManager : public Service {

   explicit StorageManager(const Path &);

-  StorageManager(const Path &root, int pool_size);
+  StorageManager(const Path &root, size_t pool_size);

   ~StorageManager() override;

@@ -75,12 +78,12 @@ class StorageManager : public Service {
   int file_id_;
   RWLock rw_lock_;
   storage_index index_;
-  std::vector<int> writable_containers_pool_;
-  int pool_size_;
+  std::vector<size_t> writable_containers_pool_;
+  size_t pool_size_;

-  std::string GetBaseName(const std::string &prefix, int32_t file_id);
+  static std::string GetBaseName(const std::string &prefix, int32_t file_id);

-  std::string ConstructFileName(const std::string &prefix, int32_t file_id, const std::string &suffix);
+  static std::string ConstructFileName(const std::string &prefix, int32_t file_id, const std::string &suffix);

   /// \brief Add a new storage container
   /// The newly-created container is going to be added into a pool of writable containers.
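Note: GetBaseName and ConstructFileName become static because they read no instance state. The same fix sketched on a hypothetical class:

#include <string>

class NameBuilder {
 public:
  // static documents that the helper touches no members, and it quiets
  // static-analysis checks such as clang-tidy's readability-convert-member-functions-to-static.
  static std::string ConstructFileName(const std::string &prefix, int id, const std::string &suffix) {
    return prefix + std::to_string(id) + suffix;
  }
};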
@@ -22,11 +22,9 @@
 #include "minddata/dataset/core/config_manager.h"
 #include "minddata/dataset/core/tensor_shape.h"
 #include "minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h"
-#include "minddata/dataset/engine/execution_tree.h"

 namespace mindspore {
 namespace dataset {
 const char kColumnImage[] = "image";
 const char kJsonImages[] = "images";
 const char kJsonImagesFileName[] = "file_name";
 const char kJsonId[] = "id";
@@ -108,13 +106,13 @@ Status CocoOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) {

   auto bboxRow = itr->second;
   std::vector<float> bbox_row;
-  dsize_t bbox_row_num = static_cast<dsize_t>(bboxRow.size());
+  auto bbox_row_num = static_cast<dsize_t>(bboxRow.size());
   dsize_t bbox_column_num = 0;
-  for (auto bbox : bboxRow) {
+  std::for_each(bboxRow.begin(), bboxRow.end(), [&](const auto &bbox) {
     if (static_cast<dsize_t>(bbox.size()) > bbox_column_num) {
       bbox_column_num = static_cast<dsize_t>(bbox.size());
     }
-  }
+  });

   for (auto bbox : bboxRow) {
     bbox_row.insert(bbox_row.end(), bbox.begin(), bbox.end());
@@ -175,7 +173,7 @@ Status CocoOp::LoadDetectionTensorRow(row_id_type row_id, const std::string &ima
                                         annotation_path_};
   if (extra_metadata_) {
     std::string img_id;
-    size_t pos = image_id.find(".");
+    size_t pos = image_id.find('.');
     if (pos == std::string::npos) {
       RETURN_STATUS_UNEXPECTED("Invalid image, 'image_id': " + image_id + " should be with suffix like \".jpg\"");
     }

@@ -210,7 +208,7 @@ Status CocoOp::LoadSimpleTensorRow(row_id_type row_id, const std::string &image_
   std::vector<std::string> path_list = {image_full_path.ToString(), annotation_path_, annotation_path_};
   if (extra_metadata_) {
     std::string img_id;
-    size_t pos = image_id.find(".");
+    size_t pos = image_id.find('.');
     if (pos == std::string::npos) {
       RETURN_STATUS_UNEXPECTED("Invalid image, 'image_id': " + image_id + " should be with suffix like \".jpg\"");
     }

@@ -233,8 +231,8 @@ Status CocoOp::LoadCaptioningTensorRow(row_id_type row_id, const std::string &im
   std::vector<std::string> path_list = {image_full_path.ToString(), annotation_path_};
   if (extra_metadata_) {
     std::string img_id;
-    size_t pos = image_id.find(".");
-    if (pos == image_id.npos) {
+    size_t pos = image_id.find('.');
+    if (pos == std::string::npos) {
       RETURN_STATUS_UNEXPECTED("Invalid image, 'image_id': " + image_id + " should be with suffix like \".jpg\".");
     }
     std::copy(image_id.begin(), image_id.begin() + pos, std::back_inserter(img_id));
@@ -286,7 +284,7 @@ Status CocoOp::LoadMixTensorRow(row_id_type row_id, const std::string &image_id,
                                         annotation_path_, annotation_path_};
   if (extra_metadata_) {
     std::string img_id;
-    size_t pos = image_id.find(".");
+    size_t pos = image_id.find('.');
     if (pos == std::string::npos) {
       RETURN_STATUS_UNEXPECTED("Invalid image, " + image_id + " should be with suffix like \".jpg\"");
     }
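Note: the find(".") to find('.') change repeated across these hunks prefers the char overload, which avoids constructing a one-character string and states the intent directly. In isolation (hypothetical helper):

#include <string>

std::string StemOf(const std::string &image_id) {
  size_t pos = image_id.find('.');  // char overload, not image_id.find(".")
  return pos == std::string::npos ? image_id : image_id.substr(0, pos);
}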
@@ -320,10 +318,8 @@ Status CocoOp::PrepareData() {
   }

   std::ifstream in(realpath.value());
-  if (!in.is_open()) {
-    RETURN_STATUS_UNEXPECTED("Invalid annotation file, Coco Dataset annotation file: " + annotation_path_ +
-                             " open failed, permission denied!");
-  }
+  CHECK_FAIL_RETURN_UNEXPECTED(in.is_open(), "Invalid annotation file, Coco Dataset annotation file: " +
+                                               annotation_path_ + " open failed, permission denied!");
   in >> js;
 } catch (const std::exception &err) {
   RETURN_STATUS_UNEXPECTED("Invalid annotation file, Coco Dataset annotation file:" + annotation_path_ +
@@ -341,15 +337,14 @@ Status CocoOp::PrepareData() {
   }
   nlohmann::json annotations_list;
   RETURN_IF_NOT_OK(SearchNodeInJson(js, std::string(kJsonAnnotations), &annotations_list));
-  for (auto annotation : annotations_list) {
+  for (const auto &annotation : annotations_list) {
     int32_t image_id = 0, id = 0;
     std::string file_name;
     RETURN_IF_NOT_OK(SearchNodeInJson(annotation, std::string(kJsonAnnoImageId), &image_id));
     auto itr_file = image_index_.find(image_id);
-    if (itr_file == image_index_.end()) {
-      RETURN_STATUS_UNEXPECTED("Invalid annotation, the attribute of 'image_id': " + std::to_string(image_id) +
-                               " is missing in the node of 'image' from annotation file: " + annotation_path_);
-    }
+    CHECK_FAIL_RETURN_UNEXPECTED(itr_file != image_index_.end(),
+                                 "Invalid annotation, the attribute of 'image_id': " + std::to_string(image_id) +
+                                   " is missing in the node of 'image' from annotation file: " + annotation_path_);
     file_name = itr_file->second;
     switch (task_type_) {
       case TaskType::Detection:
@@ -377,34 +372,33 @@ Status CocoOp::PrepareData() {
     }
   }
   if (task_type_ == TaskType::Captioning) {
-    for (auto img : image_que) {
+    for (const auto &img : image_que) {
       if (captions_map_.find(img) != captions_map_.end()) {
         image_ids_.push_back(img);
       }
     }
   } else {
-    for (auto img : image_que) {
+    for (const auto &img : image_que) {
       if (coordinate_map_.find(img) != coordinate_map_.end()) {
         image_ids_.push_back(img);
       }
     }
   }
   num_rows_ = image_ids_.size();
-  if (num_rows_ == 0) {
-    RETURN_STATUS_UNEXPECTED(
-      "Invalid data, 'CocoDataset' API can't read the data file (interface mismatch or no data found). "
-      "Check file in directory: " +
-      image_folder_path_ + ".");
-  }
+  CHECK_FAIL_RETURN_UNEXPECTED(
+    num_rows_ != 0,
+    "Invalid data, 'CocoDataset' API can't read the data file (interface mismatch or no data found). "
+    "Check file in directory: " +
+      image_folder_path_ + ".");
   return Status::OK();
 }

 Status CocoOp::ImageColumnLoad(const nlohmann::json &image_tree, std::vector<std::string> *image_vec) {
-  if (image_tree.size() == 0) {
+  if (image_tree.empty()) {
     RETURN_STATUS_UNEXPECTED("Invalid annotation, the 'image' node is missing in annotation file: " + annotation_path_ +
                              ".");
   }
-  for (auto img : image_tree) {
+  for (const auto &img : image_tree) {
     std::string file_name;
     int32_t id = 0;
     RETURN_IF_NOT_OK(SearchNodeInJson(img, std::string(kJsonImagesFileName), &file_name));
@@ -446,7 +440,7 @@ Status CocoOp::StuffColumnLoad(const nlohmann::json &annotation_tree, const std:
   RETURN_IF_NOT_OK(SearchNodeInJson(annotation_tree, std::string(kJsonAnnoSegmentation), &segmentation));
   if (iscrowd == 0) {
     for (auto item : segmentation) {
-      if (bbox.size() > 0) bbox.clear();
+      if (!bbox.empty()) bbox.clear();
       bbox.insert(bbox.end(), item.begin(), item.end());
       coordinate_map_[image_file].push_back(bbox);
     }

@@ -515,14 +509,14 @@ Status CocoOp::PanopticColumnLoad(const nlohmann::json &annotation_tree, const s

 Status CocoOp::CaptionColumnLoad(const nlohmann::json &annotation_tree, const std::string &image_file,
                                  const int32_t &unique_id) {
-  std::string caption = "";
+  std::string caption;
   RETURN_IF_NOT_OK(SearchNodeInJson(annotation_tree, std::string(kJsonAnnoCaption), &caption));
   captions_map_[image_file].push_back(caption);
   return Status::OK();
 }

 Status CocoOp::CategoriesColumnLoad(const nlohmann::json &categories_tree) {
-  if (categories_tree.size() == 0) {
+  if (categories_tree.empty()) {
     RETURN_STATUS_UNEXPECTED(
       "Invalid annotation, the 'categories' node is missing in annotation file: " + annotation_path_ + ".");
   }
@@ -560,10 +554,11 @@ Status CocoOp::CategoriesColumnLoad(const nlohmann::json &categories_tree) {
   return Status::OK();
 }

-Status CocoOp::ReadImageToTensor(const std::string &path, const ColDescriptor &col, std::shared_ptr<Tensor> *tensor) {
+Status CocoOp::ReadImageToTensor(const std::string &path, const ColDescriptor &col,
+                                 std::shared_ptr<Tensor> *tensor) const {
   RETURN_IF_NOT_OK(Tensor::CreateFromFile(path, tensor));

-  if (decode_ == true) {
+  if (decode_) {
     Status rc = Decode(*tensor, tensor);
     CHECK_FAIL_RETURN_UNEXPECTED(
       rc.IsOk(), "Invalid image, failed to decode " + path + ": the image is broken or permission denied.");

@@ -230,7 +230,7 @@ class CocoOp : public MappableLeafOp {
   /// \param[in] col Contains tensor implementation and datatype.
   /// \param[out] tensor Returned tensor.
   /// \return Status The status code returned.
-  Status ReadImageToTensor(const std::string &path, const ColDescriptor &col, std::shared_ptr<Tensor> *tensor);
+  Status ReadImageToTensor(const std::string &path, const ColDescriptor &col, std::shared_ptr<Tensor> *tensor) const;

   /// \brief Read annotation from Annotation folder.
   /// \return Status The status code returned.
|
@ -36,7 +36,7 @@ class __attribute__((visibility("default"))) ShardOperator {
|
|||
|
||||
virtual bool HasChildOp() { return child_op_ != nullptr; }
|
||||
|
||||
virtual Status SetChildOp(std::shared_ptr<ShardOperator> child_op) {
|
||||
virtual Status SetChildOp(const std::shared_ptr<ShardOperator> &child_op) {
|
||||
if (child_op != nullptr) {
|
||||
child_op_ = child_op;
|
||||
}
|
||||
|
|
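Note: SetChildOp now takes its shared_ptr by const reference, which skips an atomic refcount increment and decrement per call when the callee only reads through the pointer. The same pattern on a hypothetical type:

#include <memory>

struct Node {
  std::shared_ptr<Node> child;
  // Passing by const reference defers the refcount bump to the one place a
  // copy is actually made: the assignment below.
  void SetChild(const std::shared_ptr<Node> &c) {
    if (c != nullptr) {
      child = c;
    }
  }
};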