forked from mindspore-Ecosystem/mindspore

minddata error message optimize

parent ef425a1321
commit 9d89de5765
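This commit renames the misleading "CreateSaver failed" prefix to "SaveToDisk failed", standardizes "Invalid data" / "Invalid file" / "Invalid parameter" prefixes across minddata, and appends the offending value (path, dtype, op name) to each message. For orientation, here is a minimal sketch of the validation pattern the first hunk standardizes; the Status struct and macro below are simplified stand-ins, not MindSpore's real definitions:

#include <cstdint>
#include <string>

// Simplified stand-ins so the sketch is self-contained; MindSpore's real
// Status class and RETURN_STATUS_SYNTAX_ERROR macro are more elaborate.
struct Status {
  bool ok;
  std::string msg;
  static Status OK() { return {true, ""}; }
};
#define RETURN_STATUS_SYNTAX_ERROR(m) return Status{false, (m)}

// The message pattern the diff standardizes: name the failing API, state the
// constraint, and append the offending value so the caller can act on it.
Status ValidateNumFiles(int32_t num_files) {
  if (num_files <= 0 || num_files > 1000) {
    std::string err = "SaveToDisk failed, num_files must between 1 and 1000, but got " + std::to_string(num_files);
    RETURN_STATUS_SYNTAX_ERROR(err);
  }
  return Status::OK();
}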
@@ -209,34 +209,34 @@ Status ToDevice::Terminate() {
 // SaveToDisk
 Status SaveToDisk::ValidateParams() {
   if (dataset_path_.empty()) {
-    std::string err = "CreateSaver failed, dataset_path must not be empty";
+    std::string err = "SaveToDisk failed, dataset_path must not be empty";
     MS_LOG(ERROR) << err;
     RETURN_STATUS_SYNTAX_ERROR(err);
   }
   Path dir(dataset_path_);
   if (dir.IsDirectory()) {
-    std::string err = "CreateSaver failed, dataset_path must not be a directory";
+    std::string err = "SaveToDisk failed, dataset_path must not be a directory";
     MS_LOG(ERROR) << err;
     RETURN_STATUS_SYNTAX_ERROR(err);
   }
   std::string real_path;
   if (Path::RealPath(dir.ParentPath(), real_path).IsError()) {
-    std::string err_msg = "CreateSaver failed, can not get real dataset path: " + dir.ParentPath();
+    std::string err_msg = "SaveToDisk failed, can not get real dataset path: " + dir.ParentPath();
     MS_LOG(ERROR) << err_msg;
     RETURN_STATUS_SYNTAX_ERROR(err_msg);
   }
   if (access(dir.ParentPath().c_str(), R_OK) == -1) {
-    std::string err_msg = "CreateSaver failed, no access to specified dataset path: " + dataset_path_;
+    std::string err_msg = "SaveToDisk failed, no access to specified dataset path: " + dataset_path_;
     MS_LOG(ERROR) << err_msg;
     RETURN_STATUS_SYNTAX_ERROR(err_msg);
   }
   if (num_files_ <= 0 || num_files_ > 1000) {
-    std::string err = "CreateSaver failed, num_files must between 1 and 1000, but got " + std::to_string(num_files_);
+    std::string err = "SaveToDisk failed, num_files must between 1 and 1000, but got " + std::to_string(num_files_);
     MS_LOG(ERROR) << err;
     RETURN_STATUS_SYNTAX_ERROR(err);
   }
   if (dataset_type_ != "mindrecord") {
-    std::string err = "CreateSaver failed, only \"mindrecord\" dataset format is supported, but got " + dataset_type_;
+    std::string err = "SaveToDisk failed, only \"mindrecord\" dataset format is supported, but got " + dataset_type_;
     MS_LOG(ERROR) << err;
     RETURN_STATUS_SYNTAX_ERROR(err);
   }
@@ -333,7 +333,7 @@ Status SaveToDisk::CheckTensorRowShapes(const std::unordered_map<std::string, in
   std::string mr_type;
   std::string el = column_type.ToString();
   if (mindrecord::kTypesMap.find(el) == mindrecord::kTypesMap.end()) {
-    std::string err_msg("Error: can not support data type: " + el);
+    std::string err_msg("Invalid type, unsupported data type: " + el);
     RETURN_STATUS_UNEXPECTED(err_msg);
   } else {
     mr_type = mindrecord::kTypesMap.at(el);
@@ -356,13 +356,13 @@ Status SaveToDisk::FetchMetaFromTensorRow(const std::unordered_map<std::string,
                                           const TensorRow &row, nlohmann::json *schema,
                                           std::vector<std::string> *index_fields) {
   if (schema == nullptr) {
-    RETURN_STATUS_UNEXPECTED("Error: schema is NULL.");
+    RETURN_STATUS_UNEXPECTED("schema can not be nullptr.");
   }
   if (index_fields == nullptr) {
-    RETURN_STATUS_UNEXPECTED("Error: index fields is NULL.");
+    RETURN_STATUS_UNEXPECTED("index_fields can not be nullptr.");
   }
   if (column_name_id_map.empty()) {
-    RETURN_STATUS_UNEXPECTED("Error: column not found.");
+    RETURN_STATUS_UNEXPECTED("column_name_id_map can not be empty.");
   }
   nlohmann::json dataset_schema;
   for (auto &col : column_name_id_map) {
@@ -378,7 +378,7 @@ Status SaveToDisk::FetchMetaFromTensorRow(const std::unordered_map<std::string,
     std::string el = column_type.ToString();
     dataset_schema[column_name] = el;
     if (mindrecord::kTypesMap.find(el) == mindrecord::kTypesMap.end()) {
-      std::string err_msg("Error: can not support data type: " + el);
+      std::string err_msg("Invalid type, unsupported data type: " + el);
       RETURN_STATUS_UNEXPECTED(err_msg);
     } else {
       mr_type = mindrecord::kTypesMap.at(el);
@@ -390,7 +390,7 @@ Status SaveToDisk::FetchMetaFromTensorRow(const std::unordered_map<std::string,
       (*schema)[column_name] = {{"type", mr_type}};
     } else {
       if (mr_type == "string") {  // mindrecord can not support string with shape.
-        std::string err_msg("Error: mindrecord can not support multi-dimensional string tensor.");
+        std::string err_msg("Invalid data, mindrecord can not support multi-dimensional string tensor.");
         RETURN_STATUS_UNEXPECTED(err_msg);
       }
       if (mr_type == "bytes") {  // ignore shape of bytes in minrecord
@@ -411,13 +411,13 @@ inline Status ValidateInputParams(nlohmann::json *row_raw_data,
                                   std::map<std::string, std::unique_ptr<std::vector<uint8_t>>> *row_bin_data,
                                   const std::unordered_map<std::string, int32_t> &column_name_id_map) {
   if (row_raw_data == nullptr) {
-    RETURN_STATUS_UNEXPECTED("Error: row raw data is NULL.");
+    RETURN_STATUS_UNEXPECTED("row_raw_data can not be nullptr.");
   }
   if (row_bin_data == nullptr) {
-    RETURN_STATUS_UNEXPECTED("Error: row bin data is NULL.");
+    RETURN_STATUS_UNEXPECTED("row_bin_data can not be nullptr.");
   }
   if (column_name_id_map.empty()) {
-    RETURN_STATUS_UNEXPECTED("Error: column not found");
+    RETURN_STATUS_UNEXPECTED("column_name_id_map can not be empty.");
   }
   return Status::OK();
 }
@@ -516,7 +516,7 @@ Status SaveToDisk::FetchItemData(std::shared_ptr<Tensor> tensor, std::string col
     std::string ss(sv);
     (*row_raw_data)[column_name] = std::move(ss);
   } else {
-    RETURN_STATUS_UNEXPECTED("Got unexpected type when casting data.");
+    RETURN_STATUS_UNEXPECTED("Invalid dtype, got unexpected type when casting data: " + column_type.ToString());
   }
   if (data_ptr != nullptr) {
     (*row_bin_data)[column_name] = std::move(data_ptr);
@@ -616,7 +616,7 @@ Status TreeGetters::GetBatchSize(int64_t *batch_size) {
   std::shared_ptr<DatasetOp> root = std::shared_ptr<DatasetOp>(tree_adapter_->GetRoot());
   RETURN_UNEXPECTED_IF_NULL(root);
   *batch_size = root->GetTreeBatchSize();
-  CHECK_FAIL_RETURN_UNEXPECTED(*batch_size != -1, "Error in finding the batch size.");
+  CHECK_FAIL_RETURN_UNEXPECTED(*batch_size != -1, "GetBatchSize: Failed to find the batch size in Dataset pipeline.");
   return Status::OK();
 }
 
@@ -644,7 +644,7 @@ Status TreeGetters::GetColumnNames(std::vector<std::string> *output) {
   std::shared_ptr<DatasetOp> root = std::shared_ptr<DatasetOp>(tree_adapter_->GetRoot());
   RETURN_UNEXPECTED_IF_NULL(root);
   std::unordered_map<std::string, int32_t> column_name_id_map = root->column_name_id_map();
-  CHECK_FAIL_RETURN_UNEXPECTED(!column_name_id_map.empty(), "GetColumnNames: column_name_id map is empty.");
+  CHECK_FAIL_RETURN_UNEXPECTED(!column_name_id_map.empty(), "GetColumnNames: column_name_id map can not be empty.");
   std::vector<std::pair<std::string, int32_t>> col_name_id_vec(column_name_id_map.begin(), column_name_id_map.end());
   std::sort(col_name_id_vec.begin(), col_name_id_vec.end(),
             [](const std::pair<std::string, int32_t> &a, const std::pair<std::string, int32_t> &b) {
@@ -696,7 +696,7 @@ Status BuildVocabConsumer::Start() {
   TensorRow row;
   RETURN_IF_NOT_OK(tree_adapter_->GetNext(&row));
   // The returned row would EOE which is an empty row
-  CHECK_FAIL_RETURN_UNEXPECTED(row.empty(), "The fetched row from BuildVocab should be an EOE.");
+  CHECK_FAIL_RETURN_UNEXPECTED(row.empty(), "BuildVocab: The fetched row from BuildVocab should be an EOE.");
   return Status::OK();
 }
 Status DatasetSizeGetter::GetDatasetSize(int64_t *size, bool estimate) {
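The getter hunks above now prefix each message with the API that failed (GetBatchSize:, GetColumnNames:, BuildVocab:), so a pipeline-level error can be traced back to its source. A plausible reading of the CHECK_FAIL_RETURN_UNEXPECTED guard those messages pass through is sketched below; this is an assumption about its shape, and the real MindSpore definition differs in detail:

// Sketch of a fail-fast guard in the style of CHECK_FAIL_RETURN_UNEXPECTED:
// evaluate the condition and surface the message as an error Status on failure.
#define CHECK_FAIL_RETURN_UNEXPECTED(condition, msg) \
  do {                                               \
    if (!(condition)) {                              \
      RETURN_STATUS_UNEXPECTED(msg);                 \
    }                                                \
  } while (false)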
@@ -220,7 +220,7 @@ Status DataSchema::ColumnOrderLoad(nlohmann::json column_tree, const std::vector
     // Find the column in the json document
     auto column_info = column_tree.find(common::SafeCStr(curr_col_name));
     if (column_info == column_tree.end()) {
-      RETURN_STATUS_UNEXPECTED("Invalid data, failed to find column name: " + curr_col_name);
+      RETURN_STATUS_UNEXPECTED("Invalid data, failed to find column name: " + curr_col_name + " in given json file.");
     }
     // At this point, columnInfo.value() is the subtree in the json document that contains
     // all of the data for a given column. This data will formulate our schema column.
@@ -238,7 +238,7 @@ Status DataSchema::ColumnOrderLoad(nlohmann::json column_tree, const std::vector
     for (const auto &it_child : column_tree.items()) {
       auto name = it_child.value().find("name");
       if (name == it_child.value().end()) {
-        RETURN_STATUS_UNEXPECTED("Name field is missing for this column.");
+        RETURN_STATUS_UNEXPECTED("Invalid data, \"name\" field is missing for column: " + curr_col_name);
       }
       if (name.value() == curr_col_name) {
         index = i;
@@ -247,7 +247,7 @@ Status DataSchema::ColumnOrderLoad(nlohmann::json column_tree, const std::vector
       i++;
     }
     if (index == -1) {
-      RETURN_STATUS_UNEXPECTED("Invalid data, failed to find column name: " + curr_col_name);
+      RETURN_STATUS_UNEXPECTED("Invalid data, failed to find column name: " + curr_col_name + " in given json file.");
     }
     nlohmann::json column_child_tree = column_tree[index];
     RETURN_IF_NOT_OK(ColumnLoad(column_child_tree, curr_col_name));
@@ -259,7 +259,7 @@ Status DataSchema::ColumnOrderLoad(nlohmann::json column_tree, const std::vector
 // Internal helper function for parsing shape info and building a vector for the shape construction.
 static Status BuildShape(const nlohmann::json &shapeVal, std::vector<dsize_t> *outShape) {
   if (outShape == nullptr) {
-    RETURN_STATUS_UNEXPECTED("null output shape");
+    RETURN_STATUS_UNEXPECTED("outShape can not be nullptr.");
   }
   if (shapeVal.empty()) return Status::OK();
 
@@ -294,19 +294,21 @@ Status DataSchema::ColumnLoad(nlohmann::json column_child_tree, const std::strin
       shape_field_exists = true;
       RETURN_IF_NOT_OK(BuildShape(it_child.value(), &tmp_shape));
     } else {
-      std::string err_msg = "Unexpected column attribute " + it_child.key() + " for column " + col_name;
+      std::string err_msg = "Invalid data, unexpected column attribute " + it_child.key() + " for column " + col_name +
+                            ", expected attribute: name, type, rank, t_impl or shape.";
       RETURN_STATUS_UNEXPECTED(err_msg);
     }
   }
   if (!name.empty()) {
     if (!col_name.empty() && col_name != name) {
       std::string err_msg =
-        "json schema file for column " + col_name + " has column name that does not match columnsToLoad";
+        "Invalid data, json schema file for column " + col_name + " has column name that does not match columnsToLoad";
       RETURN_STATUS_UNEXPECTED(err_msg);
     }
   } else {
     if (col_name.empty()) {
-      std::string err_msg = "json schema file for column " + col_name + " has invalid or missing column name.";
+      std::string err_msg =
+        "Invalid data, json schema file for column " + col_name + " has invalid or missing column name.";
       RETURN_STATUS_UNEXPECTED(err_msg);
     } else {
       name = col_name;
@@ -315,12 +317,12 @@ Status DataSchema::ColumnLoad(nlohmann::json column_child_tree, const std::strin
   // data type is mandatory field
   if (type_str.empty())
     return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__,
-                  "json schema file for column " + col_name + " has invalid or missing column type.");
+                  "Invalid data, json schema file for column " + col_name + " has invalid or missing column type.");
 
   // rank number is mandatory field
   if (rank_value <= -1)
     return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__,
-                  "json schema file for column " + col_name + " must define a positive rank value.");
+                  "Invalid data, json schema file for column " + col_name + " must define a positive rank value.");
 
   // Create the column descriptor for this column from the data we pulled from the json file
   TensorShape col_shape = TensorShape(tmp_shape);
@@ -347,12 +349,12 @@ Status DataSchema::LoadSchemaFile(const std::string &schema_file_path,
     num_rows_ = 0;
   } catch (nlohmann::json::exception &e) {
     in.close();
-    RETURN_STATUS_UNEXPECTED("Unable to parse \"numRows\" from schema");
+    RETURN_STATUS_UNEXPECTED("Invalid data, unable to parse \"numRows\" from schema file: " + schema_file_path);
   }
   nlohmann::json column_tree = js.at("columns");
   if (column_tree.empty()) {
     in.close();
-    RETURN_STATUS_UNEXPECTED("columns is null");
+    RETURN_STATUS_UNEXPECTED("Invalid data, \"columns\" field is missing in schema file: " + schema_file_path);
   }
   if (columns_to_load.empty()) {
     // Parse the json tree and load the schema's columns in whatever order that the json
@@ -372,7 +374,7 @@ Status DataSchema::LoadSchemaFile(const std::string &schema_file_path,
     in.close();
   } catch (const std::exception &err) {
     // Catch any exception and convert to Status return code
-    RETURN_STATUS_UNEXPECTED("Schema file failed to load with JSON tools. File is: " + schema_file_path);
+    RETURN_STATUS_UNEXPECTED("Invalid file, failed to load and parse schema file: " + schema_file_path);
   }
   return Status::OK();
 }
@@ -386,7 +388,7 @@ Status DataSchema::LoadSchemaString(const std::string &schema_json_string,
   num_rows_ = js.value("numRows", 0);
   nlohmann::json column_tree = js.at("columns");
   if (column_tree.empty()) {
-    RETURN_STATUS_UNEXPECTED("columns is null");
+    RETURN_STATUS_UNEXPECTED("Invalid data, \"columns\" field is missing in schema string.");
   }
   if (columns_to_load.empty()) {
     // Parse the json tree and load the schema's columns in whatever order that the json
@@ -397,7 +399,7 @@ Status DataSchema::LoadSchemaString(const std::string &schema_json_string,
     }
   } catch (const std::exception &err) {
     // Catch any exception and convert to Status return code
-    RETURN_STATUS_UNEXPECTED("Schema file failed to load");
+    RETURN_STATUS_UNEXPECTED("Invalid data, failed to load and parse schema string.");
   }
   return Status::OK();
 }
@@ -439,7 +441,7 @@ Status DataSchema::PreLoadExceptionCheck(const nlohmann::json &js) {
   // Check if columns node exists. It is required for building schema from file.
   if (js.find("columns") == js.end())
     return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__,
-                  "\"columns\" node is required in the schema json file.");
+                  "Invalid data, \"columns\" node is required in the schema json file.");
   return Status::OK();
 }
 
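The DataSchema hunks above validate a schema JSON whose layout can be read off the new messages: "numRows" is optional (js.value("numRows", 0)), the "columns" node is mandatory, and each column may carry the attributes name, type, rank, t_impl or shape. Below is a self-contained sketch of parsing such a schema with nlohmann::json; the column names and values are invented for illustration:

#include <iostream>
#include <nlohmann/json.hpp>

int main() {
  // Hypothetical schema content matching the fields the loader checks above.
  nlohmann::json js = nlohmann::json::parse(R"({
    "numRows": 100,
    "columns": {
      "image": {"type": "uint8", "rank": 1},
      "label": {"type": "int32", "rank": 1, "shape": [1]}
    }
  })");
  int num_rows = js.value("numRows", 0);          // optional, defaults to 0
  nlohmann::json column_tree = js.at("columns");  // throws if "columns" is missing
  std::cout << num_rows << " rows, " << column_tree.size() << " columns\n";
  return 0;
}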
@@ -167,7 +167,7 @@ Status ExecutionTree::Launch() {
 #endif
   int32_t thread_num = get_nprocs();
   if (thread_num == 0) {
-    std::string err_msg = "Invalid thread number.";
+    std::string err_msg = "Invalid thread number, got 0.";
     RETURN_STATUS_UNEXPECTED(err_msg);
   }
   constexpr int32_t max_cv_threads_cnt = 8;
@@ -79,11 +79,11 @@ Status ProfilingManager::Initialize() {
   // Register nodes based on config
   std::string dir = common::GetEnv("MINDDATA_PROFILING_DIR");
   if (dir.empty()) {
-    RETURN_STATUS_UNEXPECTED("Profiling dir is not set.");
+    RETURN_STATUS_UNEXPECTED("Invalid parameter, Profiling directory is not set.");
   }
   char real_path[PATH_MAX] = {0};
   if (dir.size() >= PATH_MAX) {
-    RETURN_STATUS_UNEXPECTED("Profiling dir is invalid.");
+    RETURN_STATUS_UNEXPECTED("Invalid file, Profiling directory is invalid.");
   }
 #if defined(_WIN32) || defined(_WIN64)
   if (_fullpath(real_path, common::SafeCStr(dir), PATH_MAX) == nullptr) {
@@ -91,7 +91,7 @@ Status ProfilingManager::Initialize() {
   }
 #else
   if (realpath(common::SafeCStr(dir), real_path) == nullptr) {
-    RETURN_STATUS_UNEXPECTED("Profiling dir is invalid.");
+    RETURN_STATUS_UNEXPECTED("Invalid file, can not get realpath of Profiling directory.");
   }
 #endif
   dir_path_ = real_path;
@@ -19,16 +19,16 @@
 
 namespace mindspore::dataset {
 Status PythonRuntimeContext::Terminate() {
-  MS_LOG(INFO) << "Terminating a PythonRuntime";
+  MS_LOG(INFO) << "Terminating a Dataset PythonRuntime.";
   if (tree_consumer_ != nullptr) {
     return TerminateImpl();
   }
-  MS_LOG(WARNING) << "TreeConsumer was not initialized";
+  MS_LOG(WARNING) << "Dataset TreeConsumer was not initialized.";
   return Status::OK();
 }
 
 Status PythonRuntimeContext::TerminateImpl() {
-  CHECK_FAIL_RETURN_UNEXPECTED(tree_consumer_ != nullptr, " Tree Consumer is not initialized");
+  CHECK_FAIL_RETURN_UNEXPECTED(tree_consumer_ != nullptr, "Dataset TreeConsumer is not initialized.");
   // Release GIL before joining all threads
   py::gil_scoped_release gil_release;
   return tree_consumer_->Terminate();
@@ -22,16 +22,16 @@ void RuntimeContext::AssignConsumer(std::shared_ptr<TreeConsumer> tree_consumer)
   tree_consumer_ = std::move(tree_consumer);
 }
 Status NativeRuntimeContext::Terminate() {
-  MS_LOG(INFO) << "Terminating a NativeRuntime.";
+  MS_LOG(INFO) << "Terminating a Dataset NativeRuntime.";
   if (tree_consumer_ != nullptr) {
     return TerminateImpl();
   }
-  MS_LOG(WARNING) << "TreeConsumer was not initialized.";
+  MS_LOG(WARNING) << "Dataset TreeConsumer was not initialized.";
   return Status::OK();
 }
 
 Status NativeRuntimeContext::TerminateImpl() {
-  CHECK_FAIL_RETURN_UNEXPECTED(tree_consumer_ != nullptr, " TreeConsumer is not initialized.");
+  CHECK_FAIL_RETURN_UNEXPECTED(tree_consumer_ != nullptr, "Dataset TreeConsumer is not initialized.");
   return tree_consumer_->Terminate();
 }
 
@@ -78,7 +78,7 @@ Status Serdes::SaveJSONToFile(nlohmann::json json_string, const std::string &fil
 
     ChangeFileMode(whole_path.value(), S_IRUSR | S_IWUSR);
   } catch (const std::exception &err) {
-    RETURN_STATUS_UNEXPECTED("Save json string into " + file_name + " failed!");
+    RETURN_STATUS_UNEXPECTED("Invalid data, failed to save json string into file: " + file_name);
  }
  return Status::OK();
}
@@ -87,11 +87,11 @@ Status Serdes::Deserialize(std::string json_filepath, std::shared_ptr<DatasetNod
   nlohmann::json json_obj;
   CHECK_FAIL_RETURN_UNEXPECTED(json_filepath.size() != 0, "Json path is null");
   std::ifstream json_in(json_filepath);
-  CHECK_FAIL_RETURN_UNEXPECTED(json_in, "Json path is not valid");
+  CHECK_FAIL_RETURN_UNEXPECTED(json_in, "Invalid file, failed to open json file: " + json_filepath);
   try {
     json_in >> json_obj;
   } catch (const std::exception &e) {
-    return Status(StatusCode::kMDSyntaxError, "Json object is not valid");
+    return Status(StatusCode::kMDSyntaxError, "Invalid file, failed to parse json file: " + json_filepath);
   }
   RETURN_IF_NOT_OK(ConstructPipeline(json_obj, ds));
   return Status::OK();
@@ -123,7 +123,8 @@ Status Serdes::ConstructPipeline(nlohmann::json json_obj, std::shared_ptr<Datase
       CHECK_FAIL_RETURN_UNEXPECTED(datasets.size() > 1, "Should concat more than 1 dataset");
       RETURN_IF_NOT_OK(ConcatNode::from_json(json_obj, datasets, ds));
     } else {
-      return Status(StatusCode::kMDUnexpectedError, "Operation is not supported");
+      return Status(StatusCode::kMDUnexpectedError,
+                    "Invalid data, unsupported operation type: " + std::string(json_obj["op_type"]));
     }
   }
   return Status::OK();
@@ -131,7 +132,7 @@ Status Serdes::ConstructPipeline(nlohmann::json json_obj, std::shared_ptr<Datase
 
 Status Serdes::CreateNode(std::shared_ptr<DatasetNode> child_ds, nlohmann::json json_obj,
                           std::shared_ptr<DatasetNode> *ds) {
-  CHECK_FAIL_RETURN_UNEXPECTED(json_obj.find("op_type") != json_obj.end(), "Failed to find op_type");
+  CHECK_FAIL_RETURN_UNEXPECTED(json_obj.find("op_type") != json_obj.end(), "Failed to find op_type in json.");
   std::string op_type = json_obj["op_type"];
   if (child_ds == nullptr) {
     // if dataset doesn't have any child, then create a source dataset IR. e.g., ImageFolderNode, CocoNode
@@ -173,7 +174,7 @@ Status Serdes::CreateDatasetNode(nlohmann::json json_obj, std::string op_type, s
   } else if (op_type == kVOCNode) {
     RETURN_IF_NOT_OK(VOCNode::from_json(json_obj, ds));
   } else {
-    return Status(StatusCode::kMDUnexpectedError, op_type + " is not supported");
+    return Status(StatusCode::kMDUnexpectedError, "Invalid data, unsupported operation type: " + op_type);
   }
   return Status::OK();
 }
@@ -199,7 +200,7 @@ Status Serdes::CreateDatasetOperationNode(std::shared_ptr<DatasetNode> ds, nlohm
   } else if (op_type == kTakeNode) {
     RETURN_IF_NOT_OK(TakeNode::from_json(json_obj, ds, result));
   } else {
-    return Status(StatusCode::kMDUnexpectedError, op_type + " operation is not supported");
+    return Status(StatusCode::kMDUnexpectedError, "Invalid data, unsupported operation type: " + op_type);
   }
   return Status::OK();
 }
@@ -224,7 +225,7 @@ Status Serdes::ConstructSampler(nlohmann::json json_obj, std::shared_ptr<Sampler
   } else if (sampler_name == "WeightedRandomSampler") {
     RETURN_IF_NOT_OK(WeightedRandomSamplerObj::from_json(json_obj, num_samples, sampler));
   } else {
-    return Status(StatusCode::kMDUnexpectedError, sampler_name + "Sampler is not supported");
+    return Status(StatusCode::kMDUnexpectedError, "Invalid data, unsupported sampler type: " + sampler_name);
   }
   return Status::OK();
 }
@@ -235,6 +236,8 @@ Status Serdes::ConstructTensorOps(nlohmann::json json_obj, std::vector<std::shar
     if (item.find("python_module") != item.end()) {
       if (Py_IsInitialized()) {
         RETURN_IF_NOT_OK(PyFuncOp::from_json(item, result));
+      } else {
+        RETURN_STATUS_SYNTAX_ERROR("Python module is not initialized or Pyfunction is not supported on this platform.");
       }
     } else {
       CHECK_FAIL_RETURN_UNEXPECTED(item.find("tensor_op_name") != item.end(), "Failed to find tensor_op_name");
@@ -242,7 +245,8 @@ Status Serdes::ConstructTensorOps(nlohmann::json json_obj, std::vector<std::shar
       std::string op_name = item["tensor_op_name"];
       nlohmann::json op_params = item["tensor_op_params"];
       std::shared_ptr<TensorOperation> operation = nullptr;
-      CHECK_FAIL_RETURN_UNEXPECTED(func_ptr_.find(op_name) != func_ptr_.end(), "Failed to find " + op_name);
+      CHECK_FAIL_RETURN_UNEXPECTED(func_ptr_.find(op_name) != func_ptr_.end(),
+                                   "Invalid data, unsupported operation: " + op_name);
       RETURN_IF_NOT_OK(func_ptr_[op_name](op_params, &operation));
       output.push_back(operation);
       *result = output;
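Every Serdes hunk above follows the same dispatch shape: look a name (op_type, sampler name, tensor op name) up in a table of from_json factories, and fail with "Invalid data, unsupported ..." plus the offending name when the key is absent. A simplified sketch of that lookup-and-report pattern follows; Status, TensorOperation, and the Json alias are stand-ins for the real MindSpore types:

#include <functional>
#include <map>
#include <memory>
#include <string>

struct Status {  // stand-in for mindspore::Status
  bool ok;
  std::string msg;
  static Status OK() { return {true, ""}; }
};
struct TensorOperation {};  // stand-in for the real op interface
using Json = std::string;   // stand-in for nlohmann::json
using Factory = std::function<Status(const Json &, std::shared_ptr<TensorOperation> *)>;

Status CreateOp(const std::map<std::string, Factory> &factories, const std::string &op_name,
                const Json &params, std::shared_ptr<TensorOperation> *out) {
  auto it = factories.find(op_name);
  if (it == factories.end()) {
    // Mirror the improved messages: report the unsupported name, not a bare failure.
    return {false, "Invalid data, unsupported operation: " + op_name};
  }
  return it->second(params, out);
}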
@@ -128,7 +128,7 @@ Status TreeAdapter::BuildExecutionTreeRecur(std::shared_ptr<DatasetNode> ir, std
   std::vector<std::shared_ptr<DatasetOp>> ops;
   RETURN_IF_NOT_OK(ir->Build(&ops));
 
-  CHECK_FAIL_RETURN_UNEXPECTED(!ops.empty(), "Unable to build node.");
+  CHECK_FAIL_RETURN_UNEXPECTED(!ops.empty(), "Unable to build node: " + ir->Name());
 
   (*op) = ops.front();  // return the first op to be added as child by the caller of this function
   RETURN_IF_NOT_OK(tree_->AssociateNode(*op));
@@ -28,7 +28,7 @@ Status TreeAdapterLite::BuildExecutionTreeRecur(std::shared_ptr<DatasetNode> ir,
   std::vector<std::shared_ptr<DatasetOp>> ops;
   RETURN_IF_NOT_OK(ir->Build(&ops));
 
-  CHECK_FAIL_RETURN_UNEXPECTED(!ops.empty(), "Unable to build node.");
+  CHECK_FAIL_RETURN_UNEXPECTED(!ops.empty(), "Unable to build node: " + ir->Name());
 
   (*op) = ops.front();  // return the first op to be added as child by the caller of this function
 
@@ -460,7 +460,7 @@ def test_serdes_exception():
     with pytest.raises(RuntimeError) as msg:
         data2 = ds.deserialize(input_dict=data1_json)
         ds.serialize(data2, "filter_dataset_fail.json")
-    assert "Filter operation is not supported" in str(msg)
+    assert "Invalid data, unsupported operation type: Filter" in str(msg)
     delete_json_files()
 
 