diff --git a/include/api/status.h b/include/api/status.h index ca425936aa1..c949059d34b 100644 --- a/include/api/status.h +++ b/include/api/status.h @@ -118,8 +118,10 @@ class MS_API Status { inline std::string ToString() const; int GetLineOfCode() const; + inline std::string GetFileName() const; inline std::string GetErrDescription() const; inline std::string SetErrDescription(const std::string &err_description); + inline void SetStatusMsg(const std::string &status_msg); MS_API friend std::ostream &operator<<(std::ostream &os, const Status &s); @@ -144,8 +146,10 @@ class MS_API Status { Status(enum StatusCode status_code, const std::vector<char> &status_msg); Status(enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra); std::vector<char> ToCString() const; + std::vector<char> GetFileNameChar() const; std::vector<char> GetErrDescriptionChar() const; std::vector<char> SetErrDescription(const std::vector<char> &err_description); + void SetStatusMsgChar(const std::vector<char> &status_msg); static std::vector<char> CodeAsCString(enum StatusCode c); struct Data; @@ -157,10 +161,12 @@ Status::Status(enum StatusCode status_code, const std::string &status_msg) Status::Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::string &extra) : Status(code, line_of_code, file_name, StringToChar(extra)) {} std::string Status::ToString() const { return CharToString(ToCString()); } +std::string Status::GetFileName() const { return CharToString(GetFileNameChar()); } std::string Status::GetErrDescription() const { return CharToString(GetErrDescriptionChar()); } std::string Status::SetErrDescription(const std::string &err_description) { return CharToString(SetErrDescription(StringToChar(err_description))); } +void Status::SetStatusMsg(const std::string &status_msg) { SetStatusMsgChar(StringToChar(status_msg)); } std::string Status::CodeAsString(enum StatusCode c) { return CharToString(CodeAsCString(c)); } } // namespace mindspore #endif // 
MINDSPORE_INCLUDE_API_STATUS_H diff --git a/mindspore/ccsrc/minddata/dataset/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/CMakeLists.txt index 89588dc4e6f..55d98e10311 100644 --- a/mindspore/ccsrc/minddata/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/CMakeLists.txt @@ -76,6 +76,11 @@ else() endif() endif() +set(MINDDATA_LOG_ADAPTER_SRC ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/util/md_log_adapter.cc) +add_library(md_log_adapter_obj OBJECT ${MINDDATA_LOG_ADAPTER_SRC}) +add_library(md_log_adapter STATIC $<TARGET_OBJECTS:md_log_adapter_obj>) +target_link_libraries(md_log_adapter mindspore_core) + ################## Include sub-modules ############################### add_subdirectory(util) add_subdirectory(core) @@ -200,6 +205,8 @@ endif() set(dataengine_submodules ${dataengine_submodules} CACHE INTERNAL "_c_dataengine objects") add_library(_c_dataengine SHARED ${dataengine_submodules}) +add_dependencies(_c_dataengine md_log_adapter) +target_link_libraries(_c_dataengine PRIVATE md_log_adapter) if(ENABLE_PYTHON) set_target_properties(_c_dataengine PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" diff --git a/mindspore/ccsrc/minddata/dataset/api/python/pybind_register.h b/mindspore/ccsrc/minddata/dataset/api/python/pybind_register.h index 35f79fd166d..70cb95face1 100644 --- a/mindspore/ccsrc/minddata/dataset/api/python/pybind_register.h +++ b/mindspore/ccsrc/minddata/dataset/api/python/pybind_register.h @@ -25,15 +25,16 @@ #include "pybind11/pybind11.h" #include "pybind11/stl.h" +#include "minddata/dataset/util/md_log_adapter.h" namespace py = pybind11; namespace mindspore { namespace dataset { -#define THROW_IF_ERROR(s) \ - do { \ - Status rc = std::move(s); \ - if (rc.IsError()) throw std::runtime_error(rc.ToString()); \ +#define THROW_IF_ERROR(s) \ + do { \ + Status rc = std::move(s); \ + if (rc.IsError()) throw std::runtime_error(MDLogAdapter::Apply(&rc).ToString()); \ } while (false) using PybindDefineFunc = std::function<void(py::module *)>; diff --git 
a/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc index e06b55b6669..c3665a3188e 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc @@ -62,7 +62,7 @@ Status CpuMapJob::RebuildMapErrorMsg(const TensorRow &input_row, const size_t &i std::string abbr_op_name = op_name.substr(0, op_name.length() - 2); err_msg += "map operation: [" + abbr_op_name + "] failed. "; if (input_row.getPath().size() > 0 && !input_row.getPath()[0].empty()) { - err_msg += "The corresponding data files: " + input_row.getPath()[0]; + err_msg += "The corresponding data file is: " + input_row.getPath()[0]; if (input_row.getPath().size() > 1) { std::set path_set; path_set.insert(input_row.getPath()[0]); @@ -80,6 +80,9 @@ Status CpuMapJob::RebuildMapErrorMsg(const TensorRow &input_row, const size_t &i err_msg += "Error description:\n"; } err_msg += tensor_err_msg; + if (abbr_op_name == "PyFunc") { + RETURN_STATUS_ERROR(StatusCode::kMDPyFuncException, err_msg); + } rc->SetErrDescription(err_msg); return *rc; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc index 2d7352a0ec1..da5a1ce04ea 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc @@ -201,12 +201,29 @@ Status GeneratorOp::operator()() { generator_counter_++; } catch (py::error_already_set &e) { eoe = e.matches(PyExc_StopIteration); - // Restore exception to python - e.restore(); // Pop up non StopIteration Python Exception if (!eoe) { - RETURN_STATUS_ERROR(StatusCode::kMDPyFuncException, e.what()); + std::string traceback; + try { + // Construct python-like traceback + py::list tb = 
py::module::import("traceback").attr("format_tb")(e.trace()); + traceback = "Traceback (most recent call last):\n"; + for (auto t : tb) { + traceback += py::reinterpret_borrow(t); + } + traceback += e.what(); + } catch (std::exception &) { + // Back to original exception + traceback = e.what(); + } + + // Restore exception to python + e.restore(); + RETURN_STATUS_ERROR(StatusCode::kMDPyFuncException, traceback); } + + // Restore exception to python + e.restore(); if (num_rows_sampled != -1 && num_rows_sampled != generator_counter_) { if (generator_counter_ == 0) { std::string msg = diff --git a/mindspore/ccsrc/minddata/dataset/util/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/util/CMakeLists.txt index 5a679c45e35..6f7820c3643 100644 --- a/mindspore/ccsrc/minddata/dataset/util/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/util/CMakeLists.txt @@ -1,3 +1,4 @@ file(GLOB_RECURSE _CURRENT_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc") +list(REMOVE_ITEM _CURRENT_SRC_FILES "md_log_adapter.cc") set_property(SOURCE ${_CURRENT_SRC_FILES} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_MD) add_library(utils OBJECT ${_CURRENT_SRC_FILES}) diff --git a/mindspore/ccsrc/minddata/dataset/util/md_log_adapter.cc b/mindspore/ccsrc/minddata/dataset/util/md_log_adapter.cc new file mode 100644 index 00000000000..753ce0bf1fa --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/util/md_log_adapter.cc @@ -0,0 +1,78 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "minddata/dataset/util/md_log_adapter.h" +#include "minddata/dataset/util/status.h" + +#include + +namespace mindspore { +namespace dataset { +Status MDLogAdapter::Apply(Status *rc) { + std::string status_msg = ConstructMsg(rc->StatusCode(), rc->CodeAsString(rc->StatusCode()), "", rc->GetLineOfCode(), + rc->GetFileName(), rc->GetErrDescription()); + rc->SetStatusMsg(status_msg); + return *rc; +} + +std::string MDLogAdapter::ConstructMsg(const enum StatusCode &status_code, const std::string &code_as_string, + const std::string &status_msg, const int line_of_code, + const std::string &file_name, const std::string &err_description) { + std::ostringstream ss; + std::string kSplitLine = std::string(66, '-') + "\n"; + std::string err_ori = err_description; + + /// Python Runtime Error + ss << code_as_string << ". \n\n"; + + /// Python Stack + std::string user_err; + std::string user_stack; + if (status_code == StatusCode::kMDPyFuncException) { + std::string at_stack = "\n\nAt:\n"; + if (err_ori.find(at_stack) != std::string::npos) { + user_stack = err_ori.substr(0, err_ori.find(at_stack)); + user_err = "Execute user Python code failed, check 'Python Call Stack' above."; + ss << kSplitLine << "- Python Call Stack: \n" << kSplitLine; + ss << user_stack << "\n\n"; + } else { + user_err = err_ori; + } + } + + /// Summary Message + ss << kSplitLine << "- Dataset Pipeline Error Message: \n" << kSplitLine; + if (!user_err.empty()) { + ss << "[ERROR] " + user_err + "\n\n"; + } else { + user_err = err_description; + if (*user_err.rbegin() != '.') { + user_err += '.'; + } + ss << "[ERROR] " + user_err + "\n\n"; + } + + /// C++ Stack + if (!file_name.empty()) { + ss << kSplitLine << "- C++ Call Stack: (For framework developers) \n" << kSplitLine; + std::string cpp_trace = std::string(file_name) + "(" + std::to_string(line_of_code) + ").\n"; + ss << cpp_trace 
<< "\n\n"; + } + + return ss.str(); +} +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/util/md_log_adapter.h b/mindspore/ccsrc/minddata/dataset/util/md_log_adapter.h new file mode 100644 index 00000000000..d93ecd782e0 --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/util/md_log_adapter.h @@ -0,0 +1,41 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_MD_LOG_ADAPTER_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_MD_LOG_ADAPTER_H_ + +#include +#include +#include + +#include "include/api/status.h" + +namespace mindspore { +namespace dataset { +class MDLogAdapter { + public: + MDLogAdapter() = default; + + ~MDLogAdapter() = default; + + static Status Apply(Status *rc); + + static std::string ConstructMsg(const enum StatusCode &status_code, const std::string &code_as_string, + const std::string &status_msg, const int line_of_code, const std::string &file_name, + const std::string &err_description); +}; +} // namespace dataset +} // namespace mindspore +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_MD_LOG_ADAPTER_H diff --git a/mindspore/ccsrc/minddata/dataset/util/task_manager.cc b/mindspore/ccsrc/minddata/dataset/util/task_manager.cc index 8161bf4d3f0..ccf768facbf 100644 --- a/mindspore/ccsrc/minddata/dataset/util/task_manager.cc +++ b/mindspore/ccsrc/minddata/dataset/util/task_manager.cc @@ -218,8 +218,10 @@ void 
TaskManager::InterruptMaster(const Status &rc) { master->caught_severe_exception_ = true; // Move log error here for some scenarios didn't call GetMasterThreadRc if (master->rc_.StatusCode() != mindspore::StatusCode::kMDPyFuncException) { +#ifndef ENABLE_PYTHON // use python operation, the error had been raised in python layer. So disable log prompt here. MS_LOG(ERROR) << "Task is terminated with err msg (more details are in info level logs): " << master->rc_; +#endif } } } diff --git a/mindspore/ccsrc/minddata/mindrecord/CMakeLists.txt b/mindspore/ccsrc/minddata/mindrecord/CMakeLists.txt index 2253146fc76..d58570137e6 100644 --- a/mindspore/ccsrc/minddata/mindrecord/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/mindrecord/CMakeLists.txt @@ -34,6 +34,7 @@ endif() set_property(SOURCE ${DIR_LIB_SRCS} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_MD) add_library(mindrecord_obj OBJECT ${DIR_LIB_SRCS}) add_library(_c_mindrecord SHARED $) +add_dependencies(_c_mindrecord md_log_adapter) set_target_properties(_c_mindrecord PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" @@ -48,6 +49,7 @@ else() mindspore::protobuf) endif() target_link_libraries(_c_mindrecord PRIVATE mindspore_core) +target_link_libraries(_c_mindrecord PRIVATE md_log_adapter) if(USE_GLOG) target_link_libraries(_c_mindrecord PRIVATE mindspore::glog) else() diff --git a/mindspore/ccsrc/minddata/mindrecord/common/shard_pybind.cc b/mindspore/ccsrc/minddata/mindrecord/common/shard_pybind.cc index 3485a0228bc..8bd2ddf28fd 100644 --- a/mindspore/ccsrc/minddata/mindrecord/common/shard_pybind.cc +++ b/mindspore/ccsrc/minddata/mindrecord/common/shard_pybind.cc @@ -18,6 +18,7 @@ #include #include "utils/ms_utils.h" +#include "minddata/dataset/util/md_log_adapter.h" #include "minddata/mindrecord/include/common/log_adapter.h" #include "minddata/mindrecord/include/common/shard_utils.h" #include "minddata/mindrecord/include/shard_error.h" @@ -30,13 +31,14 @@ #include "pybind11/stl.h" namespace py = 
pybind11; +using mindspore::dataset::MDLogAdapter; namespace mindspore { namespace mindrecord { -#define THROW_IF_ERROR(s) \ - do { \ - Status rc = std::move(s); \ - if (rc.IsError()) throw std::runtime_error(rc.ToString()); \ +#define THROW_IF_ERROR(s) \ + do { \ + Status rc = std::move(s); \ + if (rc.IsError()) throw std::runtime_error(MDLogAdapter::Apply(&rc).ToString()); \ } while (false) void BindSchema(py::module *m) { diff --git a/mindspore/core/utils/status.cc b/mindspore/core/utils/status.cc index 10f85a9efac..4ddedecc42f 100644 --- a/mindspore/core/utils/status.cc +++ b/mindspore/core/utils/status.cc @@ -26,6 +26,7 @@ #endif #include #include +#include namespace mindspore { struct Status::Data { @@ -45,7 +46,7 @@ static std::map status_info_map = { {kMDShapeMisMatch, "Shape is incorrect"}, {kMDInterrupted, "Interrupted system call"}, {kMDNoSpace, "No space left on device"}, - {kMDPyFuncException, "Exception thrown from PyFunc"}, + {kMDPyFuncException, "Exception thrown from user defined Python function in dataset"}, {kMDDuplicateKey, "Duplicate key"}, {kMDPythonInterpreterFailure, ""}, {kMDTDTPushFailure, "Unexpected error"}, @@ -58,7 +59,7 @@ static std::map status_info_map = { {kMDBuddySpaceFull, "BuddySpace full"}, {kMDNetWorkError, "Network error"}, {kMDNotImplementedYet, "Unexpected error"}, - {kMDUnexpectedError, "Unexpected error"}, + {kMDUnexpectedError, "Exception thrown from dataset pipeline. 
Refer to 'Dataset Pipeline Error Message'"}, // ME {kMEFailed, "Common error code."}, {kMEInvalidInput, "Invalid input."}, @@ -157,6 +158,13 @@ int Status::GetLineOfCode() const { return data_->line_of_code; } +std::vector Status::GetFileNameChar() const { + if (data_ == nullptr) { + return std::vector(); + } + return StringToChar(data_->file_name); +} + std::vector Status::GetErrDescriptionChar() const { if (data_ == nullptr) { return std::vector(); @@ -204,6 +212,13 @@ std::vector Status::SetErrDescription(const std::vector &err_descrip return StringToChar(data_->status_msg); } +void Status::SetStatusMsgChar(const std::vector &status_msg) { + if (data_ == nullptr) { + return; + } + data_->status_msg = CharToString(status_msg); +} + bool Status::operator==(const Status &other) const { if (data_ == nullptr && other.data_ == nullptr) { return true; diff --git a/mindspore/lite/test/config_level0/cropped_size.cfg b/mindspore/lite/test/config_level0/cropped_size.cfg index b8db76d65a9..9e9224c5ab9 100644 --- a/mindspore/lite/test/config_level0/cropped_size.cfg +++ b/mindspore/lite/test/config_level0/cropped_size.cfg @@ -1,2 +1,2 @@ Note: This is the mindspore Lite inference framework size threshold. Offline review is required before modify this value!!! -1097000 +1100096 diff --git a/mindspore/lite/test/config_level1/cropped_size.cfg b/mindspore/lite/test/config_level1/cropped_size.cfg index 9867d0d0969..122131dd8a0 100644 --- a/mindspore/lite/test/config_level1/cropped_size.cfg +++ b/mindspore/lite/test/config_level1/cropped_size.cfg @@ -1,2 +1,2 @@ Note: This is the mindspore Lite inference framework size threshold. Modifying this threshold requires meeting review. 
-1097000 +1100096 diff --git a/mindspore/python/mindspore/dataset/core/py_util_helpers.py b/mindspore/python/mindspore/dataset/core/py_util_helpers.py index 82858f0e3d5..f1406d6cee2 100644 --- a/mindspore/python/mindspore/dataset/core/py_util_helpers.py +++ b/mindspore/python/mindspore/dataset/core/py_util_helpers.py @@ -53,9 +53,10 @@ class ExceptionHandler: def reraise(self): """Reraise the caught exception in the main thread/process""" - # Error message like: "Caught ValueError in GeneratorDataset worker process. Original Traceback:". - err_msg = "Caught {} {}.\nOriginal {}".format( - self.except_type.__name__, self.where, self.except_msg) + # Find the last traceback which is more useful to user. + index = [i for i in range(len(self.except_msg)) if self.except_msg.startswith('Traceback', i)] + err_msg = "{}".format(self.except_msg[index[-1]:]).strip() + if self.except_type == KeyError: # As KeyError will call its repr() function automatically, which makes stack info hard to read. err_msg = KeyErrorParse(err_msg) diff --git a/mindspore/python/mindspore/dataset/core/validator_helpers.py b/mindspore/python/mindspore/dataset/core/validator_helpers.py index 28998c19fe3..a080a6f66c0 100644 --- a/mindspore/python/mindspore/dataset/core/validator_helpers.py +++ b/mindspore/python/mindspore/dataset/core/validator_helpers.py @@ -760,7 +760,7 @@ def check_tensor_op(param, param_name): def check_c_tensor_op(param, param_name): """check whether param is a tensor op or a callable Python function but not a py_transform""" if callable(param) and str(param).find("py_transform") >= 0: - raise TypeError("{0} is a py_transform op which is not allow to use.".format(param_name)) + raise TypeError("{0} is a py_transform op which is not allowed to use.".format(param_name)) if not isinstance(param, cde.TensorOp) and not callable(param) and not getattr(param, 'parse', None): raise TypeError("{0} is neither a c_transform op (TensorOperation) nor a callable pyfunc.".format(param_name)) 
diff --git a/mindspore/python/mindspore/mindrecord/filewriter.py b/mindspore/python/mindspore/mindrecord/filewriter.py index dd88e79bfdc..5745926713b 100644 --- a/mindspore/python/mindspore/mindrecord/filewriter.py +++ b/mindspore/python/mindspore/mindrecord/filewriter.py @@ -266,7 +266,7 @@ class FileWriter: if not isinstance(raw_data, list): raise ParamTypeError('raw_data', 'list') if self._flush and not self._append: - raise RuntimeError("Unexpected error. Not allow to call `write_raw_data` on flushed MindRecord files." \ + raise RuntimeError("Not allowed to call `write_raw_data` on flushed MindRecord files." \ "When creating new Mindrecord files, please remove `commit` before `write_raw_data`." \ "In other cases, when appending to existing MindRecord files, " \ "please call `open_for_append` first and then `write_raw_data`.") diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index ae52e4f13e7..71c1beab724 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -282,7 +282,8 @@ set(ut_objects ${CORE_OBJECT_LIST} $ $) if(ENABLE_MINDDATA) - set(ut_objects ${ut_objects} ${dataengine_submodules} $) + set(ut_objects ${ut_objects} ${dataengine_submodules} $ + $) endif() add_executable(ut_tests ${ut_objects}) diff --git a/tests/ut/python/dataset/test_autotune.py b/tests/ut/python/dataset/test_autotune.py index 49bf43f74fb..d783208b1f3 100644 --- a/tests/ut/python/dataset/test_autotune.py +++ b/tests/ut/python/dataset/test_autotune.py @@ -184,7 +184,7 @@ class TestAutotuneWithProfiler: with pytest.raises(RuntimeError) as excinfo: md_profiler.init() - assert "Unexpected error. Stop MD Autotune before initializing the MD Profiler." in str(excinfo.value) + assert "Stop MD Autotune before initializing the MD Profiler." 
in str(excinfo.value) @staticmethod def test_autotune_simple_pipeline(): diff --git a/tests/ut/python/dataset/test_cache_map.py b/tests/ut/python/dataset/test_cache_map.py index a17fa0ad509..9882ec59965 100644 --- a/tests/ut/python/dataset/test_cache_map.py +++ b/tests/ut/python/dataset/test_cache_map.py @@ -648,7 +648,7 @@ def test_cache_map_failure11(): num_iter = 0 for _ in ds1.create_dict_iterator(num_epochs=1): num_iter += 1 - assert "Unexpected error. Server is not set up with spill support" in str( + assert "Server is not set up with spill support" in str( e.value) assert num_iter == 0 diff --git a/tests/ut/python/dataset/test_datasets_ag_news.py b/tests/ut/python/dataset/test_datasets_ag_news.py index ee58b375de7..f6db27b39cf 100644 --- a/tests/ut/python/dataset/test_datasets_ag_news.py +++ b/tests/ut/python/dataset/test_datasets_ag_news.py @@ -131,7 +131,7 @@ def test_ag_news_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.AGNewsDataset(FILE_DIR, usage='test', shuffle=False) @@ -140,7 +140,7 @@ def test_ag_news_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.AGNewsDataset(FILE_DIR, usage='test', shuffle=False) @@ -149,7 +149,7 @@ def test_ag_news_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_amazon_review.py b/tests/ut/python/dataset/test_datasets_amazon_review.py index c8f29645cf5..7abd45b2791 100644 --- a/tests/ut/python/dataset/test_datasets_amazon_review.py +++ b/tests/ut/python/dataset/test_datasets_amazon_review.py @@ -165,7 +165,7 @@ def test_amazon_review_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.AmazonReviewDataset(FULL_DIR, usage='test', shuffle=False) @@ -174,7 +174,7 @@ def test_amazon_review_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.AmazonReviewDataset(FULL_DIR, usage='test', shuffle=False) @@ -183,7 +183,7 @@ def test_amazon_review_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) def test_amazon_review_dataset_pipeline(): diff --git a/tests/ut/python/dataset/test_datasets_celeba.py b/tests/ut/python/dataset/test_datasets_celeba.py index 925316747ab..008f3af92cb 100644 --- a/tests/ut/python/dataset/test_datasets_celeba.py +++ b/tests/ut/python/dataset/test_datasets_celeba.py @@ -158,7 +158,7 @@ def test_celeba_dataset_exception_file_path(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data = ds.CelebADataset(DATA_DIR, shuffle=False) @@ -168,7 +168,7 @@ def test_celeba_dataset_exception_file_path(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.CelebADataset(DATA_DIR, shuffle=False) @@ -177,7 +177,7 @@ def test_celeba_dataset_exception_file_path(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) def test_celeba_sampler_exception(): diff --git a/tests/ut/python/dataset/test_datasets_cifarop.py b/tests/ut/python/dataset/test_datasets_cifarop.py index 1524fcd8c48..01fe3fc3e90 100644 --- a/tests/ut/python/dataset/test_datasets_cifarop.py +++ b/tests/ut/python/dataset/test_datasets_cifarop.py @@ -487,7 +487,7 @@ def test_cifar_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.Cifar10Dataset(DATA_DIR_10) @@ -497,7 +497,7 @@ def test_cifar_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.Cifar100Dataset(DATA_DIR_100) @@ -507,7 +507,7 @@ def test_cifar_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data = ds.Cifar100Dataset(DATA_DIR_100) @@ -517,7 +517,7 @@ def test_cifar_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.Cifar100Dataset(DATA_DIR_100) @@ -527,7 +527,7 @@ def test_cifar_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) def test_cifar10_pk_sampler_get_dataset_size(): diff --git a/tests/ut/python/dataset/test_datasets_cityscapes.py b/tests/ut/python/dataset/test_datasets_cityscapes.py index 970abd1fb2d..7dbbf0aff29 100644 --- a/tests/ut/python/dataset/test_datasets_cityscapes.py +++ b/tests/ut/python/dataset/test_datasets_cityscapes.py @@ -236,7 +236,7 @@ def test_cityscapes_exception(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files:" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is:" in str(e) try: data = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task) @@ -246,7 +246,7 @@ def test_cityscapes_exception(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files:" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is:" in str(e) def test_cityscapes_param(): diff --git a/tests/ut/python/dataset/test_datasets_clue.py b/tests/ut/python/dataset/test_datasets_clue.py index 8f803bc06bc..6c4da1f6ec9 100644 --- a/tests/ut/python/dataset/test_datasets_clue.py +++ b/tests/ut/python/dataset/test_datasets_clue.py @@ -409,7 +409,7 @@ def test_clue_exception_file_path(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.CLUEDataset(train_file, task='AFQMC', usage='train') @@ -418,7 +418,7 @@ def test_clue_exception_file_path(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.CLUEDataset(train_file, task='AFQMC', usage='train') @@ -427,7 +427,7 @@ def test_clue_exception_file_path(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_cmu_arctic.py b/tests/ut/python/dataset/test_datasets_cmu_arctic.py index f1635641bc0..4877524e8df 100644 --- a/tests/ut/python/dataset/test_datasets_cmu_arctic.py +++ b/tests/ut/python/dataset/test_datasets_cmu_arctic.py @@ -147,7 +147,7 @@ def test_cmu_arctic_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.CMUArcticDataset(DATA_DIR) data = data.map(operations=exception_func, input_columns=["waveform"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_coco.py b/tests/ut/python/dataset/test_datasets_coco.py index 67bd6bb4b4c..6e29d246629 100644 --- a/tests/ut/python/dataset/test_datasets_coco.py +++ b/tests/ut/python/dataset/test_datasets_coco.py @@ -429,7 +429,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Detection") @@ -439,7 +439,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Detection") @@ -448,7 +448,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Detection") @@ -457,7 +457,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Stuff") @@ -466,7 +466,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Stuff") @@ -476,7 +476,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Stuff") @@ -485,7 +485,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Stuff") @@ -494,7 +494,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=KEYPOINT_FILE, task="Keypoint") @@ -503,7 +503,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=KEYPOINT_FILE, task="Keypoint") @@ -513,7 +513,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=KEYPOINT_FILE, task="Keypoint") @@ -522,7 +522,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=KEYPOINT_FILE, task="Keypoint") @@ -531,7 +531,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=PANOPTIC_FILE, task="Panoptic") @@ -540,7 +540,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=PANOPTIC_FILE, task="Panoptic") @@ -550,7 +550,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=PANOPTIC_FILE, task="Panoptic") @@ -559,7 +559,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=PANOPTIC_FILE, task="Panoptic") @@ -568,7 +568,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=PANOPTIC_FILE, task="Panoptic") @@ -577,7 +577,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=CAPTIONS_FILE, task="Captioning") @@ -586,7 +586,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data1 = ds.CocoDataset(DATA_DIR, annotation_file=CAPTIONS_FILE, task="Captioning") @@ -595,7 +595,7 @@ def test_coco_case_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) if __name__ == '__main__': diff --git a/tests/ut/python/dataset/test_datasets_conll2000.py b/tests/ut/python/dataset/test_datasets_conll2000.py index f170a4bc741..d168518ff93 100644 --- a/tests/ut/python/dataset/test_datasets_conll2000.py +++ b/tests/ut/python/dataset/test_datasets_conll2000.py @@ -324,7 +324,7 @@ def test_conll2000_dataset_exceptions(): data = data.map(operations=exception_func, input_columns=["word"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(error_info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(error_info.value) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_csv.py b/tests/ut/python/dataset/test_datasets_csv.py index 90dd1687e72..a36e0a67e10 100644 --- a/tests/ut/python/dataset/test_datasets_csv.py +++ b/tests/ut/python/dataset/test_datasets_csv.py @@ -325,7 +325,7 @@ def test_csv_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.CSVDataset( @@ -338,7 +338,7 @@ def test_csv_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data = ds.CSVDataset( @@ -351,7 +351,7 @@ def test_csv_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.CSVDataset( @@ -364,7 +364,7 @@ def test_csv_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) def test_csv_dataset_duplicate_columns(): diff --git a/tests/ut/python/dataset/test_datasets_dbpedia.py b/tests/ut/python/dataset/test_datasets_dbpedia.py index a168ca77b3f..93969b21d97 100644 --- a/tests/ut/python/dataset/test_datasets_dbpedia.py +++ b/tests/ut/python/dataset/test_datasets_dbpedia.py @@ -114,7 +114,7 @@ def test_dbpedia_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.DBpediaDataset(DATA_DIR, usage="test", shuffle=False) data = data.map(operations=exception_func, input_columns=["content"], num_parallel_workers=1) @@ -122,7 +122,7 @@ def test_dbpedia_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_div2k.py b/tests/ut/python/dataset/test_datasets_div2k.py index ab3ebcc6ba5..9b4e91b10fc 100644 --- a/tests/ut/python/dataset/test_datasets_div2k.py +++ b/tests/ut/python/dataset/test_datasets_div2k.py @@ -232,7 +232,7 @@ def test_div2k_exception(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files:" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is:" in str(e) try: data = ds.DIV2KDataset(DATASET_DIR, usage=usage, downgrade=downgrade, scale=scale) @@ -242,7 +242,7 @@ def test_div2k_exception(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files:" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is:" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_emnist.py b/tests/ut/python/dataset/test_datasets_emnist.py index 67616fa4c60..12a28108c94 100644 --- a/tests/ut/python/dataset/test_datasets_emnist.py +++ b/tests/ut/python/dataset/test_datasets_emnist.py @@ -359,7 +359,7 @@ def test_emnist_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.EMnistDataset(DATA_DIR, "mnist", "train") data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_enwik9.py b/tests/ut/python/dataset/test_datasets_enwik9.py index d9874a3591e..0197ddb89c7 100644 --- a/tests/ut/python/dataset/test_datasets_enwik9.py +++ b/tests/ut/python/dataset/test_datasets_enwik9.py @@ -279,7 +279,7 @@ def test_enwik9_dataset_exceptions(): data = data.map(operations=exception_func, 
input_columns=["text"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(error_info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(error_info.value) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_fashion_mnist.py b/tests/ut/python/dataset/test_datasets_fashion_mnist.py index 8921d123972..80e09d34302 100644 --- a/tests/ut/python/dataset/test_datasets_fashion_mnist.py +++ b/tests/ut/python/dataset/test_datasets_fashion_mnist.py @@ -228,7 +228,7 @@ def test_fashion_mnist_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.FashionMnistDataset(DATA_DIR) data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_flickr.py b/tests/ut/python/dataset/test_datasets_flickr.py index 9dbd4f0d51e..d80f1f3505f 100644 --- a/tests/ut/python/dataset/test_datasets_flickr.py +++ b/tests/ut/python/dataset/test_datasets_flickr.py @@ -153,7 +153,7 @@ def test_flickr30k_dataset_exception(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.FlickrDataset(FLICKR30K_DATASET_DIR, FLICKR30K_ANNOTATION_FILE_1, decode=True) @@ -163,7 +163,7 @@ def test_flickr30k_dataset_exception(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_gtzan.py b/tests/ut/python/dataset/test_datasets_gtzan.py index f9e367ee459..f93c1d8a208 100644 --- a/tests/ut/python/dataset/test_datasets_gtzan.py +++ b/tests/ut/python/dataset/test_datasets_gtzan.py @@ -150,7 +150,7 @@ def test_gtzan_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.GTZANDataset(DATA_DIR) diff --git a/tests/ut/python/dataset/test_datasets_imagefolder.py b/tests/ut/python/dataset/test_datasets_imagefolder.py index 399cff6172b..10457f4322e 100644 --- a/tests/ut/python/dataset/test_datasets_imagefolder.py +++ b/tests/ut/python/dataset/test_datasets_imagefolder.py @@ -876,7 +876,7 @@ def test_imagefolder_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.ImageFolderDataset(DATA_DIR) @@ -887,7 +887,7 @@ def test_imagefolder_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.ImageFolderDataset(DATA_DIR) @@ -897,7 +897,7 @@ def test_imagefolder_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) data_dir_invalid = "../data/dataset/testPK" try: diff --git a/tests/ut/python/dataset/test_datasets_imdb.py b/tests/ut/python/dataset/test_datasets_imdb.py index 1d9c4482d0e..96569443a31 100644 --- a/tests/ut/python/dataset/test_datasets_imdb.py +++ b/tests/ut/python/dataset/test_datasets_imdb.py @@ -686,7 +686,7 @@ def test_imdb_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.IMDBDataset(DATA_DIR) @@ -697,7 +697,7 @@ def test_imdb_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) data_dir_invalid = "../data/dataset/IMDBDATASET" try: diff --git a/tests/ut/python/dataset/test_datasets_iwslt.py b/tests/ut/python/dataset/test_datasets_iwslt.py index 380a0aca702..6a6c1ef3c77 100644 --- a/tests/ut/python/dataset/test_datasets_iwslt.py +++ b/tests/ut/python/dataset/test_datasets_iwslt.py @@ -119,7 +119,7 @@ def test_iwslt2016_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.IWSLT2016Dataset(DATA_IWSLT2016_DIR, usage='train', language_pair=["de", "en"], shuffle=False) @@ -128,7 +128,7 @@ def test_iwslt2016_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) def test_iwslt2017_dataset_basic(): @@ -231,7 +231,7 @@ def test_iwslt2017_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.IWSLT2017Dataset(DATA_IWSLT2017_DIR, usage='train', language_pair=["de", "en"], shuffle=False) @@ -240,7 +240,7 @@ def test_iwslt2017_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_kmnist.py b/tests/ut/python/dataset/test_datasets_kmnist.py index 38268ee66f7..e75d1b4cf60 100644 --- a/tests/ut/python/dataset/test_datasets_kmnist.py +++ b/tests/ut/python/dataset/test_datasets_kmnist.py @@ -236,7 +236,7 @@ def test_kmnist_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.KMnistDataset(DATA_DIR) data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_libri_tts.py b/tests/ut/python/dataset/test_datasets_libri_tts.py index c00c76341b0..61657d0b38e 100644 --- a/tests/ut/python/dataset/test_datasets_libri_tts.py +++ b/tests/ut/python/dataset/test_datasets_libri_tts.py @@ -149,7 +149,7 @@ def test_libri_tts_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.LibriTTSDataset(DATA_DIR) data = 
data.map(operations=exception_func, input_columns=["waveform"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_lj_speech.py b/tests/ut/python/dataset/test_datasets_lj_speech.py index f43c48718dc..b48026f01ff 100644 --- a/tests/ut/python/dataset/test_datasets_lj_speech.py +++ b/tests/ut/python/dataset/test_datasets_lj_speech.py @@ -124,7 +124,7 @@ def test_lj_speech_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.LJSpeechDataset(DATA_DIR) data = data.map(operations=exception_func, input_columns=["waveform"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_lsun.py b/tests/ut/python/dataset/test_datasets_lsun.py index ff7e1c69642..9753a1631b8 100644 --- a/tests/ut/python/dataset/test_datasets_lsun.py +++ b/tests/ut/python/dataset/test_datasets_lsun.py @@ -561,7 +561,7 @@ def test_lsun_exception_map(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.LSUNDataset(DATA_DIR) @@ -573,7 +573,7 @@ def test_lsun_exception_map(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.LSUNDataset(DATA_DIR) @@ -583,7 +583,7 @@ def test_lsun_exception_map(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == '__main__': diff --git a/tests/ut/python/dataset/test_datasets_manifestop.py b/tests/ut/python/dataset/test_datasets_manifestop.py index b479fdd9f7c..af4dffe29f8 100644 --- a/tests/ut/python/dataset/test_datasets_manifestop.py +++ b/tests/ut/python/dataset/test_datasets_manifestop.py @@ -179,7 +179,7 @@ def test_manifest_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.ManifestDataset(DATA_FILE) @@ -189,7 +189,7 @@ def test_manifest_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.ManifestDataset(DATA_FILE) @@ -198,7 +198,7 @@ def test_manifest_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) NO_SOURCE_DATA_FILE = "../data/dataset/testManifestData/invalidNoSource.manifest" try: diff --git a/tests/ut/python/dataset/test_datasets_mnist.py b/tests/ut/python/dataset/test_datasets_mnist.py index 8b1b7c066b0..133021d897d 100644 --- a/tests/ut/python/dataset/test_datasets_mnist.py +++ b/tests/ut/python/dataset/test_datasets_mnist.py @@ -217,7 +217,7 @@ def test_mnist_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.MnistDataset(DATA_DIR) data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_omniglot.py b/tests/ut/python/dataset/test_datasets_omniglot.py index 027ae8f99ea..d49f01b51da 100644 --- a/tests/ut/python/dataset/test_datasets_omniglot.py +++ b/tests/ut/python/dataset/test_datasets_omniglot.py @@ -453,7 +453,7 @@ def test_omniglot_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str( + assert "map operation: [PyFunc] failed. The corresponding data file is" in str( e) try: @@ -466,7 +466,7 @@ def test_omniglot_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.OmniglotDataset(DATA_DIR) @@ -476,7 +476,7 @@ def test_omniglot_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == '__main__': diff --git a/tests/ut/python/dataset/test_datasets_penn_treebank.py b/tests/ut/python/dataset/test_datasets_penn_treebank.py index 5e5d26aa968..5358bbf695e 100644 --- a/tests/ut/python/dataset/test_datasets_penn_treebank.py +++ b/tests/ut/python/dataset/test_datasets_penn_treebank.py @@ -361,7 +361,7 @@ def test_penn_treebank_dataset_exceptions(): data = data.map(operations=exception_func, input_columns=["text"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(error_info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(error_info.value) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_qmnist.py b/tests/ut/python/dataset/test_datasets_qmnist.py index 762c2546820..87ae1e77c63 100644 --- a/tests/ut/python/dataset/test_datasets_qmnist.py +++ b/tests/ut/python/dataset/test_datasets_qmnist.py @@ -273,7 +273,7 @@ def test_qmnist_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.QMnistDataset(DATA_DIR, "train", True) data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_sbu.py b/tests/ut/python/dataset/test_datasets_sbu.py index c163e535612..592079c64ff 100644 --- a/tests/ut/python/dataset/test_datasets_sbu.py +++ b/tests/ut/python/dataset/test_datasets_sbu.py @@ -232,7 +232,7 @@ def test_sbu_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): dataset = ds.SBUDataset(DATA_DIR, decode=True) dataset = 
dataset.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_semeion.py b/tests/ut/python/dataset/test_datasets_semeion.py index 6cbe4469cd9..60bbb62261e 100644 --- a/tests/ut/python/dataset/test_datasets_semeion.py +++ b/tests/ut/python/dataset/test_datasets_semeion.py @@ -207,7 +207,7 @@ def test_semeion_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.SemeionDataset(DATA_DIR_SEMEION) @@ -217,7 +217,7 @@ def test_semeion_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) def test_semeion_pipeline(): diff --git a/tests/ut/python/dataset/test_datasets_sogou_news.py b/tests/ut/python/dataset/test_datasets_sogou_news.py index 0ea98ec2846..42e6a5080f7 100644 --- a/tests/ut/python/dataset/test_datasets_sogou_news.py +++ b/tests/ut/python/dataset/test_datasets_sogou_news.py @@ -153,7 +153,7 @@ def test_sogou_news_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.SogouNewsDataset(DATA_SOGOU_NEWS_DIR, usage='test', shuffle=False) @@ -162,7 +162,7 @@ def test_sogou_news_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data = ds.SogouNewsDataset(DATA_SOGOU_NEWS_DIR, usage='test', shuffle=False) @@ -171,7 +171,7 @@ def test_sogou_news_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_speech_commands.py b/tests/ut/python/dataset/test_datasets_speech_commands.py index 4af9b1a82b4..71e2b8010e8 100644 --- a/tests/ut/python/dataset/test_datasets_speech_commands.py +++ b/tests/ut/python/dataset/test_datasets_speech_commands.py @@ -124,7 +124,7 @@ def test_speech_commands_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files." + error_msg_8 = "The corresponding data file is." with pytest.raises(RuntimeError, match=error_msg_8): data = ds.SpeechCommandsDataset(DATA_DIR) data = data.map(operations=exception_func, input_columns=["waveform"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_squad.py b/tests/ut/python/dataset/test_datasets_squad.py index 8a06d21c989..a9ca04c3e27 100644 --- a/tests/ut/python/dataset/test_datasets_squad.py +++ b/tests/ut/python/dataset/test_datasets_squad.py @@ -205,7 +205,7 @@ def test_squad_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" \ + assert "map operation: [PyFunc] failed. The corresponding data file is" \ in str(e) try: @@ -216,7 +216,7 @@ def test_squad_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" \ + assert "map operation: [PyFunc] failed. 
The corresponding data file is" \ in str(e) try: @@ -227,7 +227,7 @@ def test_squad_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" \ + assert "map operation: [PyFunc] failed. The corresponding data file is" \ in str(e) try: @@ -238,7 +238,7 @@ def test_squad_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" \ + assert "map operation: [PyFunc] failed. The corresponding data file is" \ in str(e) diff --git a/tests/ut/python/dataset/test_datasets_stl10.py b/tests/ut/python/dataset/test_datasets_stl10.py index 442202c5758..daa0375e6fb 100644 --- a/tests/ut/python/dataset/test_datasets_stl10.py +++ b/tests/ut/python/dataset/test_datasets_stl10.py @@ -305,7 +305,7 @@ def test_stl10_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): all_data = ds.STL10Dataset(DATA_DIR, "all") all_data = all_data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_datasets_tedlium.py b/tests/ut/python/dataset/test_datasets_tedlium.py index 11712f61fb8..e7f9de66859 100644 --- a/tests/ut/python/dataset/test_datasets_tedlium.py +++ b/tests/ut/python/dataset/test_datasets_tedlium.py @@ -195,7 +195,7 @@ def test_tedlium_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE12, RELEASE1) @@ -205,7 +205,7 @@ def test_tedlium_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. 
The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE12, RELEASE2) @@ -215,7 +215,7 @@ def test_tedlium_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE12, RELEASE2) @@ -225,7 +225,7 @@ def test_tedlium_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE3, RELEASE3) @@ -235,7 +235,7 @@ def test_tedlium_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE3, RELEASE3) @@ -245,7 +245,7 @@ def test_tedlium_exception_file_path(): num_rows += 1 assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) def test_tedlium_extensions(): diff --git a/tests/ut/python/dataset/test_datasets_textfileop.py b/tests/ut/python/dataset/test_datasets_textfileop.py index cb28c404e47..7d88de3fc56 100644 --- a/tests/ut/python/dataset/test_datasets_textfileop.py +++ b/tests/ut/python/dataset/test_datasets_textfileop.py @@ -327,7 +327,7 @@ def test_textline_dataset_exceptions(): data = data.map(operations=exception_func, input_columns=["text"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(error_info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(error_info.value) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_tfrecord.py b/tests/ut/python/dataset/test_datasets_tfrecord.py index 6cefa055ab3..90cd14ff89a 100644 --- a/tests/ut/python/dataset/test_datasets_tfrecord.py +++ b/tests/ut/python/dataset/test_datasets_tfrecord.py @@ -437,7 +437,7 @@ def test_tfrecord_exception(): data = data.map(operations=exception_func, input_columns=["col_1d"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(info.value) with pytest.raises(RuntimeError) as info: schema = ds.Schema() @@ -448,7 +448,7 @@ def test_tfrecord_exception(): data = data.map(operations=exception_func, input_columns=["col_2d"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(info.value) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(info.value) with pytest.raises(RuntimeError) as info: schema = ds.Schema() @@ -459,7 +459,7 @@ def test_tfrecord_exception(): data = data.map(operations=exception_func, input_columns=["col_3d"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(info.value) if __name__ == '__main__': diff --git a/tests/ut/python/dataset/test_datasets_udposop.py b/tests/ut/python/dataset/test_datasets_udposop.py index d8618b66bb5..a8d986f4a2e 100644 --- a/tests/ut/python/dataset/test_datasets_udposop.py +++ b/tests/ut/python/dataset/test_datasets_udposop.py @@ -311,7 +311,7 @@ def test_udpos_dataset_exceptions(): data = data.map(operations=exception_func, input_columns=["word"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(error_info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(error_info.value) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_voc.py b/tests/ut/python/dataset/test_datasets_voc.py index 129b86772d3..6aa6a03f99f 100644 --- a/tests/ut/python/dataset/test_datasets_voc.py +++ b/tests/ut/python/dataset/test_datasets_voc.py @@ -296,7 +296,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False) @@ -306,7 +306,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False) @@ -315,7 +315,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False) @@ -324,7 +324,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False) @@ -333,7 +333,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False) @@ -342,7 +342,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False) @@ -352,7 +352,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False) @@ -361,7 +361,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. 
The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False) @@ -371,7 +371,7 @@ def test_voc_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) def test_voc_num_classes(): diff --git a/tests/ut/python/dataset/test_datasets_wider_face.py b/tests/ut/python/dataset/test_datasets_wider_face.py index 843ec9aab41..22c279c45f3 100644 --- a/tests/ut/python/dataset/test_datasets_wider_face.py +++ b/tests/ut/python/dataset/test_datasets_wider_face.py @@ -205,7 +205,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) # usage = all try: @@ -215,7 +215,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.WIDERFaceDataset(DATA_DIR, shuffle=False) @@ -224,7 +224,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.WIDERFaceDataset(DATA_DIR, shuffle=False) @@ -233,7 +233,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) try: data = ds.WIDERFaceDataset(DATA_DIR, shuffle=False) @@ -242,7 +242,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.WIDERFaceDataset(DATA_DIR, shuffle=False) @@ -251,7 +251,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.WIDERFaceDataset(DATA_DIR, shuffle=False) @@ -260,7 +260,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.WIDERFaceDataset(DATA_DIR, shuffle=False) @@ -269,7 +269,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.WIDERFaceDataset(DATA_DIR, shuffle=False) @@ -278,7 +278,7 @@ def test_wider_face_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == '__main__': diff --git a/tests/ut/python/dataset/test_datasets_wiki_text.py b/tests/ut/python/dataset/test_datasets_wiki_text.py index ee23d9b6b36..50ab509c4ad 100644 --- a/tests/ut/python/dataset/test_datasets_wiki_text.py +++ b/tests/ut/python/dataset/test_datasets_wiki_text.py @@ -370,7 +370,7 @@ def test_wiki_text_dataset_exceptions(): data = data.map(operations=exception_func, input_columns=["text"], num_parallel_workers=1) for _ in data.__iter__(): pass - assert "map operation: [PyFunc] failed. The corresponding data files" in str(error_info.value) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(error_info.value) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_yahoo_answers.py b/tests/ut/python/dataset/test_datasets_yahoo_answers.py index 83ee9c6696e..ff011b9c1f0 100644 --- a/tests/ut/python/dataset/test_datasets_yahoo_answers.py +++ b/tests/ut/python/dataset/test_datasets_yahoo_answers.py @@ -122,7 +122,7 @@ def test_yahoo_answers_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.YahooAnswersDataset(DATA_DIR, usage="test", shuffle=False) data = data.map(operations=exception_func, input_columns=["content"], num_parallel_workers=1) @@ -130,7 +130,7 @@ def test_yahoo_answers_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. 
The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_yelp_review.py b/tests/ut/python/dataset/test_datasets_yelp_review.py index 9c3a74697df..70c643c21ad 100644 --- a/tests/ut/python/dataset/test_datasets_yelp_review.py +++ b/tests/ut/python/dataset/test_datasets_yelp_review.py @@ -129,7 +129,7 @@ def test_yelp_review_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) try: data = ds.YelpReviewDataset(DATA_POLARITY_DIR, usage='test', shuffle=False) @@ -138,7 +138,7 @@ def test_yelp_review_dataset_exception(): pass assert False except RuntimeError as e: - assert "map operation: [PyFunc] failed. The corresponding data files" in str(e) + assert "map operation: [PyFunc] failed. The corresponding data file is" in str(e) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_datasets_yes_no.py b/tests/ut/python/dataset/test_datasets_yes_no.py index c61d13fd60d..16d9e6e5ce6 100644 --- a/tests/ut/python/dataset/test_datasets_yes_no.py +++ b/tests/ut/python/dataset/test_datasets_yes_no.py @@ -143,7 +143,7 @@ def test_yes_no_exception(): def exception_func(item): raise Exception("Error occur!") - error_msg_8 = "The corresponding data files" + error_msg_8 = "The corresponding data file is" with pytest.raises(RuntimeError, match=error_msg_8): data = ds.YesNoDataset(DATA_DIR) data = data.map(operations=exception_func, input_columns=[ diff --git a/tests/ut/python/dataset/test_eager_vision.py b/tests/ut/python/dataset/test_eager_vision.py index 21651287107..9f8760d9e2b 100644 --- a/tests/ut/python/dataset/test_eager_vision.py +++ b/tests/ut/python/dataset/test_eager_vision.py @@ -540,7 +540,7 @@ def test_eager_invalid_image_cutout(): assert error_msg in str(error_info.value) my_input = np.random.randn(60, 50) - 
test_config(my_input, RuntimeError, "Unexpected error. CutOut: shape is invalid.") + test_config(my_input, RuntimeError, "CutOut: shape is invalid.") test_config(1, TypeError, "Input should be NumPy or PIL image, got .") test_config(1.0, TypeError, "Input should be NumPy or PIL image, got .") diff --git a/tests/ut/python/dataset/test_five_crop.py b/tests/ut/python/dataset/test_five_crop.py index 1e565d26d82..7a62d4e9c13 100644 --- a/tests/ut/python/dataset/test_five_crop.py +++ b/tests/ut/python/dataset/test_five_crop.py @@ -98,7 +98,7 @@ def test_five_crop_error_msg(): for _ in data: pass error_msg = \ - "Unexpected error. map operation: [ToTensor] failed. The op is OneToOne, can only accept one tensor as input." + "map operation: [ToTensor] failed. The op is OneToOne, can only accept one tensor as input." assert error_msg in str(info.value) diff --git a/tests/ut/python/dataset/test_formatted_exception.py b/tests/ut/python/dataset/test_formatted_exception.py new file mode 100644 index 00000000000..67c062626ee --- /dev/null +++ b/tests/ut/python/dataset/test_formatted_exception.py @@ -0,0 +1,239 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +import numpy as np +import mindspore.dataset as ds +import mindspore.dataset.vision as vision + + +def test_generator_single_worker_exception(): + """ + Feature: Formatted exception. 
+ Description: Test formatted exception in GeneratorDataset scenario with one worker. + Expectation: Python stack and summary message can be found in exception log. + """ + class Gen(): + def __init__(self): + self.data = [1, 2, 3, 4] + def __getitem__(self, index): + data = self.data[index] + return data/0 + def __len__(self): + return 4 + + dataset = ds.GeneratorDataset(Gen(), ["image"], shuffle=False, num_parallel_workers=1) + + try: + for data in dataset.create_dict_iterator(output_numpy=True, num_epochs=1): + print(data["image"].shape) + assert False + except RuntimeError as e: + assert "Exception thrown from user defined Python function in dataset" in str(e) + assert "Python Call Stack" in str(e) + assert "Traceback (most recent call last):" in str(e) + assert "ZeroDivisionError: division by zero" in str(e) + assert "Dataset Pipeline Error Message:" in str(e) + + +def test_generator_multi_workers_exception(): + """ + Feature: Formatted exception. + Description: Test formatted exception in GeneratorDataset scenario with multi-workers. + Expectation: Python stack and summary message can be found in exception log. 
+ """ + def pyfunc(image): + return image + + class Gen(): + def __init__(self): + self.data = [[1], [2], [3], [4]] + def __getitem__(self, index): + image = Image.open(index) + return image + def __len__(self): + return 4 + + dataset = ds.GeneratorDataset(Gen(), ["image"], shuffle=False, num_parallel_workers=2) + dataset = dataset.map(operations=pyfunc, input_columns=["image"]) + + try: + for data in dataset.create_dict_iterator(output_numpy=True, num_epochs=1): + print(data["image"].shape) + assert False + except RuntimeError as e: + assert "Exception thrown from user defined Python function in dataset" in str(e) + assert "Python Call Stack" in str(e) + assert "Traceback (most recent call last):" in str(e) + assert "NameError: name 'Image' is not defined" in str(e) + assert "Dataset Pipeline Error Message:" in str(e) + + +def test_batch_operator_exception(): + """ + Feature: Formatted exception. + Description: Test formatted exception in batch operator scenario. + Expectation: Python stack and summary message can be found in exception log. + """ + class Gen(): + def __init__(self): + self.data = [np.ones((2)), np.ones((2)), np.ones((2)), np.ones((2, 3))] + def __getitem__(self, index): + return self.data[index] + def __len__(self): + return 4 + + dataset = ds.GeneratorDataset(Gen(), ["image"], shuffle=False) + dataset = dataset.batch(2) + + try: + for data in dataset.create_dict_iterator(output_numpy=True, num_epochs=1): + print(data["image"].shape) + assert False + except RuntimeError as e: + assert "Exception thrown from dataset pipeline. Refer to 'Dataset Pipeline Error Message'" in str(e) + assert "Python Call Stack" not in str(e) + assert "C++ Call Stack: (For framework developers)" in str(e) + + +def test_batch_operator_with_pyfunc_exception(): + """ + Feature: Formatted exception. + Description: Test formatted exception in batch operator with pyfunc scenario. + Expectation: Python stack and summary message can be found in exception log. 
+ """ + class Gen(): + def __init__(self): + self.data = [np.ones((2)), np.ones((2)), np.ones((2)), np.ones((2))] + def __getitem__(self, index): + return self.data[index] + def __len__(self): + return 4 + + def batch_func(col, batch_info): + zero = 0 + fake_data = 1/zero + return np.ones((3)), np.array(fake_data) + + dataset = ds.GeneratorDataset(Gen(), ["image"], shuffle=False) + dataset = dataset.batch(2, per_batch_map=batch_func, input_columns=["image"]) + + try: + for data in dataset.create_dict_iterator(output_numpy=True, num_epochs=1): + print(data["image"].shape) + assert False + except RuntimeError as e: + assert "Exception thrown from user defined Python function in dataset" in str(e) + assert "Python Call Stack" in str(e) + assert "Traceback (most recent call last):" in str(e) + assert "in batch_func" in str(e) + assert "Dataset Pipeline Error Message:" in str(e) + + +def test_map_operator_with_c_ops_and_multiprocessing_exception(): + """ + Feature: Formatted exception. + Description: Test formatted exception in map operator with c ops scenario. + Expectation: Python stack and summary message can be found in exception log. + """ + class Gen(): + def __init__(self): + self.data = [np.ones((10, 10, 3)), + np.ones((15, 15, 3)), + np.ones((5, 5, 3))] + def __getitem__(self, index): + return self.data[index] + def __len__(self): + return 3 + + dataset = ds.GeneratorDataset(Gen(), ["image"], shuffle=False, num_parallel_workers=2) + dataset = dataset.map(operations=vision.RandomCrop((8, 8)), input_columns=["image"], num_parallel_workers=2) + + try: + for data in dataset.create_dict_iterator(output_numpy=True, num_epochs=1): + print(data["image"].shape) + assert False + except RuntimeError as e: + assert "Shape is incorrect" in str(e) + assert "Python Call Stack" not in str(e) + assert "Dataset Pipeline Error Message:" in str(e) + + +def test_map_operator_with_pyfunc_and_multithreading_exception(): + """ + Feature: Formatted exception. 
+ Description: Test formatted exception in map operator with pyfunc scenario. + Expectation: Python stack and summary message can be found in exception log. + """ + def pyfunc(image): + a = 1 + b = 0 + c = a/b + return c + + class Gen(): + def __init__(self): + self.data = [[1], [2], [3], [4]] + def __getitem__(self, index): + return self.data[index] + def __len__(self): + return 4 + + dataset = ds.GeneratorDataset(Gen(), ["image"], shuffle=False, num_parallel_workers=2) + dataset = dataset.map(operations=pyfunc, input_columns=["image"], num_parallel_workers=2) + + try: + for data in dataset.create_dict_iterator(output_numpy=True, num_epochs=1): + print(data["image"].shape) + assert False + except RuntimeError as e: + assert "Exception thrown from user defined Python function in dataset" in str(e) + assert "Python Call Stack" in str(e) + assert "Traceback (most recent call last):" in str(e) + assert "Dataset Pipeline Error Message:" in str(e) + + +def test_map_operator_with_pyfunc_and_multiprocessing_exception(): + """ + Feature: Formatted exception. + Description: Test formatted exception in map operator with pyfunc scenario. + Expectation: Python stack and summary message can be found in exception log. 
+ """ + def pyfunc(image): + a = 1 + b = 0 + c = a/b + return c + + class Gen(): + def __init__(self): + self.data = [[1], [2], [3], [4]] + def __getitem__(self, index): + return self.data[index] + def __len__(self): + return 4 + + dataset = ds.GeneratorDataset(Gen(), ["image"], shuffle=False, num_parallel_workers=1) + dataset = dataset.map(operations=pyfunc, input_columns=["image"], num_parallel_workers=2, + python_multiprocessing=True) + + try: + for data in dataset.create_dict_iterator(output_numpy=True, num_epochs=1): + print(data["image"].shape) + assert False + except RuntimeError as e: + assert "Exception thrown from user defined Python function in dataset" in str(e) + assert "Python Call Stack" in str(e) + assert "Traceback (most recent call last):" in str(e) + assert "in pyfunc" in str(e) + assert "Dataset Pipeline Error Message:" in str(e) diff --git a/tests/ut/python/dataset/test_griffinlim.py b/tests/ut/python/dataset/test_griffinlim.py index 3fae02edf94..424e7ae1df7 100644 --- a/tests/ut/python/dataset/test_griffinlim.py +++ b/tests/ut/python/dataset/test_griffinlim.py @@ -154,7 +154,7 @@ def test_griffin_lim_pipeline_invalid_param_constraint(): data1 = ds.NumpySlicesDataset(in_data, column_names=["multi_dimensional_data"], shuffle=False) with pytest.raises(RuntimeError, - match=r"Unexpected error. map operation: \[GriffinLim\] failed. " + + match=r"map operation: \[GriffinLim\] failed. " + r"GriffinLim: the frequency of the input should equal to n_fft / 2 \+ 1"): transforms = [c_audio.GriffinLim(n_fft=100)] data1 = data1.map(operations=transforms, input_columns=["multi_dimensional_data"]) @@ -162,7 +162,7 @@ def test_griffin_lim_pipeline_invalid_param_constraint(): _ = item["multi_dimensional_data"] with pytest.raises(RuntimeError, - match=r"Unexpected error. map operation: \[GriffinLim\] failed. " + + match=r"map operation: \[GriffinLim\] failed. 
" + r"GriffinLim: the frequency of the input should equal to n_fft / 2 \+ 1"): transforms = [c_audio.GriffinLim(n_fft=300, n_iter=10, win_length=0, hop_length=120)] data1 = data1.map(operations=transforms, input_columns=["multi_dimensional_data"]) @@ -170,7 +170,7 @@ def test_griffin_lim_pipeline_invalid_param_constraint(): _ = item["multi_dimensional_data"] with pytest.raises(RuntimeError, - match=r"Syntax error. GriffinLim: momentum equal to or greater than 1 can be unstable, " + + match=r"GriffinLim: momentum equal to or greater than 1 can be unstable, " + "but got: 1.000000"): transforms = [c_audio.GriffinLim(n_fft=300, n_iter=10, win_length=0, hop_length=0, power=2, momentum=1)] data1 = data1.map(operations=transforms, input_columns=["multi_dimensional_data"]) diff --git a/tests/ut/python/dataset/test_melscale_fbanks.py b/tests/ut/python/dataset/test_melscale_fbanks.py index 755b436ac5a..bb725487767 100644 --- a/tests/ut/python/dataset/test_melscale_fbanks.py +++ b/tests/ut/python/dataset/test_melscale_fbanks.py @@ -115,7 +115,6 @@ def test_melscale_fbanks_invalid_input(): logger.info("Test melscale_fbanks with bad input: {0}".format(test_name)) with pytest.raises(error) as error_info: audio.melscale_fbanks(n_freqs, f_min, f_max, n_mels, sample_rate, norm, mel_type) - print(error_info) assert error_msg in str(error_info.value) test_invalid_input("invalid n_freqs parameter Value", 99999999999, 0, 50, 5, 100, audio.NormType.NONE, diff --git a/tests/ut/python/dataset/test_minddataset_exception.py b/tests/ut/python/dataset/test_minddataset_exception.py index 8455d60c61a..98c55dcc6fa 100644 --- a/tests/ut/python/dataset/test_minddataset_exception.py +++ b/tests/ut/python/dataset/test_minddataset_exception.py @@ -113,7 +113,7 @@ def test_invalid_mindrecord(): f.write('just for test') columns_list = ["data", "file_name", "label"] num_readers = 4 - with pytest.raises(RuntimeError, match="Unexpected error. 
Invalid file, the size of mindrecord file header " + with pytest.raises(RuntimeError, match="Invalid file, the size of mindrecord file header " "is larger than the upper limit."): data_set = ds.MindDataset(file_name, columns_list, num_readers) for _ in data_set.create_dict_iterator(num_epochs=1, output_numpy=True): @@ -375,21 +375,21 @@ def test_mindrecord_exception(): file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0] create_cv_mindrecord(file_name, 1) columns_list = ["data", "file_name", "label"] - with pytest.raises(RuntimeError, match="The corresponding data files"): + with pytest.raises(RuntimeError, match="The corresponding data file is"): data_set = ds.MindDataset(file_name, columns_list, shuffle=False) data_set = data_set.map(operations=exception_func, input_columns=["data"], num_parallel_workers=1) num_iter = 0 for _ in data_set.create_dict_iterator(num_epochs=1, output_numpy=True): num_iter += 1 - with pytest.raises(RuntimeError, match="The corresponding data files"): + with pytest.raises(RuntimeError, match="The corresponding data file is"): data_set = ds.MindDataset(file_name, columns_list, shuffle=False) data_set = data_set.map(operations=exception_func, input_columns=["file_name"], num_parallel_workers=1) num_iter = 0 for _ in data_set.create_dict_iterator(num_epochs=1, output_numpy=True): num_iter += 1 - with pytest.raises(RuntimeError, match="The corresponding data files"): + with pytest.raises(RuntimeError, match="The corresponding data file is"): data_set = ds.MindDataset(file_name, columns_list, shuffle=False) data_set = data_set.map(operations=exception_func, input_columns=["label"], num_parallel_workers=1) diff --git a/tests/ut/python/dataset/test_profiling_startstop.py b/tests/ut/python/dataset/test_profiling_startstop.py index 682d4667b57..9a75fe3169c 100644 --- a/tests/ut/python/dataset/test_profiling_startstop.py +++ b/tests/ut/python/dataset/test_profiling_startstop.py @@ -252,7 +252,7 @@ class 
TestMindDataProfilingStartStop: # Reissue Start MindData Profiling self.md_profiler.start() - assert "MD ProfilingManager is already running." in str(info) + assert "MD ProfilingManager is already running." in str(info.value) # Stop MindData Profiling self.md_profiler.stop() @@ -290,7 +290,7 @@ class TestMindDataProfilingStartStop: # Stop MindData Profiling - without prior Start() self.md_profiler.stop() - assert "MD ProfilingManager has not started yet." in str(info) + assert "MD ProfilingManager has not started yet." in str(info.value) # Start MindData Profiling self.md_profiler.start() diff --git a/tests/ut/python/dataset/test_serdes_dataset.py b/tests/ut/python/dataset/test_serdes_dataset.py index c9b894f2aa6..47b11cd5e74 100644 --- a/tests/ut/python/dataset/test_serdes_dataset.py +++ b/tests/ut/python/dataset/test_serdes_dataset.py @@ -1028,19 +1028,19 @@ def test_serdes_not_implemented_op_exception(): vision.Perspective(start_points=[[0, 63], [63, 63], [63, 0], [0, 0]], end_points=[[0, 63], [63, 63], [63, 0], [0, 0]], interpolation=Inter.BILINEAR)]) - assert "Unexpected error. Invalid data, unsupported operation: Perspective" in str(error_info.value) + assert "Invalid data, unsupported operation: Perspective" in str(error_info.value) # Proper to_json and from_json support has not yet been added for AdjustBrightness op with pytest.raises(RuntimeError) as error_info: test_config([vision.Decode(), vision.AdjustBrightness(brightness_factor=2.0)]) - assert "Unexpected error. Invalid data, unsupported operation: AdjustBrightness" in str(error_info.value) + assert "Invalid data, unsupported operation: AdjustBrightness" in str(error_info.value) # Proper to_json and from_json support has not yet been added for AdjustContrast op with pytest.raises(RuntimeError) as error_info: test_config([vision.Decode(), vision.AdjustContrast(contrast_factor=2.0)]) - assert "Unexpected error. 
Invalid data, unsupported operation: AdjustContrast" in str(error_info.value) + assert "Invalid data, unsupported operation: AdjustContrast" in str(error_info.value) # Restore configuration ds.config.set_seed(original_seed) diff --git a/tests/ut/python/dataset/test_spectrogram.py b/tests/ut/python/dataset/test_spectrogram.py index f09d5621f71..55b45e4796d 100644 --- a/tests/ut/python/dataset/test_spectrogram.py +++ b/tests/ut/python/dataset/test_spectrogram.py @@ -413,7 +413,7 @@ def test_spectrogram_param(): _ = audio.Spectrogram(n_fft=100, center=False)(wav) except RuntimeError as error: logger.info("Got an exception in Spectrogram: {}".format(str(error))) - assert "Unexpected error. Spectrogram: n_fft should be more than 0 and less than 30," \ + assert "Spectrogram: n_fft should be more than 0 and less than 30," \ " but got n_fft: 100." in str(error) diff --git a/tests/ut/python/dataset/test_ten_crop.py b/tests/ut/python/dataset/test_ten_crop.py index c976dad7ced..b56fd2dfdec 100644 --- a/tests/ut/python/dataset/test_ten_crop.py +++ b/tests/ut/python/dataset/test_ten_crop.py @@ -191,7 +191,7 @@ def test_ten_crop_wrong_img_error_msg(): with pytest.raises(RuntimeError) as info: data.create_tuple_iterator(num_epochs=1).__next__() error_msg = \ - "Unexpected error. map operation: [ToTensor] failed. The op is OneToOne, can only accept one tensor as input." + "map operation: [ToTensor] failed. The op is OneToOne, can only accept one tensor as input." assert error_msg in str(info.value) diff --git a/tests/ut/python/dataset/test_two_level_pipeline.py b/tests/ut/python/dataset/test_two_level_pipeline.py index 4894dd5d8a6..ed6356e9e22 100644 --- a/tests/ut/python/dataset/test_two_level_pipeline.py +++ b/tests/ut/python/dataset/test_two_level_pipeline.py @@ -124,7 +124,7 @@ def test_minddtaset_generatordataset_exception_01(add_and_remove_cv_file): for _ in range(num_epochs): for _ in iter_: num_iter += 1 - assert 'Unexpected error. 
Invalid data, column name:' in str(error_info.value) + assert 'Invalid data, column name:' in str(error_info.value) # pylint: disable=redefined-outer-name @@ -187,7 +187,7 @@ def test_minddtaset_generatordataset_exception_02(add_and_remove_file): for item in iter_: print("item: ", item) num_iter += 1 - assert 'Unexpected error. Invalid data, column name:' in str(error_info.value) + assert 'Invalid data, column name:' in str(error_info.value) def test_two_level_pipeline_with_multiprocessing(): diff --git a/tests/ut/python/dataset/test_uniform_augment.py b/tests/ut/python/dataset/test_uniform_augment.py index 9ad9b038f50..240a686745b 100644 --- a/tests/ut/python/dataset/test_uniform_augment.py +++ b/tests/ut/python/dataset/test_uniform_augment.py @@ -386,7 +386,7 @@ def test_cpp_uniform_augment_random_crop_badinput(num_ops=1): with pytest.raises(RuntimeError) as error_info: for _ in ds1.create_dict_iterator(num_epochs=1, output_numpy=True): num_batches += 1 - assert "Shape is incorrect. map operation: [UniformAugment] failed." in str(error_info) + assert "map operation: [UniformAugment] failed." in str(error_info.value) if __name__ == "__main__": diff --git a/tests/ut/python/dataset/test_vectors.py b/tests/ut/python/dataset/test_vectors.py index 1f2b4453f2c..e41439b89ec 100644 --- a/tests/ut/python/dataset/test_vectors.py +++ b/tests/ut/python/dataset/test_vectors.py @@ -195,7 +195,7 @@ def test_vectors_invalid_input(): error=RuntimeError, error_msg="invalid file, file is empty.") test_invalid_input("the count of `unknown_init`'s element is different with word vector.", DATASET_ROOT_PATH + "vectors.txt", - error=RuntimeError, error_msg="Unexpected error. 
ToVectors: " + + error=RuntimeError, error_msg="ToVectors: " + "unk_init must be the same length as vectors, but got unk_init: 2 and vectors: 6", unk_init=[-1, -1]) test_invalid_input("The file not exist", DATASET_ROOT_PATH + "not_exist.txt", error=RuntimeError, diff --git a/tests/ut/python/dataset_deprecated/test_eager_vision.py b/tests/ut/python/dataset_deprecated/test_eager_vision.py index bfba3f0ade1..fa249384adf 100644 --- a/tests/ut/python/dataset_deprecated/test_eager_vision.py +++ b/tests/ut/python/dataset_deprecated/test_eager_vision.py @@ -561,7 +561,7 @@ def test_eager_invalid_image_cutout_c(): assert error_msg in str(error_info.value) my_input = np.random.randn(60, 50) - test_config(my_input, RuntimeError, "Unexpected error. CutOut: shape is invalid.") + test_config(my_input, RuntimeError, "CutOut: shape is invalid.") test_config(1, TypeError, "Input should be NumPy or PIL image, got .") test_config(1.0, TypeError, "Input should be NumPy or PIL image, got .") diff --git a/tests/ut/python/dataset_deprecated/test_map.py b/tests/ut/python/dataset_deprecated/test_map.py index 270b1c2433a..180d86f8b6a 100644 --- a/tests/ut/python/dataset_deprecated/test_map.py +++ b/tests/ut/python/dataset_deprecated/test_map.py @@ -266,8 +266,8 @@ def test_map_with_exact_log(): for data in dataset.create_dict_iterator(): print(data["data"], data["label"]) print("-----{}++++".format(info.value), flush=True) - assert str(info.value).count("Exception thrown from PyFunc") == 1 - assert str(info.value).count("Caught TypeError in map") == 1 + assert str(info.value).count("Exception thrown from user defined Python function") == 1 + assert str(info.value).count("map operation") == 1 assert str(info.value).count("img should be PIL image") == 1 diff --git a/tests/ut/python/dataset_deprecated/test_to_type.py b/tests/ut/python/dataset_deprecated/test_to_type.py index 66f0c4bd5c9..b0bd4cf3fad 100644 --- a/tests/ut/python/dataset_deprecated/test_to_type.py +++ 
b/tests/ut/python/dataset_deprecated/test_to_type.py @@ -261,7 +261,7 @@ def test_to_type_05(): data = data.map(operations=transform, input_columns=["image"]) for _ in enumerate(data): pass - assert "data type" in str(error_info.value) + assert "datatype" in str(error_info.value) def test_to_type_invalid_arg(): diff --git a/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py b/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py index 970ffbcc680..508a0c0b1f9 100644 --- a/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py +++ b/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py @@ -135,7 +135,7 @@ def test_cifar10_to_mindrecord_directory(fixture_file): when destination path is directory. """ with pytest.raises(RuntimeError, - match="Unexpected error. Invalid file, mindrecord files already exist. Please check file path:"): + match="Invalid file, mindrecord files already exist. Please check file path:"): cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, CIFAR10_DIR) cifar10_transformer.transform() @@ -146,7 +146,7 @@ def test_cifar10_to_mindrecord_filename_equals_cifar10(): when destination path equals source path. """ with pytest.raises(RuntimeError, - match="Unexpected error. Invalid file, mindrecord files already exist. Please check file path:"): + match="Invalid file, mindrecord files already exist. Please check file path:"): cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, CIFAR10_DIR + "/data_batch_0") cifar10_transformer.transform() diff --git a/tests/ut/python/mindrecord/test_mindrecord_base.py b/tests/ut/python/mindrecord/test_mindrecord_base.py index e8c81ddbeae..93278d1eb6d 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_base.py +++ b/tests/ut/python/mindrecord/test_mindrecord_base.py @@ -1229,7 +1229,7 @@ def test_mindrecord_commit_exception_01(): writer.commit() writer.write_raw_data(data[5:10]) - assert 'Unexpected error. Not allow to call `write_raw_data` on flushed MindRecord files.' 
in str(err.value) + assert 'Not allowed to call `write_raw_data` on flushed MindRecord files.' in str(err.value) remove_multi_files(mindrecord_file_name, 4) @@ -1249,7 +1249,7 @@ def test_cv_file_overwrite_exception_01(): "label": {"type": "int64"}, "data": {"type": "bytes"}} writer.add_schema(cv_schema_json, "img_schema") writer.write_raw_data(data) - assert 'Unexpected error. Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) + assert 'Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) remove_multi_files(mindrecord_file_name, FILES_NUM) def test_cv_file_overwrite_exception_02(): @@ -1268,5 +1268,5 @@ def test_cv_file_overwrite_exception_02(): "label": {"type": "int64"}, "data": {"type": "bytes"}} writer.add_schema(cv_schema_json, "img_schema") writer.write_raw_data(data) - assert 'Unexpected error. Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) + assert 'Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) remove_multi_files(mindrecord_file_name, FILES_NUM) diff --git a/tests/ut/python/mindrecord/test_mindrecord_exception.py b/tests/ut/python/mindrecord/test_mindrecord_exception.py index 92f393cef8f..73a58ada685 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_exception.py +++ b/tests/ut/python/mindrecord/test_mindrecord_exception.py @@ -256,7 +256,7 @@ def test_invalid_db(): f.write('just for test') with pytest.raises(RuntimeError) as err: FileReader(file_name) - assert "Unexpected error. Failed to execute the sql [ SELECT NAME from SHARD_NAME; ] " \ + assert "Failed to execute the sql [ SELECT NAME from SHARD_NAME; ] " \ "while verifying meta file" in str(err.value) remove_file(file_name) @@ -271,7 +271,7 @@ def test_overwrite_invalid_mindrecord(): f.write('just for test') with pytest.raises(RuntimeError) as err: create_cv_mindrecord(1, file_name) - assert 'Unexpected error. 
Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) + assert 'Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) remove_file(file_name) def test_overwrite_invalid_db(): @@ -285,7 +285,7 @@ def test_overwrite_invalid_db(): f.write('just for test') with pytest.raises(RuntimeError) as err: create_cv_mindrecord(1, file_name) - assert 'Unexpected error. Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) + assert 'Invalid file, mindrecord files already exist. Please check file path:' in str(err.value) remove_file(file_name) def test_read_after_close(): @@ -382,7 +382,7 @@ def test_mindpage_pageno_pagesize_not_int(): with pytest.raises(ParamValueError): reader.read_at_page_by_name("822", 0, "qwer") - with pytest.raises(RuntimeError, match=r"Unexpected error. Invalid data, " + with pytest.raises(RuntimeError, match=r"Invalid data, " r"category_id: 99999 must be in the range \[0, 10\]."): reader.read_at_page_by_id(99999, 0, 1) remove_file(file_name) @@ -407,11 +407,11 @@ def test_mindpage_filename_not_exist(): info = reader.read_category_info() logger.info("category info: {}".format(info)) - with pytest.raises(RuntimeError, match=r"Unexpected error. Invalid data, " + with pytest.raises(RuntimeError, match=r"Invalid data, " r"category_id: 9999 must be in the range \[0, 10\]."): reader.read_at_page_by_id(9999, 0, 1) - with pytest.raises(RuntimeError, match="Unexpected error. category_name: abc.jpg could not found."): + with pytest.raises(RuntimeError, match="category_name: abc.jpg could not found."): reader.read_at_page_by_name("abc.jpg", 0, 1) with pytest.raises(ParamValueError): @@ -567,7 +567,7 @@ def test_write_with_invalid_data(): mindrecord_file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0] # field: file_name => filename - with pytest.raises(RuntimeError, match="Unexpected error. 
Invalid data, " \ + with pytest.raises(RuntimeError, match="Invalid data, " \ "the number of schema should be positive but got:"): remove_one_file(mindrecord_file_name) remove_one_file(mindrecord_file_name + ".db") @@ -603,7 +603,7 @@ def test_write_with_invalid_data(): writer.commit() # field: data => image - with pytest.raises(RuntimeError, match="Unexpected error. Invalid data, " \ + with pytest.raises(RuntimeError, match="Invalid data, " \ "the number of schema should be positive but got:"): remove_one_file(mindrecord_file_name) remove_one_file(mindrecord_file_name + ".db") @@ -639,7 +639,7 @@ def test_write_with_invalid_data(): writer.commit() # string type with int value - with pytest.raises(RuntimeError, match="Unexpected error. Invalid data, " \ + with pytest.raises(RuntimeError, match="Invalid data, " \ "the number of schema should be positive but got:"): remove_one_file(mindrecord_file_name) remove_one_file(mindrecord_file_name + ".db") @@ -675,7 +675,7 @@ def test_write_with_invalid_data(): writer.commit() # field with int64 type, but the real data is string - with pytest.raises(RuntimeError, match="Unexpected error. Invalid data, " \ + with pytest.raises(RuntimeError, match="Invalid data, " \ "the number of schema should be positive but got:"): remove_one_file(mindrecord_file_name) remove_one_file(mindrecord_file_name + ".db") @@ -711,7 +711,7 @@ def test_write_with_invalid_data(): writer.commit() # bytes field is string - with pytest.raises(RuntimeError, match="Unexpected error. Invalid data, " \ + with pytest.raises(RuntimeError, match="Invalid data, " \ "the number of schema should be positive but got:"): remove_one_file(mindrecord_file_name) remove_one_file(mindrecord_file_name + ".db") @@ -747,7 +747,7 @@ def test_write_with_invalid_data(): writer.commit() # field is not numpy type - with pytest.raises(RuntimeError, match="Unexpected error. 
Invalid data, " \ + with pytest.raises(RuntimeError, match="Invalid data, " \ "the number of schema should be positive but got:"): remove_one_file(mindrecord_file_name) remove_one_file(mindrecord_file_name + ".db") @@ -783,7 +783,7 @@ def test_write_with_invalid_data(): writer.commit() # not enough field - with pytest.raises(RuntimeError, match="Unexpected error. Invalid data, " \ + with pytest.raises(RuntimeError, match="Invalid data, " \ "the number of schema should be positive but got:"): remove_one_file(mindrecord_file_name) remove_one_file(mindrecord_file_name + ".db")