diff --git a/mindspore/lite/tools/converter/converter.cc b/mindspore/lite/tools/converter/converter.cc index 06559bef292..3bc92a7c2f9 100644 --- a/mindspore/lite/tools/converter/converter.cc +++ b/mindspore/lite/tools/converter/converter.cc @@ -799,12 +799,12 @@ int CheckInputShape(const std::shared_ptr<ConverterPara> &param) { std::vector<int64_t> dims = elem.second; if (dims.empty()) { MS_LOG(ERROR) << "INPUT MISSING: input tensor dim is empty"; - return lite::RET_ERROR; + return lite::RET_INPUT_PARAM_INVALID; } bool has_negative_dim = std::any_of(dims.begin(), dims.end(), [](int64_t dim) { return dim < 0; }); if (has_negative_dim) { MS_LOG(ERROR) << "INPUT ILLEGAL: Unsupported dim < 0."; - return lite::RET_ERROR; + return lite::RET_INPUT_PARAM_INVALID; } } } diff --git a/mindspore/lite/tools/converter/converter_lite/converter_flags.cc b/mindspore/lite/tools/converter/converter_lite/converter_flags.cc index 78f15c1d329..61ac9d02bfb 100644 --- a/mindspore/lite/tools/converter/converter_lite/converter_flags.cc +++ b/mindspore/lite/tools/converter/converter_lite/converter_flags.cc @@ -158,7 +158,7 @@ int Flags::InitInTensorShape() const { constexpr int kMinShapeSizeInStr = 2; if (string_split.size() < kMinShapeSizeInStr) { MS_LOG(ERROR) << "shape size must not be less than " << kMinShapeSizeInStr; - return mindspore::lite::RET_ERROR; + return lite::RET_INPUT_PARAM_INVALID; } auto name = string_split[0]; for (size_t i = 1; i < string_split.size() - 1; ++i) { @@ -166,17 +166,17 @@ } if (name.empty()) { MS_LOG(ERROR) << "input tensor name is empty"; - return lite::RET_ERROR; + return lite::RET_INPUT_PARAM_INVALID; } auto dim_strs = string_split[string_split.size() - 1]; if (dim_strs.empty()) { MS_LOG(ERROR) << "input tensor dim string is empty"; - return lite::RET_ERROR; + return lite::RET_INPUT_PARAM_INVALID; } auto dims = lite::StrSplit(dim_strs, std::string(",")); if (dims.empty()) { MS_LOG(ERROR) << "input tensor dim is empty"; - return lite::RET_ERROR; 
+ return lite::RET_INPUT_PARAM_INVALID; } for (const auto &dim : dims) { int64_t dim_value; @@ -184,7 +184,7 @@ int Flags::InitInTensorShape() const { dim_value = std::stoi(dim); } catch (const std::exception &e) { MS_LOG(ERROR) << "Get dim failed: " << e.what(); - return lite::RET_ERROR; + return lite::RET_INPUT_PARAM_INVALID; } shape.push_back(dim_value); }