Improve error messages

shenwei41 2021-11-23 14:50:07 +08:00
parent acbaff17f7
commit 5d48409395
9 changed files with 30 additions and 19 deletions

View File

@@ -174,7 +174,7 @@ std::shared_ptr<TensorOperation> PadEnd::Parse() {
std::shared_ptr<Tensor> pad_value;
Status rc = Tensor::CreateFromMSTensor(data_->pad_value_, &pad_value);
if (rc.IsError()) {
MS_LOG(ERROR) << "Error creating value constant tensor. " << rc;
MS_LOG(ERROR) << "Error creating pad_value constant tensor. " << rc;
return nullptr;
}
return std::make_shared<PadEndOperation>(TensorShape(data_->pad_shape_), pad_value);

View File

@@ -99,7 +99,7 @@ Status ConfigManager::FromJson(const nlohmann::json &j) {
Status ConfigManager::LoadFile(const std::string &settingsFile) {
Status rc;
if (!Path(settingsFile).Exists()) {
RETURN_STATUS_UNEXPECTED("File is not found.");
RETURN_STATUS_UNEXPECTED("File path: settingsFile is not exist, check input path of config 'load' API.");
}
// Some settings are mandatory, others are not (with default). If a setting
// is optional it will set a default value if the config is missing from the file.
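
For context (not part of the commit), the same existence check can be sketched with std::filesystem; the diff itself uses MindSpore's own Path class, so this standalone version only illustrates the improved message, and the function name and sample path are hypothetical:

#include <filesystem>
#include <iostream>
#include <string>

// Standalone sketch: report the offending path and point the user at the
// API that consumed it, mirroring the message introduced above.
bool CheckSettingsFile(const std::string &settings_file) {
  if (!std::filesystem::exists(settings_file)) {
    std::cerr << "File path: " << settings_file
              << " does not exist, check the input path of the config 'load' API." << std::endl;
    return false;
  }
  return true;
}

int main() {
  CheckSettingsFile("/tmp/missing_dataset_config.json");  // logs the descriptive error
  return 0;
}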

View File

@@ -83,7 +83,9 @@ DataType DataType::FromCVType(int cv_type) {
case CV_64F:
return DataType(DataType::DE_FLOAT64);
default:
MS_LOG(ERROR) << "Cannot convert from OpenCV type, unknown CV type. Unknown data type is returned!";
std::string err_msg = "Cannot convert from OpenCV type, unknown CV type.";
err_msg += " Currently supported data type: [int8, uint8, int16, uint16, int32, float16, float32, float64]";
MS_LOG(ERROR) << err_msg;
return DataType(DataType::DE_UNKNOWN);
}
}
@@ -156,7 +158,11 @@ DataType DataType::FromNpArray(const py::array &arr) {
} else if (arr.dtype().kind() == 'S' || arr.dtype().kind() == 'U') {
return DataType(DataType::DE_STRING);
} else {
MS_LOG(ERROR) << "Cannot convert from numpy type. Unknown data type is returned!";
std::string err_msg = "Cannot convert from numpy type. Unknown data type is returned!";
err_msg +=
" Currently supported data type: [int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32, "
"float64, string]";
MS_LOG(ERROR) << err_msg;
return DataType(DataType::DE_UNKNOWN);
}
}
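
Both hunks follow the same pattern: name the conversion that failed and enumerate what is accepted. A minimal standalone sketch of that pattern (illustrative names and a shortened type list, not the MindSpore DataType class):

#include <iostream>
#include <string>

// Recognize a small set of type names; on failure, list the accepted ones.
std::string NormalizeTypeName(const std::string &name) {
  if (name == "int8" || name == "uint8" || name == "float32") {
    return name;
  }
  std::string err_msg = "Cannot convert type '" + name + "'.";
  err_msg += " Currently supported data types: [int8, uint8, float32].";
  std::cerr << err_msg << std::endl;
  return "unknown";
}

int main() {
  NormalizeTypeName("complex128");  // prints the descriptive error
  return 0;
}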

View File

@@ -29,6 +29,7 @@
#include "utils/ms_utils.h"
#include "minddata/dataset/include/dataset/constants.h"
#include "minddata/dataset/util/validators.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/core/cv_tensor.h"
@@ -701,9 +702,9 @@ Status Tensor::to_json_convert(nlohmann::json *args) {
}
Status Tensor::from_json(nlohmann::json op_params, std::shared_ptr<Tensor> *tensor) {
CHECK_FAIL_RETURN_UNEXPECTED(op_params.find("shape") != op_params.end(), "Failed to find shape");
CHECK_FAIL_RETURN_UNEXPECTED(op_params.find("type") != op_params.end(), "Failed to find type");
CHECK_FAIL_RETURN_UNEXPECTED(op_params.find("data") != op_params.end(), "Failed to find data");
RETURN_IF_NOT_OK(ValidateParamInJson(op_params, "shape", "Tensor"));
RETURN_IF_NOT_OK(ValidateParamInJson(op_params, "type", "Tensor"));
RETURN_IF_NOT_OK(ValidateParamInJson(op_params, "data", "Tensor"));
std::string type = op_params["type"];
std::vector<dsize_t> list = op_params["shape"];
TensorShape shape = TensorShape(list);
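
The inline find-checks are replaced by a shared ValidateParamInJson helper from minddata/dataset/util/validators.h (hence the new include above). Its definition is not part of this diff; the following is only a plausible minimal sketch, with the Status machinery simplified to a bool:

#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

// Hypothetical simplified validator: check that a key exists before access
// and produce an operator-qualified message on failure.
bool ValidateParamInJson(const nlohmann::json &json_obj, const std::string &param_name,
                         const std::string &operator_name) {
  if (json_obj.find(param_name) == json_obj.end()) {
    std::cerr << "Failed to find parameter '" << param_name << "' of " << operator_name
              << " in the serialized JSON." << std::endl;
    return false;
  }
  return true;
}

int main() {
  nlohmann::json op_params = {{"shape", {2, 3}}, {"type", "int32"}};  // "data" is missing
  ValidateParamInJson(op_params, "data", "Tensor");  // logs the missing-parameter error
  return 0;
}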

View File

@@ -232,7 +232,7 @@ NgramOperation::NgramOperation(const std::vector<int32_t> &ngrams, const std::pa
Status NgramOperation::ValidateParams() {
if (ngrams_.size() == 0) {
std::string err_msg = "Ngram : Container cannot be empty.";
std::string err_msg = "Ngram : The size of the parameter 'ngrams_' is not to be 0.";
LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg);
} else {
for (int32_t i = 0; i < ngrams_.size(); ++i) {
@@ -414,7 +414,7 @@ Status ToNumberOperation::to_json(nlohmann::json *out_json) {
}
Status ToNumberOperation::from_json(nlohmann::json op_params, std::shared_ptr<TensorOperation> *operation) {
CHECK_FAIL_RETURN_UNEXPECTED(op_params.find("data_type") != op_params.end(), "Failed to find data_type");
CHECK_FAIL_RETURN_UNEXPECTED(op_params.find("data_type") != op_params.end(), "Failed to find param 'data_type'.");
std::string data_type = op_params["data_type"];
*operation = std::make_shared<text::ToNumberOperation>(data_type);
return Status::OK();

View File

@@ -56,7 +56,7 @@ Status NgramOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Te
}
for (auto n : ngrams_) {
CHECK_FAIL_RETURN_UNEXPECTED(n > 0, "Ngram: ngrams needs to be a positive number.\n");
CHECK_FAIL_RETURN_UNEXPECTED(n > 0, "Ngram: Each element in the parameter 'ngrams' must be positive.\n");
int32_t start_ind = l_len_ - std::min(l_len_, n - 1);
int32_t end_ind = offsets.size() - r_len_ + std::min(r_len_, n - 1);
if (end_ind - start_ind <= n) {
@@ -81,8 +81,8 @@ void NgramOp::Print(std::ostream &out) const {
}
Status NgramOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {
CHECK_FAIL_RETURN_UNEXPECTED(inputs.size() == NumInput(), "Ngram: incorrect num of inputs\n");
CHECK_FAIL_RETURN_UNEXPECTED(inputs[0].Rank() == 1, "Ngram: ngram only works with 1-dim data\n");
CHECK_FAIL_RETURN_UNEXPECTED(inputs.size() == NumInput(), "Ngram: The size of inputs must be 1\n");
CHECK_FAIL_RETURN_UNEXPECTED(inputs[0].Rank() == 1, "Ngram: Ngram only works with 1-dimensional data\n");
dsize_t num_elements = ngrams_.size();
for (int32_t n : ngrams_) {
// here since rank == 1, NumOfElements == shape[0]. add padding length to string
@@ -91,7 +91,7 @@ Status NgramOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<
num_elements += std::max(len_with_padding - n, 0);
}
(void)outputs.emplace_back(TensorShape({num_elements}));
CHECK_FAIL_RETURN_UNEXPECTED(outputs.size() == NumOutput(), "Ngram: incorrect num of outputs\n");
CHECK_FAIL_RETURN_UNEXPECTED(outputs.size() == NumOutput(), "Ngram: The size of outputs must be 1\n");
return Status::OK();
}
} // namespace dataset
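
As a sanity check on the size arithmetic in OutputShape (num_elements starts at ngrams_.size(), then each requested n adds max(len_with_padding - n, 0), i.e. the usual len - n + 1 n-gram count clamped to at least one slot per n), here is a standalone worked example. NgramSlots is an illustrative name, and len_with_padding is passed in directly because its computation is elided from the hunk:

#include <algorithm>
#include <cstdint>
#include <iostream>

// Slots contributed by a single n: the reserved slot from ngrams_.size()
// plus one slot per additional n-gram that fits in the (padded) sequence.
int64_t NgramSlots(int64_t len_with_padding, int32_t n) {
  return 1 + std::max<int64_t>(len_with_padding - n, 0);
}

int main() {
  // Bigrams over 3 unpadded tokens "a b c": 1 + max(3 - 2, 0) = 2,
  // matching the two bigrams "a b" and "b c".
  std::cout << NgramSlots(3, 2) << std::endl;  // prints 2
  // A window larger than the input still reserves one slot.
  std::cout << NgramSlots(3, 5) << std::endl;  // prints 1
  return 0;
}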

View File

@@ -20,8 +20,11 @@ namespace dataset {
Status SlidingWindowOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
IO_CHECK(input, output);
CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Rank() == 1,
"SlidingWindow: SlidingWindow supports 1D input only for now.");
CHECK_FAIL_RETURN_UNEXPECTED(axis_ == 0 || axis_ == -1, "SlidingWindow: axis supports 0 or -1 only for now.");
"SlidingWindow: SlidingWindow supports 1D input only for now, but got " +
std::to_string(input->shape().Rank()) + "D.");
CHECK_FAIL_RETURN_UNEXPECTED(
axis_ == 0 || axis_ == -1,
"SlidingWindow: The parameter axis supports 0 or -1 only for now, but got " + std::to_string(axis_) + ".");
std::vector<TensorShape> input_shape = {input->shape()};
std::vector<TensorShape> output_shape = {TensorShape({})};
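
The pattern here (and in TruncateSequencePair below) is to append the actual offending value with std::to_string so the exception pinpoints the bad input. A trivial standalone sketch of the message builder; the function name is illustrative, not part of the commit:

#include <iostream>
#include <string>

// Build the rank error exactly as the kernel above does.
std::string RankErrorMessage(int rank) {
  return "SlidingWindow: SlidingWindow supports 1D input only for now, but got " +
         std::to_string(rank) + "D.";
}

int main() {
  std::cout << RankErrorMessage(2) << std::endl;
  // This is the exact string the updated Python test asserts on below.
  return 0;
}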

View File

@@ -25,7 +25,8 @@ namespace dataset {
Status TruncateSequencePairOp::Compute(const TensorRow &input, TensorRow *output) {
IO_CHECK_VECTOR(input, output);
CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 2, "TruncateSequencePair: Expected two inputs.");
CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 2, "TruncateSequencePair: Expected two inputs, but got " +
std::to_string(input.size()) + " inputs.");
std::shared_ptr<Tensor> seq1 = input[0];
std::shared_ptr<Tensor> seq2 = input[1];
CHECK_FAIL_RETURN_UNEXPECTED(seq1->shape().Rank() == 1 && seq2->shape().Rank() == 1,

View File

@@ -36,7 +36,7 @@ def test_sliding_window_callable():
input2 = [["", "", "", "", ""]]
with pytest.raises(RuntimeError) as info:
_ = op(input2)
assert "SlidingWindow: SlidingWindow supports 1D input only for now." in str(info.value)
assert "SlidingWindow: SlidingWindow supports 1D input only for now, but got 2D." in str(info.value)
# test input multiple tensors
with pytest.raises(RuntimeError) as info:
@@ -114,7 +114,7 @@ def test_sliding_window_exception():
pass
assert False
except RuntimeError as e:
assert "axis supports 0 or -1 only for now." in str(e)
assert "The parameter axis supports 0 or -1 only for now, but got -100." in str(e)
try:
inputs = ["aa", "bb", "cc"]
@@ -124,7 +124,7 @@ def test_sliding_window_exception():
pass
assert False
except RuntimeError as e:
assert "SlidingWindow supports 1D input only for now." in str(e)
assert "SlidingWindow supports 1D input only for now, but got 0D." in str(e)
if __name__ == '__main__':