modify log in kernel part

ms_yan 2021-09-11 18:18:39 +08:00
parent 1002e2704b
commit 0a365e91bf
39 changed files with 365 additions and 223 deletions

View File

@@ -34,23 +34,26 @@ Status ConcatenateOp::OutputShape(const std::vector<TensorShape> &inputs, std::v
   std::vector<TensorShape> inputs_copy;
   inputs_copy.push_back(inputs[0].Squeeze());
-  CHECK_FAIL_RETURN_UNEXPECTED(inputs.at(0).Rank() == 1, "Concatenate: only 1D input supported");
+  CHECK_FAIL_RETURN_UNEXPECTED(inputs.at(0).Rank() == 1,
+                               "Concatenate: only 1D input supported, got rank:" + std::to_string(inputs.at(0).Rank()));
   outputs.clear();
   dsize_t output_shape = 0;
   output_shape = output_shape + inputs.at(0).NumOfElements();
   if (prepend_ != nullptr) {
-    CHECK_FAIL_RETURN_UNEXPECTED(prepend_->shape().Rank() == 1, "Concatenate: only 1D prepend supported");
+    CHECK_FAIL_RETURN_UNEXPECTED(prepend_->shape().Rank() == 1, "Concatenate: only 1D prepend supported, got rank: " +
+                                                                  std::to_string(prepend_->shape().Rank()));
     CHECK_FAIL_RETURN_UNEXPECTED(
       (std::numeric_limits<uint64_t>::max() - output_shape) > prepend_->shape().NumOfElements(),
       "Concatenate: append parameter is too large to pend.");
     output_shape = output_shape + prepend_->shape().NumOfElements();
   }
   if (append_ != nullptr) {
-    CHECK_FAIL_RETURN_UNEXPECTED(append_->shape().Rank() == 1, "Concatenate: only 1D append supported");
+    CHECK_FAIL_RETURN_UNEXPECTED(append_->shape().Rank() == 1, "Concatenate: only 1D append supported, got rank: " +
+                                                                 std::to_string(append_->shape().Rank()));
     CHECK_FAIL_RETURN_UNEXPECTED(
       (std::numeric_limits<uint64_t>::max() - output_shape) > append_->shape().NumOfElements(),
-      "Concatenate: append parameter is too large to pend.");
+      "Concatenate: append parameter is too large to pend, got: " + std::to_string(append_->shape().NumOfElements()));
     output_shape = output_shape + append_->shape().NumOfElements();
   }

View File

@@ -43,7 +43,8 @@ Status OneHotEncodingUnsigned(const std::shared_ptr<Tensor> &input, std::shared_
     RETURN_IF_NOT_OK(input->GetItemAt<uint64_t>(&class_idx, {index}));
   }
   if (class_idx >= static_cast<uint64_t>(num_classes)) {
-    RETURN_STATUS_UNEXPECTED("OneHot: OneHot index values are not in range");
+    RETURN_STATUS_UNEXPECTED("OneHot: index value should not be bigger than num classes: " + std::to_string(num_classes) +
+                             ", but got: " + std::to_string(class_idx));
   }
   if (input->type() == DataType::DE_UINT64) {
     RETURN_IF_NOT_OK((*output)->SetItemAt<uint64_t>({index, static_cast<dsize_t>(class_idx)}, 1));
@@ -68,7 +69,8 @@ Status OneHotEncodingSigned(const std::shared_ptr<Tensor> &input, std::shared_pt
     RETURN_IF_NOT_OK(input->GetItemAt<int64_t>(&class_idx, {index}));
   }
   if (class_idx >= static_cast<int64_t>(num_classes)) {
-    RETURN_STATUS_UNEXPECTED("OneHot: OneHot index values are not in range");
+    RETURN_STATUS_UNEXPECTED("OneHot: index value should not be bigger than num classes: " + std::to_string(num_classes) +
+                             ", but got: " + std::to_string(class_idx));
   }
   if (input->type() == DataType::DE_INT64) {
     RETURN_IF_NOT_OK((*output)->SetItemAt<int64_t>({index, static_cast<dsize_t>(class_idx)}, 1));
@@ -88,10 +90,11 @@ Status OneHotEncoding(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tens
   input->Squeeze();
   if (input->Rank() > 1) {  // We expect the input to be in the first dimension
-    RETURN_STATUS_UNEXPECTED("OneHot: OneHot only supports scalars or 1D input.");
+    RETURN_STATUS_UNEXPECTED("OneHot: OneHot only supports scalars or 1D input, got rank: " +
+                             std::to_string(input->Rank()));
   }
   if (!input->type().IsInt()) {
-    RETURN_STATUS_UNEXPECTED("OneHot: OneHot does not support input of this type.");
+    RETURN_STATUS_UNEXPECTED("OneHot: OneHot only supports input of int type.");
   }
   try {
     dsize_t num_elements = 1;
@@ -219,7 +222,7 @@ Status Fill(const std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output
   const TensorShape &input_shape = input->shape();
   CHECK_FAIL_RETURN_UNEXPECTED(!((fill_type == DataType::DE_STRING) && (input_type != DataType::DE_STRING)),
-                               "Fill: fill datatype does not match the input datatype.");
+                               "Fill: fill datatype is string but the input datatype is not string.");
   CHECK_FAIL_RETURN_UNEXPECTED(fill_value->shape() == TensorShape({}),
                                "Fill: the shape of fill_value is not a scalar.");
@@ -338,7 +341,9 @@ Status TypeCast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *o
       break;
     case DataType::DE_UNKNOWN:
       // sanity check, unreachable code.
-      RETURN_STATUS_UNEXPECTED("TypeCast: TypeCast does not support input of this type.");
+      RETURN_STATUS_UNEXPECTED(
+        "TypeCast: TypeCast does not support input of this type, supported types are: [bool, int8, int16, int32, int64,"
+        " uint8, uint16, uint32, uint64, float16, float32, float64]");
   }
   return Status::OK();
 }
@@ -398,7 +403,9 @@ Status PadEndNumeric(const std::shared_ptr<Tensor> &src, std::shared_ptr<Tensor>
   if (src->Rank() == 0 || src->shape().AsVector() == pad_shape) {
     (*dst) = src;  // if no padding, copy the pointer
   } else {
-    CHECK_FAIL_RETURN_UNEXPECTED(src->Rank() == pad_shape.size(), "PadEnd: invalid pad shape.");
+    CHECK_FAIL_RETURN_UNEXPECTED(src->Rank() == pad_shape.size(),
+                                 "PadEnd: invalid pad shape, as rank of input is: " + std::to_string(src->Rank()) +
+                                 ", and rank of pad value: " + std::to_string(pad_shape.size()));
     RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape(pad_shape), src->type(), dst));
     auto tensor_type = src->type().value();
     if (pad_val == 0) {  // if pad with zero, don't care what type it is
@@ -455,7 +462,9 @@ Status PadEndString(const std::shared_ptr<Tensor> &src, std::shared_ptr<Tensor>
   if (src->Rank() == 0 || src->shape().AsVector() == pad_shape) {
     (*dst) = src;  // if no padding, copy the pointer
   } else {
-    CHECK_FAIL_RETURN_UNEXPECTED(src->Rank() == pad_shape.size(), "Pad to diff rank not allowed");
+    CHECK_FAIL_RETURN_UNEXPECTED(src->Rank() == pad_shape.size(),
+                                 "PadEnd: invalid pad shape, as rank of input is: " + std::to_string(src->Rank()) +
+                                 ", and rank of pad value: " + std::to_string(pad_shape.size()));
     std::vector<dsize_t> cur_ind(src->Rank(), 0);
     std::vector<std::string> strings;
     RETURN_IF_NOT_OK(PadEndStringHelper(src, &strings, TensorShape(pad_shape), cur_ind, 0, pad_val));
@@ -530,7 +539,8 @@ Status MaskHelper(const std::shared_ptr<Tensor> &input, const std::shared_ptr<Te
 Status Mask(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, const std::shared_ptr<Tensor> &value,
             RelationalOp op) {
   CHECK_FAIL_RETURN_UNEXPECTED(input->type().IsNumeric() == value->type().IsNumeric(),
-                               "Mask: input datatype does not match the value datatype.");
+                               "Mask: input datatype does not match the value datatype, both should be numeric or "
+                               "non-numeric at the same time.");
   CHECK_FAIL_RETURN_UNEXPECTED(value->shape() == TensorShape::CreateScalar(), "Mask: value shape is not a scalar");
   RETURN_IF_NOT_OK(Tensor::CreateEmpty(input->shape(), DataType(DataType::DE_BOOL), output));
@@ -594,7 +604,7 @@ Status Concatenate(const TensorRow &input, TensorRow *output, int8_t axis, std::
                    std::shared_ptr<Tensor> append) {
   CHECK_FAIL_RETURN_UNEXPECTED(input.size() > 0, "Concatenate: input is null");
   axis = Tensor::HandleNeg(axis, input[0]->shape().Rank());
-  CHECK_FAIL_RETURN_UNEXPECTED(axis == 0, "Concatenate: only 1D input supported");
+  CHECK_FAIL_RETURN_UNEXPECTED(axis == 0, "Concatenate: only 1D input supported, got axis: " + std::to_string(axis));
   TensorShape t = TensorShape::CreateScalar();
@@ -603,22 +613,30 @@ Status Concatenate(const TensorRow &input, TensorRow *output, int8_t axis, std::
   TensorRow tensor_list;
   if (prepend != nullptr) {
-    CHECK_FAIL_RETURN_UNEXPECTED(first_dtype == prepend->type(),
-                                 "Concatenate: input datatype does not match the prepend datatype.");
-    CHECK_FAIL_RETURN_UNEXPECTED(prepend->shape().Rank() == 1, "Concatenate: only 1D input supported");
+    CHECK_FAIL_RETURN_UNEXPECTED(
+      first_dtype == prepend->type(),
+      "Concatenate: input datatype does not match the prepend datatype: " + prepend->type().ToString());
+    CHECK_FAIL_RETURN_UNEXPECTED(
+      prepend->shape().Rank() == 1,
+      "Concatenate: only 1D input supported, got rank of input: " + std::to_string(prepend->shape().Rank()));
     tensor_list.emplace_back(prepend);
   }
   for (dsize_t i = 0; i < input.size(); i++) {
     CHECK_FAIL_RETURN_UNEXPECTED(first_dtype == input[i]->type(), "Concatenate: inconsistent datatype of input.");
-    CHECK_FAIL_RETURN_UNEXPECTED(input[i]->shape().Rank() == 1, "Concatenate: only 1D input supported");
+    CHECK_FAIL_RETURN_UNEXPECTED(
+      input[i]->shape().Rank() == 1,
+      "Concatenate: only 1D input supported, got rank of input: " + std::to_string(input[i]->shape().Rank()));
     tensor_list.emplace_back(input[i]);
   }
   if (append != nullptr) {
-    CHECK_FAIL_RETURN_UNEXPECTED(first_dtype == append->type(),
-                                 "Concatenate: input datatype does not match the append datatype.");
-    CHECK_FAIL_RETURN_UNEXPECTED(append->shape().Rank() == 1, "Concatenate: only 1D append supported");
+    CHECK_FAIL_RETURN_UNEXPECTED(
+      first_dtype == append->type(),
+      "Concatenate: input datatype does not match the append datatype: " + append->type().ToString());
+    CHECK_FAIL_RETURN_UNEXPECTED(
+      append->shape().Rank() == 1,
+      "Concatenate: only 1D append supported, got rank of input: " + std::to_string(append->shape().Rank()));
     tensor_list.emplace_back(append);
   }
@@ -670,7 +688,8 @@ Status BatchTensorToCVTensorVector(const std::shared_ptr<Tensor> &input,
   TensorShape remaining({-1});
   std::vector<int64_t> index(tensor_shape.size(), 0);
   if (tensor_shape.size() <= 1) {
-    RETURN_STATUS_UNEXPECTED("MixUpBatch: input must be at least 2-D in order to unpack.");
+    RETURN_STATUS_UNEXPECTED("MixUpBatch: input must be at least 2-D in order to unpack, but got rank: " +
+                             std::to_string(tensor_shape.size()));
   }
   TensorShape element_shape(std::vector<int64_t>(tensor_shape.begin() + 1, tensor_shape.end()));
@@ -682,7 +701,7 @@ Status BatchTensorToCVTensorVector(const std::shared_ptr<Tensor> &input,
     RETURN_IF_NOT_OK(Tensor::CreateFromMemory(element_shape, input->type(), start_addr_of_index, &out));
     std::shared_ptr<CVTensor> cv_out = CVTensor::AsCVTensor(std::move(out));
     if (!cv_out->mat().data) {
-      RETURN_STATUS_UNEXPECTED("MixUpBatch: allocate memory failed.");
+      RETURN_STATUS_UNEXPECTED("[Internal ERROR] MixUpBatch: allocate memory failed.");
     }
     output->push_back(cv_out);
   }
@@ -695,7 +714,8 @@ Status BatchTensorToTensorVector(const std::shared_ptr<Tensor> &input, std::vect
   TensorShape remaining({-1});
   std::vector<int64_t> index(tensor_shape.size(), 0);
   if (tensor_shape.size() <= 1) {
-    RETURN_STATUS_UNEXPECTED("CutMixBatch: input must be at least 2-D in order to unpack.");
+    RETURN_STATUS_UNEXPECTED("CutMixBatch: input must be at least 2-D in order to unpack, but got rank: " +
+                             std::to_string(tensor_shape.size()));
   }
   TensorShape element_shape(std::vector<int64_t>(tensor_shape.begin() + 1, tensor_shape.end()));
@@ -793,7 +813,8 @@ Status UniqueHelper(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
 Status Unique(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
               std::shared_ptr<Tensor> *output_idx, std::shared_ptr<Tensor> *output_cnt) {
-  CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Rank() == 1, "Unique: only 1D input supported.");
+  CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Rank() == 1, "Unique: only 1D input supported, but got rank: " +
+                               std::to_string(input->shape().Rank()));
   if (input->type() == DataType::DE_INT64) {
     RETURN_IF_NOT_OK(UniqueHelper<int64_t>(input, output, output_idx, output_cnt));
   } else if (input->type() == DataType::DE_INT32) {

View File

@@ -24,7 +24,9 @@ namespace dataset {
 Status DuplicateOp::Compute(const TensorRow &input, TensorRow *output) {
   IO_CHECK_VECTOR(input, output);
-  CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 1, "Duplicate: only supports transform one column each time.");
+  CHECK_FAIL_RETURN_UNEXPECTED(
+    input.size() == 1,
+    "Duplicate: only supports transforming one column each time, got column num: " + std::to_string(input.size()));
   std::shared_ptr<Tensor> out;
   RETURN_IF_NOT_OK(Tensor::CreateFromTensor(input[0], &out));
   output->push_back(input[0]);

View File

@@ -25,7 +25,7 @@ namespace dataset {
 Status MaskOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
   IO_CHECK(input, output);
   std::shared_ptr<Tensor> temp_output;
-  CHECK_FAIL_RETURN_UNEXPECTED(type_.IsNumeric(), "Mask: only support numeric datatype of input.");
+  CHECK_FAIL_RETURN_UNEXPECTED(type_.IsNumeric(), "Mask: only supports numeric datatype of input, got string.");
   RETURN_IF_NOT_OK(Mask(input, &temp_output, value_, op_));

View File

@@ -30,7 +30,7 @@ uint32_t RandomChoiceOp::NumInput() {
   for (auto &op : ops_) {
     uint32_t cur_num = op->NumInput();
     if (num_input != cur_num && cur_num > 0) {
-      MS_LOG(WARNING) << "Unable to determine NumInput, ops in RandomChoice don't take the same number of input.";
+      MS_LOG(WARNING) << "Unable to determine Num of Input, ops in RandomChoice don't take the same number of input.";
       return 0;
     }
   }

View File

@@ -24,18 +24,22 @@ namespace dataset {
 Status UniqueOp::Compute(const TensorRow &input, TensorRow *output) {
   IO_CHECK_VECTOR(input, output);
-  CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 1, "Unique: only support 1D input");
+  CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 1,
+                               "Unique: only supports 1D input, got rank: " + std::to_string(input.size()));
   auto in_tensor = input[0];
   auto in_tensor_shape = in_tensor->shape();
   auto in_tensor_type = in_tensor->type();
-  CHECK_FAIL_RETURN_UNEXPECTED(in_tensor_type.IsNumeric(), "Unique: Tensor type must be numeric.");
-  CHECK_FAIL_RETURN_UNEXPECTED(in_tensor_shape.Rank() >= 2,
-                               "Unique: input must be at least 2-D in order to do unique op.");
-  CHECK_FAIL_RETURN_UNEXPECTED(
-    in_tensor->Size() <= std::numeric_limits<int32_t>::max(),
-    "Unique: Unique does not support input tensor large than " + std::to_string(std::numeric_limits<int32_t>::max()));
+  CHECK_FAIL_RETURN_UNEXPECTED(in_tensor_type.IsNumeric(),
+                               "Unique: only supports numeric datatype of input, got string.");
+  CHECK_FAIL_RETURN_UNEXPECTED(
+    in_tensor_shape.Rank() >= 2,
+    "Unique: input must be at least 2-D in order to do unique op, got rank: " + std::to_string(in_tensor_shape.Rank()));
+  CHECK_FAIL_RETURN_UNEXPECTED(in_tensor->Size() <= std::numeric_limits<int32_t>::max(),
+                               "Unique: Unique does not support input tensor larger than: " +
+                               std::to_string(std::numeric_limits<int32_t>::max()) +
+                               ", got: " + std::to_string(in_tensor->Size()));
   RETURN_IF_NOT_OK(in_tensor->Reshape(TensorShape({in_tensor->Size()})));

View File

@@ -45,16 +45,20 @@ Status BoundingBox::ReadFromTensor(const TensorPtr &bbox_tensor, dsize_t index_o
 Status BoundingBox::ValidateBoundingBoxes(const TensorRow &image_and_bbox) {
   if (image_and_bbox.size() != 2) {
     return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__,
-                  "BoundingBox: invalid input, likely missed bounding boxes.");
+                  "BoundingBox: invalid input, size of input data should be 2 (including image and bounding box), "
+                  "but got: " +
+                    std::to_string(image_and_bbox.size()));
   }
   if (image_and_bbox[1]->shape().Size() < 2) {
     return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__,
-                  "BoundingBox: bounding boxes should have to be two-dimensional matrix at least.");
+                  "BoundingBox: bounding boxes should be a two-dimensional matrix at least, but got " +
+                    std::to_string(image_and_bbox[1]->shape().Size()) + " dimension.");
   }
   int64_t num_of_features = image_and_bbox[1]->shape()[1];
   if (num_of_features < kNumOfCols) {
-    return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__,
-                  "BoundingBox: bounding boxes should be have at least 4 features.");
+    return Status(
+      StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__,
+      "BoundingBox: bounding boxes should have at least 4 features, but got: " + std::to_string(num_of_features));
   }
   std::vector<std::shared_ptr<BoundingBox>> bbox_list;
   RETURN_IF_NOT_OK(GetListOfBoundingBoxes(image_and_bbox[1], &bbox_list));
@@ -62,16 +66,20 @@ Status BoundingBox::ValidateBoundingBoxes(const TensorRow &image_and_bbox) {
   int64_t img_w = image_and_bbox[0]->shape()[1];
   for (auto &bbox : bbox_list) {
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int64_t>::max() - bbox->x()) > bbox->width(),
-                                 "BoundingBox: bbox_width is too large.");
+                                 "BoundingBox: bbox width is too large, coordinate x plus width is bigger than the max of int64.");
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int64_t>::max() - bbox->y()) > bbox->height(),
-                                 "BoundingBox: bbox_height is too large.");
+                                 "BoundingBox: bbox height is too large, coordinate y plus height is bigger than the max of int64.");
     if ((bbox->x() + bbox->width() > img_w) || (bbox->y() + bbox->height() > img_h)) {
       return Status(StatusCode::kMDBoundingBoxOutOfBounds, __LINE__, __FILE__,
-                    "BoundingBox: bounding boxes is out of bounds of the image");
+                    "BoundingBox: bounding box is out of bounds of the image, as image width: " +
+                      std::to_string(img_w) + ", bbox width coordinate: " + std::to_string(bbox->x() + bbox->width()) +
+                      ", and image height: " + std::to_string(img_h) +
+                      ", bbox height coordinate: " + std::to_string(bbox->y() + bbox->height()));
     }
     if (static_cast<int>(bbox->x()) < 0 || static_cast<int>(bbox->y()) < 0) {
       return Status(StatusCode::kMDBoundingBoxOutOfBounds, __LINE__, __FILE__,
-                    "BoundingBox: the coordinates of the bounding boxes has negative value.");
+                    "BoundingBox: the coordinates of the bounding boxes have negative value, got: (" +
+                      std::to_string(bbox->x()) + "," + std::to_string(bbox->y()) + ").");
     }
   }
   return Status::OK();
@@ -116,10 +124,10 @@ Status BoundingBox::PadBBoxes(const TensorPtr *bbox_list, size_t bbox_count, int
     std::shared_ptr<BoundingBox> bbox;
     RETURN_IF_NOT_OK(ReadFromTensor(*bbox_list, i, &bbox));
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - bbox->x()) > pad_left,
-                                 "BoundingBox: pad_left is too large.");
+                                 "BoundingBox: pad_left is too large, coordinate x plus pad_left is bigger than the max of int32.");
     bbox->SetX(bbox->x() + pad_left);
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - bbox->y()) > pad_top,
-                                 "BoundingBox: pad_top is too large.");
+                                 "BoundingBox: pad_top is too large, coordinate y plus pad_top is bigger than the max of int32.");
     bbox->SetY(bbox->y() + pad_top);
     RETURN_IF_NOT_OK(bbox->WriteToTensor(*bbox_list, i));
   }
@@ -199,13 +207,13 @@ Status BoundingBox::UpdateBBoxesForResize(const TensorPtr &bbox_list, size_t bbo
     RETURN_IF_NOT_OK(ReadFromTensor(bbox_list, i, &bbox));
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<float_t>::max() / bbox->x()) > W_aspRatio,
-                                 "BoundingBox: W_aspRatio is too large.");
+                                 "BoundingBox: width aspect ratio is too large, got: " + std::to_string(W_aspRatio));
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<float_t>::max() / bbox->y()) > H_aspRatio,
-                                 "BoundingBox: H_aspRatio is too large.");
+                                 "BoundingBox: height aspect ratio is too large, got: " + std::to_string(H_aspRatio));
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<float_t>::max() / bbox->width()) > W_aspRatio,
-                                 "BoundingBox: W_aspRatio is too large.");
+                                 "BoundingBox: width aspect ratio is too large, got: " + std::to_string(W_aspRatio));
     CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<float_t>::max() / bbox->height()) > H_aspRatio,
-                                 "BoundingBox: H_aspRatio is too large.");
+                                 "BoundingBox: height aspect ratio is too large, got: " + std::to_string(H_aspRatio));
     // update positions and widths
     bbox->SetX(bbox->x() * W_aspRatio);
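Several of the checks above guard an addition `a + b` against integer overflow by comparing `b` against `std::numeric_limits<T>::max() - a` instead of computing the sum directly. A minimal, self-contained sketch of that pattern follows; the helper name and sample values are made up for illustration, and non-negative inputs are assumed.

```cpp
#include <cstdint>
#include <iostream>
#include <limits>

// Hypothetical helper, not part of the patch: true if a + b can be computed
// without overflowing int64_t. Assumes a and b are non-negative, matching how
// the bounding-box coordinates and pad sizes are used above.
bool AdditionFits(int64_t a, int64_t b) {
  return (std::numeric_limits<int64_t>::max() - a) > b;
}

int main() {
  std::cout << AdditionFits(100, 200) << '\n';                                    // 1: sum fits
  std::cout << AdditionFits(std::numeric_limits<int64_t>::max() - 1, 5) << '\n';  // 0: sum would overflow
  return 0;
}
```

Note that the guard uses a strict `>`, so the exact-fit case is rejected as well, which mirrors the checks in the diff.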

View File

@@ -34,8 +34,12 @@ Status CenterCropOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_p
   std::string err_msg;
   std::string err_head = "CenterCrop: ";
   dsize_t rank = input->shape().Rank();
-  err_msg += (rank < 2 || rank > 3) ? "image shape is not <H,W,C> or <H,W> \t" : "";
-  err_msg += (crop_het_ <= 0 || crop_wid_ <= 0) ? "crop size needs to be positive integers\t" : "";
+  err_msg +=
+    (rank < 2 || rank > 3) ? "image shape is not <H,W,C> or <H,W>, but got rank: " + std::to_string(rank) + "\t" : "";
+  err_msg += (crop_het_ <= 0 || crop_wid_ <= 0)
+               ? "crop size needs to be positive integers, but got crop height: " + std::to_string(crop_het_) +
+                   ", crop width: " + std::to_string(crop_wid_) + "\t"
+               : "";
   if (err_msg.length() != 0) RETURN_STATUS_UNEXPECTED(err_head + err_msg);
@@ -45,7 +49,10 @@ Status CenterCropOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_p
   constexpr int64_t pad_limit = 3;
   CHECK_FAIL_RETURN_UNEXPECTED((top < input->shape()[0] * pad_limit && left < input->shape()[1] * pad_limit),
-                               "CenterCrop: CenterCropOp padding size is more than 3 times the original size.");
+                               "CenterCrop: CenterCropOp padding size is more than 3 times the original size, got pad"
+                               " top: " +
+                                 std::to_string(top) + ", pad left: " + std::to_string(left) + ", and original size: " +
+                                 std::to_string(input->shape()[0]) + ", " + std::to_string(input->shape()[1]));
   if (top > 0 && left > 0) {  // padding only
     return Pad(input, output, top / 2 + top % 2, top / 2, left / 2 + left % 2, left / 2, BorderType::kConstant);
@@ -73,7 +80,9 @@ Status CenterCropOp::OutputShape(const std::vector<TensorShape> &inputs, std::ve
   if (inputs[0].Rank() == 2) outputs.emplace_back(out);
   if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2]));
   if (!outputs.empty()) return Status::OK();
-  return Status(StatusCode::kMDUnexpectedError, "CenterCrop: invalid input shape.");
+  return Status(StatusCode::kMDUnexpectedError,
+                "CenterCrop: invalid input shape, expected 2D or 3D input, but got input rank: " +
+                  std::to_string(inputs[0].Rank()));
 }
 }  // namespace dataset
 }  // namespace mindspore

View File

@@ -27,13 +27,13 @@ namespace dataset {
 Status CropOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
   IO_CHECK(input, output);
-  CHECK_FAIL_RETURN_UNEXPECTED(
-    input->shape().Size() >= 2,
-    "Crop: the shape size " + std::to_string(input->shape().Size()) + " of input is invalid.");
+  RETURN_IF_NOT_OK(ValidateImageRank("Crop", input->shape().Size()));
   int32_t input_h = static_cast<int>(input->shape()[0]);
   int32_t input_w = static_cast<int>(input->shape()[1]);
-  CHECK_FAIL_RETURN_UNEXPECTED(y_ + height_ <= input_h, "Crop: Crop height dimension exceeds image dimensions.");
-  CHECK_FAIL_RETURN_UNEXPECTED(x_ + width_ <= input_w, "Crop: Crop width dimension exceeds image dimensions.");
+  CHECK_FAIL_RETURN_UNEXPECTED(y_ + height_ <= input_h, "Crop: Crop height dimension: " + std::to_string(y_ + height_) +
+                                                        " exceeds image height: " + std::to_string(input_h));
+  CHECK_FAIL_RETURN_UNEXPECTED(x_ + width_ <= input_w, "Crop: Crop width dimension: " + std::to_string(x_ + width_) +
+                                                       " exceeds image width: " + std::to_string(input_w));
   return Crop(input, output, x_, y_, width_, height_);
 }
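This hunk, together with the GaussianBlur, Resize, Crop, ConvertColor, CropAndResize and Rotate hunks further down, replaces per-operator rank checks with a shared ValidateImageRank helper that is not shown in this excerpt. A rough sketch of what such a helper presumably looks like, inferred only from its call sites here (the signature and message wording are assumptions, not the actual implementation), reusing the same Status/RETURN_STATUS_UNEXPECTED utilities as the surrounding code:

```cpp
// Assumed shape only -- the real helper lives outside this excerpt.
Status ValidateImageRank(const std::string &op_name, int32_t rank) {
  // Call sites pass the tensor rank and expect <H,W> (rank 2) or <H,W,C> (rank 3).
  if (rank != 2 && rank != 3) {
    RETURN_STATUS_UNEXPECTED(op_name + ": input tensor is not in shape of <H,W,C> or <H,W>, but got rank: " +
                             std::to_string(rank));
  }
  return Status::OK();
}
```

Centralizing the check keeps the operator name in the message while removing the slightly different inline checks each operator used to carry.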

View File

@@ -60,7 +60,9 @@ void CutMixBatchOp::GetCropBox(int height, int width, float lam, int *x, int *y,
 Status CutMixBatchOp::ValidateCutMixBatch(const TensorRow &input) {
   if (input.size() < kMinLabelShapeSize) {
-    RETURN_STATUS_UNEXPECTED("CutMixBatch: invalid input, both image and label columns are required.");
+    RETURN_STATUS_UNEXPECTED(
+      "CutMixBatch: invalid input, size of input should be 2 (including image and label), but got: " +
+      std::to_string(input.size()));
   }
   std::vector<int64_t> image_shape = input.at(0)->shape().AsVector();
   std::vector<int64_t> label_shape = input.at(1)->shape().AsVector();
@@ -68,26 +70,33 @@ Status CutMixBatchOp::ValidateCutMixBatch(const TensorRow &input) {
   // Check inputs
   if (image_shape.size() != kExpectedImageShapeSize || image_shape[0] != label_shape[0]) {
     RETURN_STATUS_UNEXPECTED(
-      "CutMixBatch: please make sure images are HWC or CHW "
-      "and batched before calling CutMixBatch.");
+      "CutMixBatch: please make sure images are <H,W,C> or <C,H,W> format, and batched before calling CutMixBatch.");
   }
   if (!input.at(1)->type().IsInt()) {
-    RETURN_STATUS_UNEXPECTED("CutMixBatch: Wrong labels type. The second column (labels) must only include int types.");
+    RETURN_STATUS_UNEXPECTED(
+      "CutMixBatch: Wrong labels type. The second column (labels) must only include int types, but got:" +
+      input.at(1)->type().ToString());
   }
   if (label_shape.size() != kMinLabelShapeSize && label_shape.size() != kMaxLabelShapeSize) {
     RETURN_STATUS_UNEXPECTED(
       "CutMixBatch: wrong labels shape. "
       "The second column (labels) must have a shape of NC or NLC where N is the batch size, "
       "L is the number of labels in each row, and C is the number of classes. "
-      "labels must be in one-hot format and in a batch.");
+      "labels must be in one-hot format and in a batch, but got rank: " +
+      std::to_string(label_shape.size()));
   }
+  std::string shape_info = "(";
+  for (auto i : image_shape) {
+    shape_info = shape_info + std::to_string(i) + ", ";
+  }
+  shape_info.replace(shape_info.end() - 1, shape_info.end(), ")");
   if ((image_shape[kDimensionOne] != kValueOne && image_shape[kDimensionOne] != kValueThree) &&
       image_batch_format_ == ImageBatchFormat::kNCHW) {
-    RETURN_STATUS_UNEXPECTED("CutMixBatch: image doesn't match the NCHW format.");
+    RETURN_STATUS_UNEXPECTED("CutMixBatch: image doesn't match the <N,C,H,W> format, got shape: " + shape_info);
   }
   if ((image_shape[kDimensionThree] != kValueOne && image_shape[kDimensionThree] != kValueThree) &&
       image_batch_format_ == ImageBatchFormat::kNHWC) {
-    RETURN_STATUS_UNEXPECTED("CutMixBatch: image doesn't match the NHWC format.");
+    RETURN_STATUS_UNEXPECTED("CutMixBatch: image doesn't match the <N,H,W,C> format, got shape: " + shape_info);
   }
   return Status::OK();
@@ -184,8 +193,9 @@ Status CutMixBatchOp::Compute(const TensorRow &input, TensorRow *output) {
   // Calculate random labels
   std::vector<int64_t> rand_indx;
-  CHECK_FAIL_RETURN_UNEXPECTED(images.size() <= static_cast<size_t>(std::numeric_limits<int64_t>::max()),
-                               "The size of \"images\" must not be more than \"INT64_MAX\".");
+  CHECK_FAIL_RETURN_UNEXPECTED(
+    images.size() <= static_cast<size_t>(std::numeric_limits<int64_t>::max()),
+    "The size of \"images\" must not be more than \"INT64_MAX\", but got: " + std::to_string(images.size()));
   for (int64_t idx = 0; idx < static_cast<int64_t>(images.size()); idx++) rand_indx.push_back(idx);
   std::shuffle(rand_indx.begin(), rand_indx.end(), rnd_);
   std::gamma_distribution<float> gamma_distribution(alpha_, 1);
@@ -205,7 +215,8 @@ Status CutMixBatchOp::Compute(const TensorRow &input, TensorRow *output) {
   float x1 = gamma_distribution(rnd_);
   float x2 = gamma_distribution(rnd_);
   CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<float_t>::max() - x1) > x2,
-                               "CutMixBatchOp: gamma_distribution x1 and x2 are too large.");
+                               "CutMixBatchOp: gamma_distribution x1 and x2 are too large, got x1: " +
+                               std::to_string(x1) + ", x2:" + std::to_string(x2));
   float lam = x1 / (x1 + x2);
   double random_number = uniform_distribution(rnd_);
   if (random_number < prob_) {
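The new shape_info string in ValidateCutMixBatch can be exercised in isolation; a standalone sketch with a made-up batch shape shows the text it produces (the single-character replace swaps only the trailing space, so the rendered shape keeps a trailing comma):

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Made-up NCHW batch shape, only to exercise the string building used in the hunk above.
  std::vector<int64_t> image_shape = {32, 3, 224, 224};
  std::string shape_info = "(";
  for (auto i : image_shape) {
    shape_info = shape_info + std::to_string(i) + ", ";
  }
  // Replaces the final character (a space) with ')'.
  shape_info.replace(shape_info.end() - 1, shape_info.end(), ")");
  std::cout << shape_info << std::endl;  // prints: (32, 3, 224, 224,)
  return 0;
}
```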

View File

@@ -38,12 +38,13 @@ Status DecodeOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<T
   IO_CHECK(input, output);
   // check the input tensor shape
   if (input->Rank() != 1) {
-    RETURN_STATUS_UNEXPECTED("Decode: invalid input shape, only support 1D input.");
+    RETURN_STATUS_UNEXPECTED("Decode: invalid input shape, only support 1D input, got rank: " +
+                             std::to_string(input->Rank()));
   }
   if (is_rgb_format_) {  // RGB colour mode
     return Decode(input, output);
   } else {  // BGR colour mode
-    RETURN_STATUS_UNEXPECTED("Decode: only support RGB image.");
+    RETURN_STATUS_UNEXPECTED("Decode: only supports decoding to RGB image, check input parameter first.");
   }
 }
 Status DecodeOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {

View File

@@ -27,9 +27,7 @@ namespace dataset {
 Status GaussianBlurOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
   IO_CHECK(input, output);
-  if (input->Rank() != 3 && input->Rank() != 2) {
-    RETURN_STATUS_UNEXPECTED("GaussianBlur: input image is not in shape of <H,W,C> or <H,W>");
-  }
+  RETURN_IF_NOT_OK(ValidateImageRank("GaussianBlur", input->Rank()));
   return GaussianBlur(input, output, kernel_x_, kernel_y_, sigma_x_, sigma_y_);
 }
 }  // namespace dataset

View File

@@ -16,6 +16,7 @@
 #include "minddata/dataset/kernels/image/image_utils.h"
 #include <opencv2/imgproc/types_c.h>
 #include <algorithm>
+#include <limits>
 #include <vector>
 #include <stdexcept>
 #include <opencv2/imgcodecs.hpp>
@@ -81,7 +82,9 @@ Status GetConvertShape(ConvertMode convert_mode, const std::shared_ptr<CVTensor>
   } else if (std::find(one_channels.begin(), one_channels.end(), convert_mode) != one_channels.end()) {
     *node = {input_cv->shape()[0], input_cv->shape()[1]};
   } else {
-    RETURN_STATUS_UNEXPECTED("The mode of image channel conversion must be in ConvertMode.");
+    RETURN_STATUS_UNEXPECTED(
+      "The mode of image channel conversion must be in ConvertMode, which mainly includes "
+      "conversion between RGB, BGR, GRAY, RGBA etc.");
   }
   return Status::OK();
 }
@@ -104,8 +107,9 @@ bool CheckTensorShape(const std::shared_ptr<Tensor> &tensor, const int &channel)
 Status Flip(std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output, int flip_code) {
   std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
-  if (input_cv->Rank() <= 1 || input_cv->mat().dims > 2) {
-    RETURN_STATUS_UNEXPECTED("Flip: input tensor is not in shape of <H,W,C> or <H,W>.");
+  if (input_cv->Rank() == 1 || input_cv->mat().dims > 2) {
+    RETURN_STATUS_UNEXPECTED("Flip: shape of input is not <H,W,C> or <H,W>, but got rank:" +
+                             std::to_string(input_cv->Rank()));
   }
   std::shared_ptr<CVTensor> output_cv;
@@ -120,7 +124,7 @@ Status Flip(std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output, int
       RETURN_STATUS_UNEXPECTED("Flip: " + std::string(e.what()));
     }
   } else {
-    RETURN_STATUS_UNEXPECTED("Flip: allocate memory failed.");
+    RETURN_STATUS_UNEXPECTED("[Internal ERROR] Flip: allocate memory failed.");
   }
 }
@@ -136,11 +140,9 @@ Status Resize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
               int32_t output_width, double fx, double fy, InterpolationMode mode) {
   std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
   if (!input_cv->mat().data) {
-    RETURN_STATUS_UNEXPECTED("Resize: load image failed.");
+    RETURN_STATUS_UNEXPECTED("[Internal ERROR] Resize: load image failed.");
   }
-  if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != 2) {
-    RETURN_STATUS_UNEXPECTED("Resize: input tensor is not in shape of <H,W,C> or <H,W>");
-  }
+  RETURN_IF_NOT_OK(ValidateImageRank("Resize", input_cv->Rank()));
   if (mode == InterpolationMode::kCubicPil) {
     LiteMat imIn, imOut;
@@ -162,7 +164,10 @@ Status Resize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
   // resize image too large or too small, 1000 is arbitrarily chosen here to prevent open cv from segmentation fault
   if (output_height > in_image.rows * kResizeShapeLimits || output_width > in_image.cols * kResizeShapeLimits) {
     std::string err_msg =
-      "Resize: the resizing width or height is too big, it's 1000 times bigger than the original image.";
+      "Resize: the resizing width or height is too big, it's 1000 times bigger than the original image, got output "
+      "height: " +
+      std::to_string(output_height) + ", width: " + std::to_string(output_width) +
+      ", and original image size:" + std::to_string(in_image.rows) + ", " + std::to_string(in_image.cols);
     return Status(StatusCode::kMDShapeMisMatch, err_msg);
   }
   if (output_height == 0 || output_width == 0) {
@@ -208,7 +213,7 @@ Status Decode(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
 Status DecodeCv(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
   std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
   if (!input_cv->mat().data) {
-    RETURN_STATUS_UNEXPECTED("Decode: load image failed.");
+    RETURN_STATUS_UNEXPECTED("[Internal ERROR] Decode: load image failed.");
   }
   try {
     cv::Mat img_mat = cv::imdecode(input_cv->mat(), cv::IMREAD_COLOR | cv::IMREAD_IGNORE_ORIENTATION);
@@ -287,7 +292,7 @@ static Status JpegReadScanlines(jpeg_decompress_struct *const cinfo, int max_sca
     try {
       num_lines_read = jpeg_read_scanlines(cinfo, &scanline_ptr, 1);
     } catch (std::runtime_error &e) {
-      RETURN_STATUS_UNEXPECTED("Decode: image decode failed.");
+      RETURN_STATUS_UNEXPECTED("[Internal ERROR] Decode: image decode failed.");
     }
     if (cinfo->out_color_space == JCS_CMYK && num_lines_read > 0) {
       for (int i = 0; i < crop_w; ++i) {
@@ -314,11 +319,11 @@ static Status JpegReadScanlines(jpeg_decompress_struct *const cinfo, int max_sca
       int copy_status = memcpy_s(buffer, buffer_size, scanline_ptr + offset, stride);
       if (copy_status != 0) {
         jpeg_destroy_decompress(cinfo);
-        RETURN_STATUS_UNEXPECTED("Decode: memcpy failed.");
+        RETURN_STATUS_UNEXPECTED("[Internal ERROR] Decode: memcpy failed.");
       }
     } else {
       jpeg_destroy_decompress(cinfo);
-      std::string err_msg = "Decode: image decode failed.";
+      std::string err_msg = "[Internal ERROR] Decode: image decode failed.";
       RETURN_STATUS_UNEXPECTED(err_msg);
     }
     buffer += stride;
@@ -342,7 +347,7 @@ static Status JpegSetColorSpace(jpeg_decompress_struct *cinfo) {
       return Status::OK();
     default:
       jpeg_destroy_decompress(cinfo);
-      std::string err_msg = "Decode: image decode failed.";
+      std::string err_msg = "[Internal ERROR] Decode: image decode failed.";
       RETURN_STATUS_UNEXPECTED(err_msg);
   }
 }
@@ -373,9 +378,9 @@ Status JpegCropAndDecode(const std::shared_ptr<Tensor> &input, std::shared_ptr<T
     return DestroyDecompressAndReturnError(e.what());
   }
   CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - crop_w) > crop_x,
-                               "JpegCropAndDecode: addition out of bounds.");
+                               "JpegCropAndDecode: addition (crop x and crop width) out of bounds.");
   CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - crop_h) > crop_y,
-                               "JpegCropAndDecode: addition out of bounds.");
+                               "JpegCropAndDecode: addition (crop y and crop height) out of bounds.");
   if (crop_x == 0 && crop_y == 0 && crop_w == 0 && crop_h == 0) {
     crop_w = cinfo.output_width;
     crop_h = cinfo.output_height;
@@ -424,7 +429,7 @@ Status JpegCropAndDecode(const std::shared_ptr<Tensor> &input, std::shared_ptr<T
 Status Rescale(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, float rescale, float shift) {
   std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
   if (!input_cv->mat().data) {
-    RETURN_STATUS_UNEXPECTED("Rescale: load image failed.");
+    RETURN_STATUS_UNEXPECTED("[Internal ERROR] Rescale: load image failed.");
   }
   cv::Mat input_image = input_cv->mat();
   std::shared_ptr<CVTensor> output_cv;
@@ -441,24 +446,24 @@ Status Rescale(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *ou
 Status Crop(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int x, int y, int w, int h) {
   std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
   if (!input_cv->mat().data) {
-    RETURN_STATUS_UNEXPECTED("Crop: load image failed.");
+    RETURN_STATUS_UNEXPECTED("[Internal ERROR] Crop: load image failed.");
   }
-  if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != 2) {
-    RETURN_STATUS_UNEXPECTED("Crop: invalid image Shape, only support <H,W,C> or <H,W>");
-  }
-  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - y) > h, "Crop: addition out of bounds.");
+  RETURN_IF_NOT_OK(ValidateImageRank("Crop", input_cv->Rank()));
+  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - y) > h,
+                               "Crop: addition (y and height) out of bounds.");
   // account for integer overflow
   if (y < 0 || (y + h) > input_cv->shape()[0] || (y + h) < 0) {
     RETURN_STATUS_UNEXPECTED(
-      "Crop: invalid y coordinate value for crop, "
-      "y coordinate value exceeds the boundary of the image.");
+      "Crop: invalid y coordinate value for crop, y coordinate value exceeds the boundary of the image, got y: " +
+      std::to_string(y));
   }
   CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - x) > w, "Crop: addition out of bounds.");
   // account for integer overflow
   if (x < 0 || (x + w) > input_cv->shape()[1] || (x + w) < 0) {
     RETURN_STATUS_UNEXPECTED(
       "Crop: invalid x coordinate value for crop, "
-      "x coordinate value exceeds the boundary of the image.");
+      "x coordinate value exceeds the boundary of the image, got x: " +
+      std::to_string(x));
   }
   try {
     TensorShape shape{h, w};
@@ -480,22 +485,23 @@ Status Crop(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *outpu
 Status ConvertColor(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, ConvertMode convert_mode) {
   try {
     std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
-    if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != 2) {
-      RETURN_STATUS_UNEXPECTED("ConvertColor: invalid image Shape, only support <H,W,C> or <H,W>");
-    }
+    RETURN_IF_NOT_OK(ValidateImageRank("ConvertColor", input_cv->Rank()));
     if (!input_cv->mat().data) {
-      RETURN_STATUS_UNEXPECTED("ConvertColor: load image failed.");
+      RETURN_STATUS_UNEXPECTED("[Internal ERROR] ConvertColor: load image failed.");
    }
     if (input_cv->Rank() == 3) {
       int num_channels = input_cv->shape()[CHANNEL_INDEX];
       if (num_channels != 3 && num_channels != 4) {
-        RETURN_STATUS_UNEXPECTED("ConvertColor: number of channels of image should be 3, 4");
+        RETURN_STATUS_UNEXPECTED("ConvertColor: number of channels of image should be 3 or 4, but got:" +
+                                 std::to_string(num_channels));
       }
     }
     std::vector<dsize_t> node;
     RETURN_IF_NOT_OK(GetConvertShape(convert_mode, input_cv, &node));
     if (node.empty()) {
-      RETURN_STATUS_UNEXPECTED("ConvertColor: convert mode must be in ConvertMode.");
+      RETURN_STATUS_UNEXPECTED(
+        "ConvertColor: convert mode must be in ConvertMode, which mainly includes conversion "
+        "between RGB, BGR, GRAY, RGBA etc.");
     }
     TensorShape out_shape = TensorShape(node);
     std::shared_ptr<CVTensor> output_cv;
@ -512,7 +518,7 @@ Status HwcToChw(std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output)
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("HWC2CHW: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] HWC2CHW: load image failed.");
} }
if (input_cv->Rank() == 2) { if (input_cv->Rank() == 2) {
// If input tensor is 2D, we assume we have hw dimensions // If input tensor is 2D, we assume we have hw dimensions
@ -524,7 +530,8 @@ Status HwcToChw(std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output)
if (input_cv->shape().Size() < MIN_IMAGE_DIMENSION || input_cv->shape().Size() > DEFAULT_IMAGE_CHANNELS || if (input_cv->shape().Size() < MIN_IMAGE_DIMENSION || input_cv->shape().Size() > DEFAULT_IMAGE_CHANNELS ||
(input_cv->shape().Size() == DEFAULT_IMAGE_CHANNELS && num_channels != DEFAULT_IMAGE_CHANNELS && (input_cv->shape().Size() == DEFAULT_IMAGE_CHANNELS && num_channels != DEFAULT_IMAGE_CHANNELS &&
num_channels != MIN_IMAGE_CHANNELS)) { num_channels != MIN_IMAGE_CHANNELS)) {
RETURN_STATUS_UNEXPECTED("HWC2CHW: image shape is not <H,W,C>."); RETURN_STATUS_UNEXPECTED("HWC2CHW: image shape is not <H,W,C>, but got rank: " +
std::to_string(input_cv->shape().Size()));
} }
cv::Mat output_img; cv::Mat output_img;
@ -604,7 +611,7 @@ Status MaskWithTensor(const std::shared_ptr<Tensor> &sub_mat, std::shared_ptr<Te
} else { } else {
RETURN_STATUS_UNEXPECTED( RETURN_STATUS_UNEXPECTED(
"CutMixBatch: MaskWithTensor failed: " "CutMixBatch: MaskWithTensor failed: "
"image format must be CHW, HWC, or HW."); "image format must be <C,H,W>, <H,W,C>, or <H,W>.");
} }
return Status::OK(); return Status::OK();
} }
@ -655,11 +662,9 @@ Status CropAndResize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tenso
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("CropAndResize: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] CropAndResize: load image failed.");
}
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != 2) {
RETURN_STATUS_UNEXPECTED("CropAndResize: image shape is not <H,W,C> or <H,W>");
} }
RETURN_IF_NOT_OK(ValidateImageRank("CropAndResize", input_cv->Rank()));
// image too large or too small, 1000 is arbitrary here to prevent opencv from segmentation fault // image too large or too small, 1000 is arbitrary here to prevent opencv from segmentation fault
const uint32_t kCropShapeLimits = 1000; const uint32_t kCropShapeLimits = 1000;
if (crop_height == 0 || crop_width == 0 || target_height == 0 || target_height > crop_height * kCropShapeLimits || if (crop_height == 0 || crop_width == 0 || target_height == 0 || target_height > crop_height * kCropShapeLimits ||
@ -713,11 +718,9 @@ Status Rotate(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Rotate: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] Rotate: load image failed.");
}
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != MIN_IMAGE_DIMENSION) {
RETURN_STATUS_UNEXPECTED("Rotate: image shape is not <H,W,C> or <H,W>.");
} }
RETURN_IF_NOT_OK(ValidateImageRank("Rotate", input_cv->Rank()));
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
if (input_img.cols > (MAX_INT_PRECISION * 2) || input_img.rows > (MAX_INT_PRECISION * 2)) { if (input_img.cols > (MAX_INT_PRECISION * 2) || input_img.rows > (MAX_INT_PRECISION * 2)) {
@ -789,7 +792,9 @@ Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
} }
CHECK_FAIL_RETURN_UNEXPECTED((*output)->Rank() == DEFAULT_IMAGE_RANK, "Normalize: image shape is not <H,W,C>."); CHECK_FAIL_RETURN_UNEXPECTED((*output)->Rank() == DEFAULT_IMAGE_RANK, "Normalize: image shape is not <H,W,C>.");
CHECK_FAIL_RETURN_UNEXPECTED(std.size() == mean.size(), "Normalize: mean and std vectors are not of same size."); CHECK_FAIL_RETURN_UNEXPECTED(std.size() == mean.size(),
"Normalize: mean and std vectors are not of same size, got size of std:" +
std::to_string(std.size()) + ", and mean size:" + std::to_string(mean.size()));
// caller provided 1 mean/std value and there are more than one channel --> duplicate mean/std value // caller provided 1 mean/std value and there are more than one channel --> duplicate mean/std value
if (mean.size() == 1 && (*output)->shape()[CHANNEL_INDEX] != 1) { if (mean.size() == 1 && (*output)->shape()[CHANNEL_INDEX] != 1) {
@ -799,7 +804,10 @@ Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
} }
} }
CHECK_FAIL_RETURN_UNEXPECTED((*output)->shape()[CHANNEL_INDEX] == mean.size(), CHECK_FAIL_RETURN_UNEXPECTED((*output)->shape()[CHANNEL_INDEX] == mean.size(),
"Normalize: number of channels does not match the size of mean and std vectors."); "Normalize: number of channels does not match the size of mean and std vectors, got "
"channels: " +
std::to_string((*output)->shape()[CHANNEL_INDEX]) +
", size of mean:" + std::to_string(mean.size()));
switch (input->type().value()) { switch (input->type().value()) {
case DataType::DE_BOOL: case DataType::DE_BOOL:
@ -854,7 +862,7 @@ Status NormalizePad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
const std::shared_ptr<Tensor> &mean, const std::shared_ptr<Tensor> &std, const std::string &dtype) { const std::shared_ptr<Tensor> &mean, const std::shared_ptr<Tensor> &std, const std::string &dtype) {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!(input_cv->mat().data && input_cv->Rank() == DEFAULT_IMAGE_CHANNELS)) { if (!(input_cv->mat().data && input_cv->Rank() == DEFAULT_IMAGE_CHANNELS)) {
RETURN_STATUS_UNEXPECTED("NormalizePad: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] NormalizePad: load image failed.");
} }
DataType tensor_type = DataType(DataType::DE_FLOAT32); DataType tensor_type = DataType(DataType::DE_FLOAT32);
int compute_type = CV_32F; int compute_type = CV_32F;
@ -870,12 +878,16 @@ Status NormalizePad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
RETURN_IF_NOT_OK(CVTensor::CreateEmpty(new_shape, tensor_type, &output_cv)); RETURN_IF_NOT_OK(CVTensor::CreateEmpty(new_shape, tensor_type, &output_cv));
mean->Squeeze(); mean->Squeeze();
if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != DEFAULT_IMAGE_CHANNELS) { if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != DEFAULT_IMAGE_CHANNELS) {
std::string err_msg = "NormalizePad: mean tensor should be of size 3 and type float."; std::string err_msg =
"NormalizePad: mean tensor should be of size 3 and type float, but got rank: " + std::to_string(mean->Rank()) +
", and type: " + mean->type().ToString();
return Status(StatusCode::kMDShapeMisMatch, err_msg); return Status(StatusCode::kMDShapeMisMatch, err_msg);
} }
std->Squeeze(); std->Squeeze();
if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != DEFAULT_IMAGE_CHANNELS) { if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != DEFAULT_IMAGE_CHANNELS) {
std::string err_msg = "NormalizePad: std tensor should be of size 3 and type float."; std::string err_msg =
"NormalizePad: std tensor should be of size 3 and type float, but got rank: " + std::to_string(std->Rank()) +
", and type: " + std->type().ToString();
return Status(StatusCode::kMDShapeMisMatch, err_msg); return Status(StatusCode::kMDShapeMisMatch, err_msg);
} }
try { try {
@ -884,7 +896,7 @@ Status NormalizePad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
std::vector<cv::Mat> rgb; std::vector<cv::Mat> rgb;
cv::split(in_image, rgb); cv::split(in_image, rgb);
if (rgb.size() != DEFAULT_IMAGE_CHANNELS) { if (rgb.size() != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("NormalizePad: input image is not in RGB."); RETURN_STATUS_UNEXPECTED("NormalizePad: input image is not in RGB, got rank: " + std::to_string(in_image.dims));
} }
for (int8_t i = 0; i < DEFAULT_IMAGE_CHANNELS; i++) { for (int8_t i = 0; i < DEFAULT_IMAGE_CHANNELS; i++) {
float mean_c, std_c; float mean_c, std_c;
@ -906,13 +918,17 @@ Status AdjustBrightness(const std::shared_ptr<Tensor> &input, std::shared_ptr<Te
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustBrightness: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] AdjustBrightness: load image failed.");
} }
CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > CHANNEL_INDEX, "AdjustBrightness: shape is invalid."); CHECK_FAIL_RETURN_UNEXPECTED(
input_cv->shape().Size() > CHANNEL_INDEX,
"AdjustBrightness: image rank should not bigger than:" + std::to_string(CHANNEL_INDEX) + ", but got" +
std::to_string(input_cv->shape().Size()));
int num_channels = input_cv->shape()[CHANNEL_INDEX]; int num_channels = input_cv->shape()[CHANNEL_INDEX];
// Rank of the image represents how many dimensions, image is expected to be HWC // Rank of the image represents how many dimensions, image is expected to be HWC
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) { if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustBrightness: image shape is not <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED("AdjustBrightness: image shape is not <H,W,C> or channel is not 3, got image rank: " +
std::to_string(input_cv->Rank()) + ", and channel:" + std::to_string(num_channels));
} }
std::shared_ptr<CVTensor> output_cv; std::shared_ptr<CVTensor> output_cv;
RETURN_IF_NOT_OK(CVTensor::CreateEmpty(input_cv->shape(), input_cv->type(), &output_cv)); RETURN_IF_NOT_OK(CVTensor::CreateEmpty(input_cv->shape(), input_cv->type(), &output_cv));
@ -929,12 +945,15 @@ Status AdjustContrast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tens
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustContrast: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] AdjustContrast: load image failed.");
} }
CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > CHANNEL_INDEX, "AdjustBrightness: shape is invalid."); CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > CHANNEL_INDEX,
"AdjustContrast: image rank should not bigger than:" + std::to_string(CHANNEL_INDEX) +
", but got" + std::to_string(input_cv->shape().Size()));
int num_channels = input_cv->shape()[CHANNEL_INDEX]; int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->Rank() != DEFAULT_IMAGE_CHANNELS || num_channels != DEFAULT_IMAGE_CHANNELS) { if (input_cv->Rank() != DEFAULT_IMAGE_CHANNELS || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustContrast: image shape is not <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED("AdjustContrast: image shape is not <H,W,C> or channel is not 3, got image rank: " +
std::to_string(input_cv->Rank()) + ", and channel:" + std::to_string(num_channels));
} }
cv::Mat gray, output_img; cv::Mat gray, output_img;
cv::cvtColor(input_img, gray, CV_RGB2GRAY); cv::cvtColor(input_img, gray, CV_RGB2GRAY);
@ -963,7 +982,8 @@ Status AdjustGamma(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor>
num_channels = input->shape()[-1]; num_channels = input->shape()[-1];
} }
if (num_channels != 1 && num_channels != 3) { if (num_channels != 1 && num_channels != 3) {
RETURN_STATUS_UNEXPECTED("AdjustGamma: channel of input image should be 1 or 3."); RETURN_STATUS_UNEXPECTED("AdjustGamma: channel of input image should be 1 or 3, but got: " +
std::to_string(num_channels));
} }
if (input->type().IsFloat()) { if (input->type().IsFloat()) {
for (auto itr = input->begin<float>(); itr != input->end<float>(); itr++) { for (auto itr = input->begin<float>(); itr != input->end<float>(); itr++) {
@ -975,7 +995,7 @@ Status AdjustGamma(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor>
} else { } else {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustGamma: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] AdjustGamma: load image failed.");
} }
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
std::shared_ptr<CVTensor> output_cv; std::shared_ptr<CVTensor> output_cv;
@ -1015,10 +1035,11 @@ Status AutoContrast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AutoContrast: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] AutoContrast: load image failed.");
} }
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != MIN_IMAGE_DIMENSION) { if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != MIN_IMAGE_DIMENSION) {
RETURN_STATUS_UNEXPECTED("AutoContrast: image channel should be 1 or 3."); RETURN_STATUS_UNEXPECTED("AutoContrast: image channel should be 1 or 3, but got: " +
std::to_string(input_cv->Rank()));
} }
// Reshape to extend dimension if rank is 2 for algorithm to work. then reshape output to be of rank 2 like input // Reshape to extend dimension if rank is 2 for algorithm to work. then reshape output to be of rank 2 like input
if (input_cv->Rank() == MIN_IMAGE_DIMENSION) { if (input_cv->Rank() == MIN_IMAGE_DIMENSION) {
@ -1027,7 +1048,8 @@ Status AutoContrast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
// Get number of channels and image matrix // Get number of channels and image matrix
std::size_t num_of_channels = input_cv->shape()[CHANNEL_INDEX]; std::size_t num_of_channels = input_cv->shape()[CHANNEL_INDEX];
if (num_of_channels != MIN_IMAGE_CHANNELS && num_of_channels != DEFAULT_IMAGE_CHANNELS) { if (num_of_channels != MIN_IMAGE_CHANNELS && num_of_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AutoContrast: image shape is not <H,W,C>."); RETURN_STATUS_UNEXPECTED("AutoContrast: channel of input image should be 1 or 3, but got: " +
std::to_string(num_of_channels));
} }
cv::Mat image = input_cv->mat(); cv::Mat image = input_cv->mat();
// Separate the image to channels // Separate the image to channels
@ -1093,12 +1115,16 @@ Status AdjustSaturation(const std::shared_ptr<Tensor> &input, std::shared_ptr<Te
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustSaturation: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] AdjustSaturation: load image failed.");
} }
CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > CHANNEL_INDEX, "AdjustSaturation: shape is invalid."); CHECK_FAIL_RETURN_UNEXPECTED(
input_cv->shape().Size() > CHANNEL_INDEX,
"AdjustSaturation: image rank should not bigger than: " + std::to_string(CHANNEL_INDEX) +
", but got: " + std::to_string(input_cv->shape().Size()));
int num_channels = input_cv->shape()[CHANNEL_INDEX]; int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) { if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustSaturation: image shape is not <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED("AdjustSaturation: image shape is not <H,W,C> or channel is not 3, but got rank: " +
std::to_string(input_cv->Rank()) + ", and channel: " + std::to_string(num_channels));
} }
std::shared_ptr<CVTensor> output_cv; std::shared_ptr<CVTensor> output_cv;
RETURN_IF_NOT_OK(CVTensor::CreateEmpty(input_cv->shape(), input_cv->type(), &output_cv)); RETURN_IF_NOT_OK(CVTensor::CreateEmpty(input_cv->shape(), input_cv->type(), &output_cv));
@ -1116,18 +1142,22 @@ Status AdjustSaturation(const std::shared_ptr<Tensor> &input, std::shared_ptr<Te
Status AdjustHue(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, const float &hue) { Status AdjustHue(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, const float &hue) {
if (hue > 0.5 || hue < -0.5) { if (hue > 0.5 || hue < -0.5) {
RETURN_STATUS_UNEXPECTED("AdjustHue: invalid parameter, hue is not within [-0.5, 0.5]."); RETURN_STATUS_UNEXPECTED("AdjustHue: invalid parameter, hue should within [-0.5, 0.5], but got: " +
std::to_string(hue));
} }
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustHue: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] AdjustHue: load image failed.");
} }
CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > 2, "AdjustHue: shape is invalid."); CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > 2,
"AdjustHue: image rank should not bigger than:" + std::to_string(2) +
", but got: " + std::to_string(input_cv->shape().Size()));
int num_channels = input_cv->shape()[2]; int num_channels = input_cv->shape()[2];
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) { if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustHue: image shape is not <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED("AdjustHue: image shape is not <H,W,C> or channel is not 3, but got rank: " +
std::to_string(input_cv->Rank()) + ", and channel: " + std::to_string(num_channels));
} }
std::shared_ptr<CVTensor> output_cv; std::shared_ptr<CVTensor> output_cv;
RETURN_IF_NOT_OK(CVTensor::CreateEmpty(input_cv->shape(), input_cv->type(), &output_cv)); RETURN_IF_NOT_OK(CVTensor::CreateEmpty(input_cv->shape(), input_cv->type(), &output_cv));
@ -1154,10 +1184,10 @@ Status Equalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *o
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Equalize: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] Equalize: load image failed.");
} }
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != MIN_IMAGE_DIMENSION) { if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != MIN_IMAGE_DIMENSION) {
RETURN_STATUS_UNEXPECTED("Equalize: image shape is not <H,W,C> or <H,W>"); RETURN_STATUS_UNEXPECTED("Equalize: image rank should be 1 or 3, but got: " + std::to_string(input_cv->Rank()));
} }
// For greyscale images, extend dimension if rank is 2 and reshape output to be of rank 2. // For greyscale images, extend dimension if rank is 2 and reshape output to be of rank 2.
if (input_cv->Rank() == MIN_IMAGE_DIMENSION) { if (input_cv->Rank() == MIN_IMAGE_DIMENSION) {
@ -1166,7 +1196,8 @@ Status Equalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *o
// Get number of channels and image matrix // Get number of channels and image matrix
std::size_t num_of_channels = input_cv->shape()[CHANNEL_INDEX]; std::size_t num_of_channels = input_cv->shape()[CHANNEL_INDEX];
if (num_of_channels != MIN_IMAGE_CHANNELS && num_of_channels != DEFAULT_IMAGE_CHANNELS) { if (num_of_channels != MIN_IMAGE_CHANNELS && num_of_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("Equalize: image shape is not <H,W,C>."); RETURN_STATUS_UNEXPECTED("Equalize: channel of input image should be 1 or 3, but got: " +
std::to_string(num_of_channels));
} }
cv::Mat image = input_cv->mat(); cv::Mat image = input_cv->mat();
// Separate the image to channels // Separate the image to channels
@ -1199,17 +1230,21 @@ Status Erase(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *outp
CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > CHANNEL_INDEX, "Erase: shape is invalid."); CHECK_FAIL_RETURN_UNEXPECTED(input_cv->shape().Size() > CHANNEL_INDEX, "Erase: shape is invalid.");
int num_channels = input_cv->shape()[CHANNEL_INDEX]; int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->mat().data == nullptr) { if (input_cv->mat().data == nullptr) {
RETURN_STATUS_UNEXPECTED("CutOut: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] CutOut: load image failed.");
} }
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) { if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("CutOut: image shape is not <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED("CutOut: image shape is not <H,W,C> or channel is not 3, but got rank: " +
std::to_string(input_cv->Rank()) + ", and channel: " + std::to_string(num_channels));
} }
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
int32_t image_h = input_cv->shape()[0]; int32_t image_h = input_cv->shape()[0];
int32_t image_w = input_cv->shape()[1]; int32_t image_w = input_cv->shape()[1];
// check if erase size is bigger than image itself // check if erase size is bigger than image itself
if (box_height > image_h || box_width > image_w) { if (box_height > image_h || box_width > image_w) {
RETURN_STATUS_UNEXPECTED("CutOut: box size is too large for image erase"); RETURN_STATUS_UNEXPECTED(
"CutOut: box size is too large for image erase, got box height: " + std::to_string(box_height) +
"box weight: " + std::to_string(box_width) + ", and image height: " + std::to_string(image_h) +
", image width: " + std::to_string(image_w));
} }
// for random color // for random color
@ -1265,7 +1300,8 @@ Status Pad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output
// validate rank // validate rank
if (input_cv->Rank() == 1 || input_cv->mat().dims > MIN_IMAGE_DIMENSION) { if (input_cv->Rank() == 1 || input_cv->mat().dims > MIN_IMAGE_DIMENSION) {
RETURN_STATUS_UNEXPECTED("Pad: input tensor is not in shape of <H,W,C> or <H,W>."); RETURN_STATUS_UNEXPECTED("Pad: input shape is not <H,W,C> or <H, W>, got rank: " +
std::to_string(input_cv->Rank()));
} }
// get the border type in openCV // get the border type in openCV
@ -1297,10 +1333,9 @@ Status RgbaToRgb(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input)); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
int num_channels = input_cv->shape()[CHANNEL_INDEX]; int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->shape().Size() != DEFAULT_IMAGE_CHANNELS || num_channels != 4) { if (input_cv->shape().Size() != DEFAULT_IMAGE_CHANNELS || num_channels != 4) {
std::string err_msg = std::string err_msg = "RgbaToRgb: rank of image is not: " + std::to_string(DEFAULT_IMAGE_CHANNELS) +
"RgbaToRgb: Number of channels of image does not equal 4, " ", but got: " + std::to_string(input_cv->shape().Size()) +
"but got : " + ", or channels of image should be 4, but got: " + std::to_string(num_channels);
std::to_string(num_channels);
RETURN_STATUS_UNEXPECTED(err_msg); RETURN_STATUS_UNEXPECTED(err_msg);
} }
TensorShape out_shape = TensorShape({input_cv->shape()[0], input_cv->shape()[1], 3}); TensorShape out_shape = TensorShape({input_cv->shape()[0], input_cv->shape()[1], 3});
@ -1319,10 +1354,9 @@ Status RgbaToBgr(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input)); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
int num_channels = input_cv->shape()[CHANNEL_INDEX]; int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->shape().Size() != DEFAULT_IMAGE_CHANNELS || num_channels != 4) { if (input_cv->shape().Size() != DEFAULT_IMAGE_CHANNELS || num_channels != 4) {
std::string err_msg = std::string err_msg = "RgbaToBgr: rank of image is not: " + std::to_string(DEFAULT_IMAGE_CHANNELS) +
"RgbaToBgr: number of channels of image should be 4, " ", but got: " + std::to_string(input_cv->shape().Size()) +
"but got : " + ", or channels of image should be 4, but got: " + std::to_string(num_channels);
std::to_string(num_channels);
RETURN_STATUS_UNEXPECTED(err_msg); RETURN_STATUS_UNEXPECTED(err_msg);
} }
TensorShape out_shape = TensorShape({input_cv->shape()[0], input_cv->shape()[1], 3}); TensorShape out_shape = TensorShape({input_cv->shape()[0], input_cv->shape()[1], 3});
@ -1341,10 +1375,12 @@ Status RgbToBgr(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *o
auto input_type = input->type(); auto input_type = input->type();
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("RgbToBgr: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] RgbToBgr: load image failed.");
} }
if (input_cv->Rank() != 3 || input_cv->shape()[2] != 3) { if (input_cv->Rank() != 3 || input_cv->shape()[2] != 3) {
RETURN_STATUS_UNEXPECTED("RgbToBgr: input tensor is not in shape of <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED("RgbToBgr: input tensor is not in shape of <H,W,C> or channel is not 3, got rank: " +
std::to_string(input_cv->Rank()) +
", and channel: " + std::to_string(input_cv->shape()[2]));
} }
cv::Mat image = input_cv->mat().clone(); cv::Mat image = input_cv->mat().clone();
@ -1396,7 +1432,9 @@ Status RgbToGray(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input)); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || input_cv->shape()[CHANNEL_INDEX] != DEFAULT_IMAGE_CHANNELS) { if (input_cv->Rank() != DEFAULT_IMAGE_RANK || input_cv->shape()[CHANNEL_INDEX] != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("RgbToGray: image shape is not <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED(
"RgbToGray: image shape is not <H,W,C> or channel is not 3, got rank: " + std::to_string(input_cv->Rank()) +
", and channel: " + std::to_string(input_cv->shape()[2]));
} }
TensorShape out_shape = TensorShape({input_cv->shape()[0], input_cv->shape()[1]}); TensorShape out_shape = TensorShape({input_cv->shape()[0], input_cv->shape()[1]});
std::shared_ptr<CVTensor> output_cv; std::shared_ptr<CVTensor> output_cv;
@ -1433,9 +1471,7 @@ Status Affine(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
InterpolationMode interpolation, uint8_t fill_r, uint8_t fill_g, uint8_t fill_b) { InterpolationMode interpolation, uint8_t fill_r, uint8_t fill_g, uint8_t fill_b) {
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (input_cv->Rank() < MIN_IMAGE_DIMENSION || input_cv->Rank() > DEFAULT_IMAGE_RANK) { RETURN_IF_NOT_OK(ValidateImageRank("Affine", input_cv->Rank()));
RETURN_STATUS_UNEXPECTED("Affine: image shape is not <H,W,C> or <H,W>.");
}
cv::Mat affine_mat(mat); cv::Mat affine_mat(mat);
affine_mat = affine_mat.reshape(1, {2, 3}); affine_mat = affine_mat.reshape(1, {2, 3});
@ -1457,7 +1493,7 @@ Status GaussianBlur(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
try { try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (input_cv->mat().data == nullptr) { if (input_cv->mat().data == nullptr) {
RETURN_STATUS_UNEXPECTED("GaussianBlur: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] GaussianBlur: load image failed.");
} }
cv::Mat output_cv_mat; cv::Mat output_cv_mat;
cv::GaussianBlur(input_cv->mat(), output_cv_mat, cv::Size(kernel_x, kernel_y), static_cast<double>(sigma_x), cv::GaussianBlur(input_cv->mat(), output_cv_mat, cv::Size(kernel_x, kernel_y), static_cast<double>(sigma_x),
@ -1475,19 +1511,21 @@ Status ComputePatchSize(const std::shared_ptr<CVTensor> &input_cv,
std::shared_ptr<std::pair<int32_t, int32_t>> *patch_size, int32_t num_height, int32_t num_width, std::shared_ptr<std::pair<int32_t, int32_t>> *patch_size, int32_t num_height, int32_t num_width,
SliceMode slice_mode) { SliceMode slice_mode) {
if (input_cv->mat().data == nullptr) { if (input_cv->mat().data == nullptr) {
RETURN_STATUS_UNEXPECTED("SlicePatches: Tensor could not convert to CV Tensor."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] SlicePatches: Tensor could not convert to CV Tensor.");
}
if (input_cv->Rank() != 3 && input_cv->Rank() != 2) {
RETURN_STATUS_UNEXPECTED("SlicePatches: image shape is not <H,W,C> or <H,W>.");
} }
RETURN_IF_NOT_OK(ValidateImageRank("Affine", input_cv->Rank()));
cv::Mat in_img = input_cv->mat(); cv::Mat in_img = input_cv->mat();
cv::Size s = in_img.size(); cv::Size s = in_img.size();
if (num_height == 0 || num_height > s.height) { if (num_height == 0 || num_height > s.height) {
RETURN_STATUS_UNEXPECTED("SlicePatches: The number of patches on height axis equals 0 or is greater than height."); RETURN_STATUS_UNEXPECTED(
"SlicePatches: The number of patches on height axis equals 0 or is greater than height, got number of patches:" +
std::to_string(num_height));
} }
if (num_width == 0 || num_width > s.width) { if (num_width == 0 || num_width > s.width) {
RETURN_STATUS_UNEXPECTED("SlicePatches: The number of patches on width axis equals 0 or is greater than width."); RETURN_STATUS_UNEXPECTED(
"SlicePatches: The number of patches on width axis equals 0 or is greater than width, got number of patches:" +
std::to_string(num_width));
} }
int32_t patch_h = s.height / num_height; int32_t patch_h = s.height / num_height;
if (s.height % num_height != 0) { if (s.height % num_height != 0) {
@ -1547,5 +1585,13 @@ Status SlicePatches(const std::shared_ptr<Tensor> &input, std::vector<std::share
RETURN_STATUS_UNEXPECTED("SlicePatches: " + std::string(e.what())); RETURN_STATUS_UNEXPECTED("SlicePatches: " + std::string(e.what()));
} }
} }
Status ValidateImageRank(const std::string &op_name, int32_t rank) {
if (rank != 2 && rank != 3) {
std::string err_msg = op_name + ": image shape is not <H,W,C> or <H, W>, but got rank:" + std::to_string(rank);
RETURN_STATUS_UNEXPECTED(err_msg);
}
return Status::OK();
}
} // namespace dataset } // namespace dataset
} // namespace mindspore } // namespace mindspore

View File

@ -373,6 +373,11 @@ Status SlicePatches(const std::shared_ptr<Tensor> &input, std::vector<std::share
Status ComputePatchSize(const std::shared_ptr<CVTensor> &input_cv, Status ComputePatchSize(const std::shared_ptr<CVTensor> &input_cv,
std::shared_ptr<std::pair<int32_t, int32_t>> *patch_size, int32_t num_height, int32_t num_width, std::shared_ptr<std::pair<int32_t, int32_t>> *patch_size, int32_t num_height, int32_t num_width,
SliceMode slice_mode); SliceMode slice_mode);
/// \brief Validate image rank.
/// \param[in] op_name operator name.
/// \param[in] rank refers to the rank of input image shape.
Status ValidateImageRank(const std::string &op_name, int32_t rank);
} // namespace dataset } // namespace dataset
} // namespace mindspore } // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_IMAGE_UTILS_H_ #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_IMAGE_UTILS_H_
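The helper declared above consolidates the per-operator rank checks into a single ValidateImageRank call. Below is a minimal standalone sketch of that pattern, not the MindSpore build itself: the Status struct is a simplified stand-in for the real Status class and exists only to keep the example self-contained.

#include <cstdint>
#include <iostream>
#include <string>

// Simplified stand-in for the MindSpore Status class (assumption, for illustration only).
struct Status {
  bool ok;
  std::string msg;
  static Status OK() { return {true, ""}; }
  static Status Error(const std::string &m) { return {false, m}; }
};

// Mirrors the helper added in this commit: rank 2 (<H,W>) and rank 3 (<H,W,C>) pass, everything else fails.
Status ValidateImageRank(const std::string &op_name, int32_t rank) {
  if (rank != 2 && rank != 3) {
    return Status::Error(op_name + ": image shape is not <H,W,C> or <H, W>, but got rank:" + std::to_string(rank));
  }
  return Status::OK();
}

int main() {
  // A 1-D input is rejected with an operator-specific message.
  Status s = ValidateImageRank("Resize", 1);
  std::cout << (s.ok ? std::string("OK") : s.msg) << std::endl;
  return 0;
}

Because each caller forwards its own operator name, the shared check still produces an op-specific message, which is why the duplicated rank checks in the individual kernels can be removed.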

View File

@ -30,11 +30,11 @@ Status InvertOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<T
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Invert: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] Invert: load image failed.");
} }
if (input_cv->Rank() != 3) { if (input_cv->Rank() != 3) {
RETURN_STATUS_UNEXPECTED("Invert: image shape is not <H,W,C>"); RETURN_STATUS_UNEXPECTED("Invert: image shape is not <H,W,C>, got rank: " + std::to_string(input_cv->Rank()));
} }
int num_channels = input_cv->shape()[2]; int num_channels = input_cv->shape()[2];
if (num_channels != 3) { if (num_channels != 3) {

View File

@ -811,5 +811,12 @@ Status GaussianBlur(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
} }
} }
Status ValidateImageRank(const std::string &op_name, int32_t rank) {
if (rank != 2 && rank != 3) {
std::string err_msg = op_name + ": image shape is not <H,W,C> or <H, W>, but got rank:" + std::to_string(rank);
RETURN_STATUS_UNEXPECTED(err_msg);
}
return Status::OK();
}
} // namespace dataset } // namespace dataset
} // namespace mindspore } // namespace mindspore

View File

@ -161,6 +161,10 @@ Status Affine(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
Status GaussianBlur(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int32_t kernel_size_x, Status GaussianBlur(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int32_t kernel_size_x,
int32_t kernel_size_y, float sigma_x, float sigma_y); int32_t kernel_size_y, float sigma_x, float sigma_y);
/// \brief Validate image rank.
/// \param[in] op_name operator name.
/// \param[in] rank refers to the rank of input image shape.
Status ValidateImageRank(const std::string &op_name, int32_t rank);
} // namespace dataset } // namespace dataset
} // namespace mindspore } // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_LITE_IMAGE_UTILS_H_ #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_LITE_IMAGE_UTILS_H_

View File

@ -39,8 +39,9 @@ MixUpBatchOp::MixUpBatchOp(float alpha) : alpha_(alpha) { rnd_.seed(GetSeed());
Status MixUpBatchOp::ComputeLabels(const TensorRow &input, std::shared_ptr<Tensor> *out_labels, Status MixUpBatchOp::ComputeLabels(const TensorRow &input, std::shared_ptr<Tensor> *out_labels,
std::vector<int64_t> *rand_indx, const std::vector<int64_t> &label_shape, std::vector<int64_t> *rand_indx, const std::vector<int64_t> &label_shape,
const float lam, const size_t images_size) { const float lam, const size_t images_size) {
CHECK_FAIL_RETURN_UNEXPECTED(images_size <= static_cast<size_t>(std::numeric_limits<int64_t>::max()), CHECK_FAIL_RETURN_UNEXPECTED(
"The \"images_size\" must not be more than \"INT64_MAX\"."); images_size <= static_cast<size_t>(std::numeric_limits<int64_t>::max()),
"The \'images_size\' must not be more than \'INT64_MAX\', but got: " + std::to_string(images_size));
for (int64_t i = 0; i < static_cast<int64_t>(images_size); i++) rand_indx->push_back(i); for (int64_t i = 0; i < static_cast<int64_t>(images_size); i++) rand_indx->push_back(i);
std::shuffle(rand_indx->begin(), rand_indx->end(), rnd_); std::shuffle(rand_indx->begin(), rand_indx->end(), rnd_);
@ -76,7 +77,8 @@ Status MixUpBatchOp::ComputeLabels(const TensorRow &input, std::shared_ptr<Tenso
Status MixUpBatchOp::Compute(const TensorRow &input, TensorRow *output) { Status MixUpBatchOp::Compute(const TensorRow &input, TensorRow *output) {
if (input.size() < 2) { if (input.size() < 2) {
RETURN_STATUS_UNEXPECTED("MixUpBatch: input lack of images or labels"); RETURN_STATUS_UNEXPECTED("MixUpBatch: size of input data should be 2 (including images or labels), but got: " +
std::to_string(input.size()));
} }
std::vector<std::shared_ptr<CVTensor>> images; std::vector<std::shared_ptr<CVTensor>> images;
@ -85,14 +87,14 @@ Status MixUpBatchOp::Compute(const TensorRow &input, TensorRow *output) {
// Check inputs // Check inputs
if (image_shape.size() != kExpectedImageShapeSize || image_shape[0] != label_shape[0]) { if (image_shape.size() != kExpectedImageShapeSize || image_shape[0] != label_shape[0]) {
RETURN_STATUS_UNEXPECTED( RETURN_STATUS_UNEXPECTED("MixUpBatch: rank of image shape should be: " + std::to_string(kExpectedImageShapeSize) +
"MixUpBatch: " ", but got: " + std::to_string(image_shape.size()) +
"please make sure images are HWC or CHW and batched before calling MixUpBatch."); ", make sure image shape are <H,W,C> or <C,H,W> and batched before calling MixUpBatch.");
} }
if (!input.at(1)->type().IsInt()) { if (!input.at(1)->type().IsInt()) {
RETURN_STATUS_UNEXPECTED( RETURN_STATUS_UNEXPECTED(
"MixUpBatch: wrong labels type. " "MixUpBatch: wrong labels type. The second column (labels) must only include int types, but got: " +
"The second column (labels) must only include int types."); input.at(1)->type().ToString());
} }
if (label_shape.size() != kMinLabelShapeSize && label_shape.size() != kMaxLabelShapeSize) { if (label_shape.size() != kMinLabelShapeSize && label_shape.size() != kMaxLabelShapeSize) {
@ -100,11 +102,12 @@ Status MixUpBatchOp::Compute(const TensorRow &input, TensorRow *output) {
"MixUpBatch: wrong labels shape. " "MixUpBatch: wrong labels shape. "
"The second column (labels) must have a shape of NC or NLC where N is the batch size, " "The second column (labels) must have a shape of NC or NLC where N is the batch size, "
"L is the number of labels in each row, and C is the number of classes. " "L is the number of labels in each row, and C is the number of classes. "
"labels must be in one-hot format and in a batch."); "labels must be in one-hot format and in a batch, but got rank: " +
std::to_string(label_shape.size()));
} }
if ((image_shape[dimension_one] != value_one && image_shape[dimension_one] != value_three) && if ((image_shape[dimension_one] != value_one && image_shape[dimension_one] != value_three) &&
(image_shape[dimension_three] != value_one && image_shape[dimension_three] != value_three)) { (image_shape[dimension_three] != value_one && image_shape[dimension_three] != value_three)) {
RETURN_STATUS_UNEXPECTED("MixUpBatch: images must be in the shape of HWC or CHW."); RETURN_STATUS_UNEXPECTED("MixUpBatch: images shape is not <H,W,C> or <C,H,W>.");
} }
// Move images into a vector of CVTensors // Move images into a vector of CVTensors
@ -116,8 +119,11 @@ Status MixUpBatchOp::Compute(const TensorRow &input, TensorRow *output) {
std::gamma_distribution<float> distribution(alpha_, 1); std::gamma_distribution<float> distribution(alpha_, 1);
float x1 = distribution(rnd_); float x1 = distribution(rnd_);
float x2 = distribution(rnd_); float x2 = distribution(rnd_);
CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<float_t>::max() - x1) > x2, "multiplication out of bounds"); CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<float_t>::max() - x1) > x2,
CHECK_FAIL_RETURN_UNEXPECTED(x1 + x2 != 0.0, "addition out of bounds"); "multiplication out of bounds, with multipliers: " + std::to_string(x1) + " and " +
std::to_string(x2) +
", which result in the out of bounds product:" + std::to_string(x1 * x2));
CHECK_FAIL_RETURN_UNEXPECTED(x1 + x2 != 0.0, "addition of variable(x1 and x2) of Gamma should not be 0.");
float lam = x1 / (x1 + x2); float lam = x1 / (x1 + x2);
// Calculate random labels // Calculate random labels
@ -138,7 +144,7 @@ Status MixUpBatchOp::Compute(const TensorRow &input, TensorRow *output) {
input.at(0)->type(), start_addr_of_index, &out)); input.at(0)->type(), start_addr_of_index, &out));
std::shared_ptr<CVTensor> rand_image = CVTensor::AsCVTensor(std::move(out)); std::shared_ptr<CVTensor> rand_image = CVTensor::AsCVTensor(std::move(out));
if (!rand_image->mat().data) { if (!rand_image->mat().data) {
RETURN_STATUS_UNEXPECTED("MixUpBatch: allocate memory failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] MixUpBatch: allocate memory failed.");
} }
images[i]->mat() = lam * images[i]->mat() + (1 - lam) * rand_image->mat(); images[i]->mat() = lam * images[i]->mat() + (1 - lam) * rand_image->mat();
} }

View File

@ -26,11 +26,13 @@ NormalizePadOp::NormalizePadOp(float mean_r, float mean_g, float mean_b, float s
std::string dtype) { std::string dtype) {
Status s = Tensor::CreateFromVector<float>({mean_r, mean_g, mean_b}, &mean_); Status s = Tensor::CreateFromVector<float>({mean_r, mean_g, mean_b}, &mean_);
if (s.IsError()) { if (s.IsError()) {
MS_LOG(ERROR) << "NormalizePad: invalid mean value."; MS_LOG(ERROR) << "NormalizePad: invalid mean value, got: (" + std::to_string(mean_r) + std::to_string(mean_g) +
std::to_string(mean_b) + ").";
} }
s = Tensor::CreateFromVector<float>({std_r, std_g, std_b}, &std_); s = Tensor::CreateFromVector<float>({std_r, std_g, std_b}, &std_);
if (s.IsError()) { if (s.IsError()) {
MS_LOG(ERROR) << "NormalizePad: invalid std value."; MS_LOG(ERROR) << "NormalizePad: invalid std value, got: (" + std::to_string(std_r) + std::to_string(std_g) +
std::to_string(std_b) + ").";
} }
dtype_ = dtype; dtype_ = dtype;
} }

View File

@ -29,10 +29,11 @@ Status PosterizeOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_pt
uint8_t mask_value = ~((uint8_t)(1 << (8 - bit_)) - 1); uint8_t mask_value = ~((uint8_t)(1 << (8 - bit_)) - 1);
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Posterize: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] Posterize: load image failed.");
} }
if (input_cv->Rank() != 3 && input_cv->Rank() != 2) { if (input_cv->Rank() != 3 && input_cv->Rank() != 2) {
RETURN_STATUS_UNEXPECTED("Posterize: input image is not in shape of <H,W,C> or <H,W>"); RETURN_STATUS_UNEXPECTED("Posterize: input image is not in shape of <H,W,C> or <H,W>, but got rank: " +
std::to_string(input_cv->Rank()));
} }
std::vector<uint8_t> lut_vector; std::vector<uint8_t> lut_vector;
for (std::size_t i = 0; i < 256; i++) { for (std::size_t i = 0; i < 256; i++) {
@ -42,7 +43,7 @@ Status PosterizeOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_pt
cv::Mat output_img; cv::Mat output_img;
CHECK_FAIL_RETURN_UNEXPECTED(in_image.depth() == CV_8U || in_image.depth() == CV_8S, CHECK_FAIL_RETURN_UNEXPECTED(in_image.depth() == CV_8U || in_image.depth() == CV_8S,
"Posterize: input image data type can not be float, " "Posterize: data type of input image should be int, "
"but got " + "but got " +
input->type().ToString()); input->type().ToString());
cv::LUT(in_image, lut_vector, output_img); cv::LUT(in_image, lut_vector, output_img);
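The Posterize kernel above builds its lookup table from the mask ~((1 << (8 - bit_)) - 1), which clears the low (8 - bit_) bits and keeps only the top bit_ bits of each 8-bit pixel. A small standalone sketch with an illustrative bit depth of 3 (an assumption, not a value from the commit) shows the mapping the LUT encodes:

#include <cstdint>
#include <cstdio>
#include <initializer_list>

int main() {
  const uint8_t bits = 3;  // illustrative stand-in for bit_
  // Same expression as PosterizeOp: clear the low (8 - bits) bits, keep the high ones.
  uint8_t mask = ~static_cast<uint8_t>((1 << (8 - bits)) - 1);  // 0xE0 for bits = 3
  std::printf("mask = 0x%02X\n", mask);
  for (int v : {0, 37, 130, 255}) {          // a few sample pixel values
    std::printf("%3d -> %3d\n", v, v & mask);  // the same mapping the 256-entry LUT encodes
  }
  return 0;
}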

View File

@ -15,6 +15,7 @@
*/ */
#include "minddata/dataset/kernels/image/random_color_op.h" #include "minddata/dataset/kernels/image/random_color_op.h"
#include "minddata/dataset/kernels/image/image_utils.h"
#include "minddata/dataset/core/cv_tensor.h" #include "minddata/dataset/core/cv_tensor.h"
namespace mindspore { namespace mindspore {
namespace dataset { namespace dataset {
@ -26,7 +27,8 @@ RandomColorOp::RandomColorOp(float t_lb, float t_ub) : rnd_(GetSeed()), dist_(t_
Status RandomColorOp::Compute(const std::shared_ptr<Tensor> &in, std::shared_ptr<Tensor> *out) { Status RandomColorOp::Compute(const std::shared_ptr<Tensor> &in, std::shared_ptr<Tensor> *out) {
IO_CHECK(in, out); IO_CHECK(in, out);
if (in->Rank() != 3 || in->shape()[2] != 3) { if (in->Rank() != 3 || in->shape()[2] != 3) {
RETURN_STATUS_UNEXPECTED("RandomColor: image shape is not <H,W,C> or channel is not 3."); RETURN_STATUS_UNEXPECTED("RandomColor: image shape is not <H,W,C> or channel is not 3, got rank: " +
std::to_string(in->Rank()) + ", and channel: " + std::to_string(in->shape()[2]));
} }
// 0.5 pixel precision assuming an 8 bit image // 0.5 pixel precision assuming an 8 bit image
const auto eps = 0.00195; const auto eps = 0.00195;
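For reference, the eps constant above is half of one 8-bit quantization step, as the preceding comment states: 0.5 / 256 = 0.001953125, rounded to 0.00195 in the code.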

View File

@ -61,8 +61,7 @@ Status RandomCropAndResizeOp::Compute(const TensorRow &input, TensorRow *output)
int crop_height = 0; int crop_height = 0;
int crop_width = 0; int crop_width = 0;
for (size_t i = 0; i < input.size(); i++) { for (size_t i = 0; i < input.size(); i++) {
CHECK_FAIL_RETURN_UNEXPECTED(input[i]->shape().Size() >= 2, RETURN_IF_NOT_OK(ValidateImageRank("RandomCropAndResize", input[i]->shape().Size()));
"RandomCropAndResize: the image is not <H,W,C> or <H,W>");
int h_in = input[i]->shape()[0]; int h_in = input[i]->shape()[0];
int w_in = input[i]->shape()[1]; int w_in = input[i]->shape()[1];
if (i == 0) { if (i == 0) {
@ -87,7 +86,9 @@ Status RandomCropAndResizeOp::OutputShape(const std::vector<TensorShape> &inputs
if (!outputs.empty()) { if (!outputs.empty()) {
return Status::OK(); return Status::OK();
} }
return Status(StatusCode::kMDUnexpectedError, "RandomCropAndResize: invalid input shape"); return Status(StatusCode::kMDUnexpectedError,
"RandomCropAndResize: invalid input shape, expected 2D or 3D input, but got input dimension is: " +
std::to_string(inputs[0].Rank()));
} }
Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int *crop_height, int *crop_width) { Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int *crop_height, int *crop_width) {
CHECK_FAIL_RETURN_UNEXPECTED(crop_height != nullptr, "crop_height is nullptr."); CHECK_FAIL_RETURN_UNEXPECTED(crop_height != nullptr, "crop_height is nullptr.");

View File

@ -28,8 +28,8 @@ namespace dataset {
Status RandomCropAndResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { Status RandomCropAndResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) {
IO_CHECK_VECTOR(input, output); IO_CHECK_VECTOR(input, output);
RETURN_IF_NOT_OK(BoundingBox::ValidateBoundingBoxes(input)); RETURN_IF_NOT_OK(BoundingBox::ValidateBoundingBoxes(input));
CHECK_FAIL_RETURN_UNEXPECTED(input[0]->shape().Size() >= 2, RETURN_IF_NOT_OK(ValidateImageRank("RandomCropAndResizeWithBBox", input[0]->shape().Size()));
"RandomCropAndResizeWithBBox: image shape is not <H,W,C> or <H,W>.");
const int output_count = 2; const int output_count = 2;
output->resize(output_count); output->resize(output_count);
(*output)[1] = std::move(input[1]); // move boxes over to output (*output)[1] = std::move(input[1]); // move boxes over to output

View File

@ -39,7 +39,7 @@ Status RandomCropDecodeResizeOp::Compute(const TensorRow &input, TensorRow *outp
decoded.resize(output_count); decoded.resize(output_count);
for (size_t i = 0; i < input.size(); i++) { for (size_t i = 0; i < input.size(); i++) {
if (input[i] == nullptr) { if (input[i] == nullptr) {
RETURN_STATUS_UNEXPECTED("RandomCropDecodeResize: input image is empty."); RETURN_STATUS_UNEXPECTED("RandomCropDecodeResize: input image is empty since got nullptr.");
} }
if (!IsNonEmptyJPEG(input[i])) { if (!IsNonEmptyJPEG(input[i])) {
DecodeOp op(true); DecodeOp op(true);

View File

@ -62,11 +62,16 @@ Status RandomCropOp::ImagePadding(const std::shared_ptr<Tensor> &input, std::sha
CHECK_FAIL_RETURN_UNEXPECTED( CHECK_FAIL_RETURN_UNEXPECTED(
pad_top_ < input->shape()[0] * max_ratio && pad_bottom_ < input->shape()[0] * max_ratio && pad_top_ < input->shape()[0] * max_ratio && pad_bottom_ < input->shape()[0] * max_ratio &&
pad_left_ < input->shape()[1] * max_ratio && pad_right_ < input->shape()[1] * max_ratio, pad_left_ < input->shape()[1] * max_ratio && pad_right_ < input->shape()[1] * max_ratio,
"Pad: padding size is three times bigger than the image size."); "Pad: padding size is three times bigger than the image size, padding top: " + std::to_string(pad_top_) +
", padding bottom: " + std::to_string(pad_bottom_) + ", padding pad_left_: " + std::to_string(pad_left_) +
", padding padding right:" + std::to_string(pad_right_) + ", image shape: " + std::to_string(input->shape()[0]) +
", " + std::to_string(input->shape()[1]));
RETURN_IF_NOT_OK( RETURN_IF_NOT_OK(
Pad(input, pad_image, pad_top_, pad_bottom_, pad_left_, pad_right_, border_type_, fill_r_, fill_g_, fill_b_)); Pad(input, pad_image, pad_top_, pad_bottom_, pad_left_, pad_right_, border_type_, fill_r_, fill_g_, fill_b_));
CHECK_FAIL_RETURN_UNEXPECTED((*pad_image)->shape().Size() >= 2, "RandomCrop: invalid shape of image after pad."); CHECK_FAIL_RETURN_UNEXPECTED(
(*pad_image)->shape().Size() >= 2,
"RandomCrop: invalid shape of image after pad, got rank: " + std::to_string((*pad_image)->shape().Size()));
*padded_image_h = (*pad_image)->shape()[0]; *padded_image_h = (*pad_image)->shape()[0];
*padded_image_w = (*pad_image)->shape()[1]; *padded_image_w = (*pad_image)->shape()[1];
@ -97,11 +102,12 @@ Status RandomCropOp::ImagePadding(const std::shared_ptr<Tensor> &input, std::sha
if (crop_height_ == 0 || crop_width_ == 0) { if (crop_height_ == 0 || crop_width_ == 0) {
return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__,
"RandomCrop: invalid crop size, crop dimension is not allowed to be zero."); "RandomCrop: invalid crop size, crop width or crop height is not allowed to be zero.");
} }
if (*padded_image_h < crop_height_ || *padded_image_w < crop_width_ || crop_height_ == 0 || crop_width_ == 0) { if (*padded_image_h < crop_height_ || *padded_image_w < crop_width_ || crop_height_ == 0 || crop_width_ == 0) {
return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__,
"RandomCrop: invalid crop size, crop size is bigger than the image dimensions."); "RandomCrop: invalid crop size, crop size is bigger than the image dimensions, got crop height: " +
std::to_string(crop_height_) + ", crop width: " + std::to_string(crop_width_));
} }
return Status::OK(); return Status::OK();
} }
@ -126,9 +132,7 @@ Status RandomCropOp::Compute(const TensorRow &input, TensorRow *output) {
const int output_count = input.size(); const int output_count = input.size();
output->resize(output_count); output->resize(output_count);
for (size_t i = 0; i < input.size(); i++) { for (size_t i = 0; i < input.size(); i++) {
if (input[i]->Rank() != 3 && input[i]->Rank() != 2) { RETURN_IF_NOT_OK(ValidateImageRank("RandomCrop", input[i]->shape().Size()));
RETURN_STATUS_UNEXPECTED("RandomCrop: image shape is not <H,W,C> or <H,W>.");
}
std::shared_ptr<Tensor> pad_image = nullptr; std::shared_ptr<Tensor> pad_image = nullptr;
int32_t t_pad_top = 0; int32_t t_pad_top = 0;
int32_t t_pad_bottom = 0; int32_t t_pad_bottom = 0;

View File

@ -68,7 +68,9 @@ Status RandomRotationOp::OutputShape(const std::vector<TensorShape> &inputs, std
// if expand_, then we cannot know the shape. We need the input image to find the output shape --> set it to // if expand_, then we cannot know the shape. We need the input image to find the output shape --> set it to
// <-1,-1[,3]> // <-1,-1[,3]>
CHECK_FAIL_RETURN_UNEXPECTED(inputs.size() > 0 && inputs[0].Size() >= 2, CHECK_FAIL_RETURN_UNEXPECTED(inputs.size() > 0 && inputs[0].Size() >= 2,
"RandomRotationOp::OutputShape inputs is invalid."); "RandomRotationOp: invalid input shape, expected 2D or 3D input, but got input"
" dimension is: " +
std::to_string(inputs[0].Rank()));
if (!expand_) { if (!expand_) {
outputH = inputs[0][0]; outputH = inputs[0][0];
outputW = inputs[0][1]; outputW = inputs[0][1];

View File

@ -29,7 +29,8 @@ Status RandomSolarizeOp::Compute(const std::shared_ptr<Tensor> &input, std::shar
uint8_t threshold_min_ = threshold_[0], threshold_max_ = threshold_[1]; uint8_t threshold_min_ = threshold_[0], threshold_max_ = threshold_[1];
CHECK_FAIL_RETURN_UNEXPECTED(threshold_min_ <= threshold_max_, CHECK_FAIL_RETURN_UNEXPECTED(threshold_min_ <= threshold_max_,
"RandomSolarize: min of threshold is greater than max of threshold."); "RandomSolarize: min of threshold: " + std::to_string(threshold_min_) +
" is greater than max of threshold: " + std::to_string(threshold_max_));
uint8_t threshold_min = std::uniform_int_distribution(threshold_min_, threshold_max_)(rnd_); uint8_t threshold_min = std::uniform_int_distribution(threshold_min_, threshold_max_)(rnd_);
uint8_t threshold_max = std::uniform_int_distribution(threshold_min_, threshold_max_)(rnd_); uint8_t threshold_max = std::uniform_int_distribution(threshold_min_, threshold_max_)(rnd_);

View File

@ -258,7 +258,8 @@ bool ImageInterpolation(LiteMat input, LiteMat &output, int x_size, int y_size,
bool ResizeCubic(const LiteMat &input, LiteMat &dst, int dst_w, int dst_h) { bool ResizeCubic(const LiteMat &input, LiteMat &dst, int dst_w, int dst_h) {
if (input.data_type_ != LDataType::UINT8 || input.channel_ != 3) { if (input.data_type_ != LDataType::UINT8 || input.channel_ != 3) {
MS_LOG(ERROR) << "Unsupported data type, only support input image of uint8 dtype and 3 channel."; MS_LOG(ERROR) << "Unsupported data type, only support input image of uint8 dtype and 3 channel, got channel: " +
std::to_string(input.channel_);
return false; return false;
} }
int x_size = dst_w, y_size = dst_h; int x_size = dst_w, y_size = dst_h;

View File

@ -29,7 +29,7 @@ const InterpolationMode ResizeOp::kDefInterpolation = InterpolationMode::kLinear
Status ResizeOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) { Status ResizeOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
IO_CHECK(input, output); IO_CHECK(input, output);
CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Size() >= 2, "Resize: image shape should be <H,W,C> or <H,W>."); RETURN_IF_NOT_OK(ValidateImageRank("Resize", input->shape().Size()));
int32_t output_h = 0; int32_t output_h = 0;
int32_t output_w = 0; int32_t output_w = 0;
int32_t input_h = static_cast<int>(input->shape()[0]); int32_t input_h = static_cast<int>(input->shape()[0]);
@ -71,7 +71,9 @@ Status ResizeOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector
if (!outputs.empty()) { if (!outputs.empty()) {
return Status::OK(); return Status::OK();
} }
return Status(StatusCode::kMDUnexpectedError, "Resize: invalid input wrong shape."); return Status(StatusCode::kMDUnexpectedError,
"Resize: invalid input shape, expected 2D or 3D input, but got input dimension is:" +
std::to_string(inputs[0].Rank()));
} }
} // namespace dataset } // namespace dataset
} // namespace mindspore } // namespace mindspore

View File

@ -28,7 +28,7 @@ Status RgbToBgrOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr
auto input_type = input->type(); auto input_type = input->type();
CHECK_FAIL_RETURN_UNEXPECTED(input_type != DataType::DE_UINT32 && input_type != DataType::DE_UINT64 && CHECK_FAIL_RETURN_UNEXPECTED(input_type != DataType::DE_UINT32 && input_type != DataType::DE_UINT64 &&
input_type != DataType::DE_INT64 && input_type != DataType::DE_STRING, input_type != DataType::DE_INT64 && input_type != DataType::DE_STRING,
"RgbToBgr: unsupported data type as [uint32, int64, uint64, string]."); "RgbToBgr: Input includes unsupported data type in [uint32, int64, uint64, string].");
return RgbToBgr(input, output); return RgbToBgr(input, output);
} }

View File

@ -45,9 +45,7 @@ RotateOp::RotateOp(float degrees, InterpolationMode resample, bool expand, std::
Status RotateOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) { Status RotateOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
IO_CHECK(input, output); IO_CHECK(input, output);
CHECK_FAIL_RETURN_UNEXPECTED( RETURN_IF_NOT_OK(ValidateImageRank("Rotate", input->shape().Size()));
input->shape().Size() == 2 || input->shape().Size() == 3,
"Rotate: image shape " + std::to_string(input->shape().Size()) + " is not <H,W,C> or <H,W>.");
#ifndef ENABLE_ANDROID #ifndef ENABLE_ANDROID
return Rotate(input, output, center_, degrees_, interpolation_, expand_, fill_r_, fill_g_, fill_b_); return Rotate(input, output, center_, degrees_, interpolation_, expand_, fill_r_, fill_g_, fill_b_);
#else #else
@ -70,11 +68,14 @@ Status RotateOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector
if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 2) outputs.emplace_back(out);
if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2]));
if (!outputs.empty()) return Status::OK(); if (!outputs.empty()) return Status::OK();
return Status(StatusCode::kMDUnexpectedError, "Rotate: invalid input shape."); return Status(StatusCode::kMDUnexpectedError,
"Rotate: invalid input shape, expected 2D or 3D input, but got input dimension is:" +
std::to_string(inputs[0].Rank()));
#else #else
if (inputs.size() != NumInput()) if (inputs.size() != NumInput())
return Status(StatusCode::kMDUnexpectedError, return Status(StatusCode::kMDUnexpectedError,
"The size of the input argument vector does not match the number of inputs"); "The size of the input argument vector: " + std::to_string(inputs.size()) +
", does not match the number of inputs: " + std::to_string(NumInput()));
outputs = inputs; outputs = inputs;
return Status::OK(); return Status::OK();
#endif #endif

View File

@ -31,11 +31,11 @@ Status SharpnessOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_pt
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input); std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
cv::Mat input_img = input_cv->mat(); cv::Mat input_img = input_cv->mat();
if (!input_cv->mat().data) { if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Sharpness: load image failed."); RETURN_STATUS_UNEXPECTED("[Internal ERROR] Sharpness: load image failed.");
} }
if (input_cv->Rank() == 1 || input_cv->mat().dims > 2) { if (input_cv->Rank() == 1 || input_cv->mat().dims > 2) {
RETURN_STATUS_UNEXPECTED("Sharpness: input tensor is not in shape of <H,W,C> or <H,W>."); RETURN_STATUS_UNEXPECTED("Sharpness: shape of input is not <H,W,C> or <H,W>, but got rank: " + input_cv->Rank());
} }
/// creating a smoothing filter. 1, 1, 1, /// creating a smoothing filter. 1, 1, 1,

View File

@ -97,7 +97,7 @@ ShapeMisMatch:
TimeoutError: TimeoutError:
ret = Status(StatusCode::kMDTimeOut, __LINE__, __FILE__, ret = Status(StatusCode::kMDTimeOut, __LINE__, __FILE__,
"Expected that PyFunc should return numpy array, got None. If python_multiprocessing is True, " "Expected that PyFunc should return numpy array, got None. If \'python_multiprocessing\' is True, "
"PyFunc may execute time out."); "PyFunc may execute time out.");
goto ComputeReturn; goto ComputeReturn;
} }

View File

@ -148,7 +148,7 @@ def test_center_crop_errors():
try: try:
test_center_crop_op(16777216, 16777216) test_center_crop_op(16777216, 16777216)
except RuntimeError as e: except RuntimeError as e:
assert "CenterCropOp padding size is more than 3 times the original size." in \ assert "CenterCropOp padding size is more than 3 times the original size" in \
str(e) str(e)

View File

@ -373,7 +373,7 @@ def test_cutmix_batch_fail5():
images_cutmix = image.asnumpy() images_cutmix = image.asnumpy()
else: else:
images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0) images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)
error_message = "both image and label columns are required" error_message = "size of input should be 2 (including image and label)"
assert error_message in str(error.value) assert error_message in str(error.value)
@ -400,7 +400,7 @@ def test_cutmix_batch_fail6():
images_cutmix = image.asnumpy() images_cutmix = image.asnumpy()
else: else:
images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0) images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)
error_message = "image doesn't match the NCHW format." error_message = "image doesn't match the <N,C,H,W> format"
assert error_message in str(error.value) assert error_message in str(error.value)

View File

@ -97,7 +97,7 @@ def test_fillop_error_handling():
with pytest.raises(RuntimeError) as error_info: with pytest.raises(RuntimeError) as error_info:
for _ in data: for _ in data:
pass pass
assert "fill datatype does not match the input datatype" in str(error_info.value) assert "fill datatype is string but the input datatype is not string" in str(error_info.value)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -113,15 +113,15 @@ def test_mask_string_comparison():
def test_mask_exceptions_str(): def test_mask_exceptions_str():
with pytest.raises(RuntimeError) as info: with pytest.raises(RuntimeError) as info:
mask_compare([1, 2, 3, 4, 5], ops.Relational.EQ, "3.5") mask_compare([1, 2, 3, 4, 5], ops.Relational.EQ, "3.5")
assert "input datatype does not match the value datatype." in str(info.value) assert "input datatype does not match the value datatype" in str(info.value)
with pytest.raises(RuntimeError) as info: with pytest.raises(RuntimeError) as info:
mask_compare(["1", "2", "3", "4", "5"], ops.Relational.EQ, 3.5) mask_compare(["1", "2", "3", "4", "5"], ops.Relational.EQ, 3.5)
assert "input datatype does not match the value datatype." in str(info.value) assert "input datatype does not match the value datatype" in str(info.value)
with pytest.raises(RuntimeError) as info: with pytest.raises(RuntimeError) as info:
mask_compare(["1", "2", "3", "4", "5"], ops.Relational.EQ, "3.5", mstype.string) mask_compare(["1", "2", "3", "4", "5"], ops.Relational.EQ, "3.5", mstype.string)
assert "Only supports bool or numeric datatype for generated mask type." in str(info.value) assert "Only supports bool or numeric datatype for generated mask type" in str(info.value)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -335,7 +335,7 @@ def test_mixup_batch_fail3():
images_mixup = image.asnumpy() images_mixup = image.asnumpy()
else: else:
images_mixup = np.append(images_mixup, image.asnumpy(), axis=0) images_mixup = np.append(images_mixup, image.asnumpy(), axis=0)
error_message = "input lack of images or labels" error_message = "size of input data should be 2 (including images or labels)"
assert error_message in str(error.value) assert error_message in str(error.value)

View File

@ -215,7 +215,7 @@ def test_rescale_with_random_posterize():
_ = dataset.output_shapes() _ = dataset.output_shapes()
except RuntimeError as e: except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert "input image data type can not be float" in str(e) assert "data type of input image should be int" in str(e)
if __name__ == "__main__": if __name__ == "__main__":