!17384 Fix Magic Numbers in Image Utils

Merge pull request !17384 from EricZ/fix_image_util_ci
This commit is contained in:
i-robot 2021-06-08 21:48:53 +08:00 committed by Gitee
commit 961c2d5626
2 changed files with 71 additions and 60 deletions

View File

@ -63,7 +63,8 @@ int GetCVBorderType(BorderType type) {
bool CheckTensorShape(const std::shared_ptr<Tensor> &tensor, const int &channel) {
bool rc = false;
if (tensor->Rank() != 3 || (tensor->shape()[channel] != 1 && tensor->shape()[channel] != 3)) {
if (tensor->Rank() != DEFAULT_IMAGE_RANK ||
(tensor->shape()[channel] != 1 && tensor->shape()[channel] != DEFAULT_IMAGE_CHANNELS)) {
rc = true;
}
return rc;
@ -106,7 +107,7 @@ Status Resize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Resize: load image failed.");
}
if (input_cv->Rank() != 3 && input_cv->Rank() != 2) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != 2) {
RETURN_STATUS_UNEXPECTED("Resize: input tensor is not in shape of <H,W,C> or <H,W>");
}
@ -127,7 +128,7 @@ Status Resize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
cv::Mat in_image = input_cv->mat();
const uint32_t kResizeShapeLimits = 1000;
// resize image too large or too small
// resize image too large or too small, 1000 is arbitrarily chosen here to prevent open cv from segmentation fault
if (output_height > in_image.rows * kResizeShapeLimits || output_width > in_image.cols * kResizeShapeLimits) {
std::string err_msg =
"Resize: the resizing width or height is too big, it's 1000 times bigger than the original image.";
@ -139,8 +140,8 @@ Status Resize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
}
try {
TensorShape shape{output_height, output_width};
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() == 3) shape = shape.AppendDim(num_channels);
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->Rank() == DEFAULT_IMAGE_RANK) shape = shape.AppendDim(num_channels);
std::shared_ptr<CVTensor> output_cv;
RETURN_IF_NOT_OK(CVTensor::CreateEmpty(shape, input_cv->type(), &output_cv));
@ -397,7 +398,7 @@ Status Crop(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *outpu
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Crop: load image failed.");
}
if (input_cv->Rank() != 3 && input_cv->Rank() != 2) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != 2) {
RETURN_STATUS_UNEXPECTED("Crop: invalid image Shape, only support <H,W,C> or <H,W>");
}
// account for integer overflow
@ -410,8 +411,8 @@ Status Crop(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *outpu
}
try {
TensorShape shape{h, w};
if (input_cv->Rank() == 3) {
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() == DEFAULT_IMAGE_RANK) {
int num_channels = input_cv->shape()[CHANNEL_INDEX];
shape = shape.AppendDim(num_channels);
}
std::shared_ptr<CVTensor> output_cv;
@ -436,9 +437,10 @@ Status HwcToChw(std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output)
*output = input;
return Status::OK();
}
int num_channels = input_cv->shape()[2];
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->shape().Size() < 2 || input_cv->shape().Size() > 3 ||
(input_cv->shape().Size() == 3 && num_channels != 3 && num_channels != 1)) {
(input_cv->shape().Size() == 3 && num_channels != DEFAULT_IMAGE_CHANNELS &&
num_channels != MIN_IMAGE_CHANNELS)) {
RETURN_STATUS_UNEXPECTED("HWC2CHW: image shape is not <H,W,C>.");
}
cv::Mat output_img;
@ -473,7 +475,7 @@ Status MaskWithTensor(const std::shared_ptr<Tensor> &sub_mat, std::shared_ptr<Te
"CutMixBatch: MaskWithTensor failed: "
"sub_mat shape doesn't match <H,W,C> format.");
}
int number_of_channels = (*input)->shape()[2];
int number_of_channels = (*input)->shape()[CHANNEL_INDEX];
for (int i = 0; i < crop_width; i++) {
for (int j = 0; j < crop_height; j++) {
for (int c = 0; c < number_of_channels; c++) {
@ -549,8 +551,8 @@ Status CopyTensorValue(const std::shared_ptr<Tensor> &source_tensor, std::shared
Status SwapRedAndBlue(std::shared_ptr<Tensor> input, std::shared_ptr<Tensor> *output) {
try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
int num_channels = input_cv->shape()[2];
if (input_cv->shape().Size() != 3 || num_channels != 3) {
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->shape().Size() != 3 || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("SwapRedBlue: image shape is not <H,W,C>.");
}
std::shared_ptr<CVTensor> output_cv;
@ -571,10 +573,10 @@ Status CropAndResize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tenso
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("CropAndResize: load image failed.");
}
if (input_cv->Rank() != 3 && input_cv->Rank() != 2) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != 2) {
RETURN_STATUS_UNEXPECTED("CropAndResize: image shape is not <H,W,C> or <H,W>");
}
// image too large or too small
// image too large or too small, 1000 is arbitrary here to prevent opencv from segmentation fault
const uint32_t kCropShapeLimits = 1000;
if (crop_height == 0 || crop_width == 0 || target_height == 0 || target_height > crop_height * kCropShapeLimits ||
target_width == 0 || target_height > crop_width * kCropShapeLimits) {
@ -607,8 +609,8 @@ Status CropAndResize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tenso
}
TensorShape shape{target_height, target_width};
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() == 3) shape = shape.AppendDim(num_channels);
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->Rank() == DEFAULT_IMAGE_RANK) shape = shape.AppendDim(num_channels);
std::shared_ptr<CVTensor> cvt_out;
RETURN_IF_NOT_OK(CVTensor::CreateEmpty(shape, input_cv->type(), &cvt_out));
cv::resize(cv_in(roi), cvt_out->mat(), cv::Size(target_width, target_height), 0, 0, cv_mode);
@ -677,7 +679,7 @@ void Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *ou
auto itr_out = (*output)->begin<float>();
auto itr = input->begin<T>();
auto end = input->end<T>();
int64_t num_channels = (*output)->shape()[2];
int64_t num_channels = (*output)->shape()[CHANNEL_INDEX];
while (itr != end) {
for (int64_t i = 0; i < num_channels; i++) {
@ -691,22 +693,22 @@ void Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *ou
Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, std::vector<float> mean,
std::vector<float> std) {
RETURN_IF_NOT_OK(Tensor::CreateEmpty(input->shape(), DataType(DataType::DE_FLOAT32), output));
if (input->Rank() == 2) {
RETURN_IF_NOT_OK((*output)->ExpandDim(2));
if (input->Rank() == MIN_IMAGE_DIMENSION) {
RETURN_IF_NOT_OK((*output)->ExpandDim(MIN_IMAGE_DIMENSION));
}
CHECK_FAIL_RETURN_UNEXPECTED((*output)->Rank() == 3, "Normalize: image shape is not <H,W,C>.");
CHECK_FAIL_RETURN_UNEXPECTED((*output)->Rank() == DEFAULT_IMAGE_RANK, "Normalize: image shape is not <H,W,C>.");
CHECK_FAIL_RETURN_UNEXPECTED(std.size() == mean.size(), "Normalize: mean and std vectors are not of same size.");
// caller provided 1 mean/std value and there are more than one channel --> duplicate mean/std value
if (mean.size() == 1 && (*output)->shape()[2] != 1) {
if (mean.size() == 1 && (*output)->shape()[CHANNEL_INDEX] != 1) {
std::vector<float> mean_t, std_t;
for (int64_t i = 0; i < (*output)->shape()[2] - 1; i++) {
for (int64_t i = 0; i < (*output)->shape()[CHANNEL_INDEX] - 1; i++) {
mean.push_back(mean[0]);
std.push_back(std[0]);
}
}
CHECK_FAIL_RETURN_UNEXPECTED((*output)->shape()[2] == mean.size(),
CHECK_FAIL_RETURN_UNEXPECTED((*output)->shape()[CHANNEL_INDEX] == mean.size(),
"Normalize: number of channels does not match the size of mean and std vectors.");
switch (input->type().value()) {
@ -752,7 +754,7 @@ Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
RETURN_STATUS_UNEXPECTED("Normalize: unsupported type.");
}
if (input->Rank() == 2) {
if (input->Rank() == MIN_IMAGE_DIMENSION) {
(*output)->Squeeze();
}
return Status::OK();
@ -761,7 +763,7 @@ Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
Status NormalizePad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
const std::shared_ptr<Tensor> &mean, const std::shared_ptr<Tensor> &std, const std::string &dtype) {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (!(input_cv->mat().data && input_cv->Rank() == 3)) {
if (!(input_cv->mat().data && input_cv->Rank() == DEFAULT_IMAGE_CHANNELS)) {
RETURN_STATUS_UNEXPECTED("NormalizePad: load image failed.");
}
DataType tensor_type = DataType(DataType::DE_FLOAT32);
@ -777,12 +779,12 @@ Status NormalizePad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
TensorShape new_shape({input_cv->shape()[0], input_cv->shape()[1], input_cv->shape()[2] + 1});
RETURN_IF_NOT_OK(CVTensor::CreateEmpty(new_shape, tensor_type, &output_cv));
mean->Squeeze();
if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != 3) {
if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != DEFAULT_IMAGE_CHANNELS) {
std::string err_msg = "NormalizePad: mean tensor should be of size 3 and type float.";
return Status(StatusCode::kMDShapeMisMatch, err_msg);
}
std->Squeeze();
if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != 3) {
if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != DEFAULT_IMAGE_CHANNELS) {
std::string err_msg = "NormalizePad: std tensor should be of size 3 and type float.";
return Status(StatusCode::kMDShapeMisMatch, err_msg);
}
@ -791,10 +793,10 @@ Status NormalizePad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
// and std are in RGB
std::vector<cv::Mat> rgb;
cv::split(in_image, rgb);
if (rgb.size() != 3) {
if (rgb.size() != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("NormalizePad: input image is not in RGB.");
}
for (uint8_t i = 0; i < 3; i++) {
for (int8_t i = 0; i < DEFAULT_IMAGE_CHANNELS; i++) {
float mean_c, std_c;
RETURN_IF_NOT_OK(mean->GetItemAt<float>(&mean_c, {i}));
RETURN_IF_NOT_OK(std->GetItemAt<float>(&std_c, {i}));
@ -816,8 +818,9 @@ Status AdjustBrightness(const std::shared_ptr<Tensor> &input, std::shared_ptr<Te
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustBrightness: load image failed.");
}
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() != 3 || num_channels != 3) {
int num_channels = input_cv->shape()[CHANNEL_INDEX];
// Rank of the image represents how many dimensions, image is expected to be HWC
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustBrightness: image shape is not <H,W,C> or channel is not 3.");
}
std::shared_ptr<CVTensor> output_cv;
@ -837,8 +840,8 @@ Status AdjustContrast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tens
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustContrast: load image failed.");
}
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() != 3 || num_channels != 3) {
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->Rank() != DEFAULT_IMAGE_CHANNELS || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustContrast: image shape is not <H,W,C> or channel is not 3.");
}
cv::Mat gray, output_img;
@ -864,16 +867,16 @@ Status AutoContrast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AutoContrast: load image failed.");
}
if (input_cv->Rank() != 3 && input_cv->Rank() != 2) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != MIN_IMAGE_DIMENSION) {
RETURN_STATUS_UNEXPECTED("AutoContrast: image shape is not <H,W,C> or <H,W>");
}
// Reshape to extend dimension if rank is 2 for algorithm to work. then reshape output to be of rank 2 like input
if (input_cv->Rank() == 2) {
RETURN_IF_NOT_OK(input_cv->ExpandDim(2));
if (input_cv->Rank() == MIN_IMAGE_DIMENSION) {
RETURN_IF_NOT_OK(input_cv->ExpandDim(MIN_IMAGE_DIMENSION));
}
// Get number of channels and image matrix
std::size_t num_of_channels = input_cv->shape()[2];
if (num_of_channels != 1 && num_of_channels != 3) {
std::size_t num_of_channels = input_cv->shape()[CHANNEL_INDEX];
if (num_of_channels != MIN_IMAGE_CHANNELS && num_of_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AutoContrast: image shape is not <H,W,C>.");
}
cv::Mat image = input_cv->mat();
@ -914,7 +917,7 @@ Status AutoContrast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor
for (int32_t i = 0; i < 256; i++) {
int32_t ix = static_cast<int32_t>(i * scale + offset);
ix = std::max(ix, 0);
ix = std::min(ix, 255);
ix = std::min(ix, MAX_BIT_VALUE);
table.push_back(ix);
}
}
@ -942,8 +945,8 @@ Status AdjustSaturation(const std::shared_ptr<Tensor> &input, std::shared_ptr<Te
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("AdjustSaturation: load image failed.");
}
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() != 3 || num_channels != 3) {
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustSaturation: image shape is not <H,W,C> or channel is not 3.");
}
std::shared_ptr<CVTensor> output_cv;
@ -971,7 +974,7 @@ Status AdjustHue(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
RETURN_STATUS_UNEXPECTED("AdjustHue: load image failed.");
}
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() != 3 || num_channels != 3) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("AdjustHue: image shape is not <H,W,C> or channel is not 3.");
}
std::shared_ptr<CVTensor> output_cv;
@ -982,7 +985,7 @@ Status AdjustHue(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
for (int x = 0; x < output_img.rows; x++) {
uint8_t cur1 = output_img.at<cv::Vec3b>(cv::Point(y, x))[0];
uint8_t h_hue = 0;
h_hue = static_cast<uint8_t>(hue * 255);
h_hue = static_cast<uint8_t>(hue * MAX_BIT_VALUE);
cur1 += h_hue;
output_img.at<cv::Vec3b>(cv::Point(y, x))[0] = cur1;
}
@ -1001,16 +1004,16 @@ Status Equalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *o
if (!input_cv->mat().data) {
RETURN_STATUS_UNEXPECTED("Equalize: load image failed.");
}
if (input_cv->Rank() != 3 && input_cv->Rank() != 2) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK && input_cv->Rank() != MIN_IMAGE_DIMENSION) {
RETURN_STATUS_UNEXPECTED("Equalize: image shape is not <H,W,C> or <H,W>");
}
// For greyscale images, extend dimension if rank is 2 and reshape output to be of rank 2.
if (input_cv->Rank() == 2) {
RETURN_IF_NOT_OK(input_cv->ExpandDim(2));
if (input_cv->Rank() == MIN_IMAGE_DIMENSION) {
RETURN_IF_NOT_OK(input_cv->ExpandDim(MIN_IMAGE_DIMENSION));
}
// Get number of channels and image matrix
std::size_t num_of_channels = input_cv->shape()[2];
if (num_of_channels != 1 && num_of_channels != 3) {
std::size_t num_of_channels = input_cv->shape()[CHANNEL_INDEX];
if (num_of_channels != MIN_IMAGE_CHANNELS && num_of_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("Equalize: image shape is not <H,W,C>.");
}
cv::Mat image = input_cv->mat();
@ -1041,11 +1044,11 @@ Status Erase(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *outp
uint8_t fill_g, uint8_t fill_b) {
try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
int num_channels = input_cv->shape()[2];
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->mat().data == nullptr) {
RETURN_STATUS_UNEXPECTED("CutOut: load image failed.");
}
if (input_cv->Rank() != 3 || num_channels != 3) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || num_channels != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("CutOut: image shape is not <H,W,C> or channel is not 3.");
}
cv::Mat input_img = input_cv->mat();
@ -1108,7 +1111,7 @@ Status Pad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
// validate rank
if (input_cv->Rank() == 1 || input_cv->mat().dims > 2) {
if (input_cv->Rank() == 1 || input_cv->mat().dims > MIN_IMAGE_DIMENSION) {
RETURN_STATUS_UNEXPECTED("Pad: input tensor is not in shape of <H,W,C> or <H,W>.");
}
@ -1125,8 +1128,10 @@ Status Pad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output
std::shared_ptr<CVTensor> output_cv;
RETURN_IF_NOT_OK(CVTensor::CreateFromMat(out_image, &output_cv));
// pad the dimension if shape information is only 2 dimensional, this is grayscale
int num_channels = input_cv->shape()[2];
if (input_cv->Rank() == 3 && num_channels == 1 && output_cv->Rank() == 2) RETURN_IF_NOT_OK(output_cv->ExpandDim(2));
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->Rank() == DEFAULT_IMAGE_RANK && num_channels == MIN_IMAGE_CHANNELS &&
output_cv->Rank() == MIN_IMAGE_DIMENSION)
RETURN_IF_NOT_OK(output_cv->ExpandDim(CHANNEL_INDEX));
*output = std::static_pointer_cast<Tensor>(output_cv);
return Status::OK();
} catch (const cv::Exception &e) {
@ -1137,8 +1142,8 @@ Status Pad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output
Status RgbaToRgb(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
int num_channels = input_cv->shape()[2];
if (input_cv->shape().Size() != 3 || num_channels != 4) {
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->shape().Size() != DEFAULT_IMAGE_CHANNELS || num_channels != 4) {
std::string err_msg =
"RgbaToRgb: Number of channels of image does not equal 4, "
"but got : " +
@ -1159,8 +1164,8 @@ Status RgbaToRgb(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
Status RgbaToBgr(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
int num_channels = input_cv->shape()[2];
if (input_cv->shape().Size() != 3 || num_channels != 4) {
int num_channels = input_cv->shape()[CHANNEL_INDEX];
if (input_cv->shape().Size() != DEFAULT_IMAGE_CHANNELS || num_channels != 4) {
std::string err_msg =
"RgbaToBgr: number of channels of image should be 4, "
"but got : " +
@ -1181,7 +1186,7 @@ Status RgbaToBgr(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *
Status RgbToGray(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(std::move(input));
if (input_cv->Rank() != 3 || input_cv->shape()[2] != 3) {
if (input_cv->Rank() != DEFAULT_IMAGE_RANK || input_cv->shape()[CHANNEL_INDEX] != DEFAULT_IMAGE_CHANNELS) {
RETURN_STATUS_UNEXPECTED("RgbToGray: image shape is not <H,W,C> or channel is not 3.");
}
TensorShape out_shape = TensorShape({input_cv->shape()[0], input_cv->shape()[1]});
@ -1219,7 +1224,7 @@ Status Affine(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out
InterpolationMode interpolation, uint8_t fill_r, uint8_t fill_g, uint8_t fill_b) {
try {
std::shared_ptr<CVTensor> input_cv = CVTensor::AsCVTensor(input);
if (input_cv->Rank() == 1 || input_cv->Rank() > 3) {
if (input_cv->Rank() < MIN_IMAGE_DIMENSION || input_cv->Rank() > DEFAULT_IMAGE_RANK) {
RETURN_STATUS_UNEXPECTED("Affine: image shape is not <H,W,C> or <H,W>.");
}

View File

@ -36,6 +36,12 @@
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
// Image layout and pixel-value constants. Images are laid out as <H,W,C>.
// Typed constexpr constants are preferred over #define macros: they are scoped,
// type-checked, and visible to the debugger, while keeping the same names and
// values so every existing use site is unaffected.
constexpr int CHANNEL_INDEX = 2;           // in an <H,W,C> shape, index 2 holds the channel count
constexpr int DEFAULT_IMAGE_CHANNELS = 3;  // color images generally carry 3 channels
constexpr int DEFAULT_IMAGE_RANK = 3;      // an <H,W,C> image tensor has rank 3
constexpr int MAX_BIT_VALUE = 255;         // maximum 8-bit pixel value after decode is 255
constexpr int MIN_IMAGE_CHANNELS = 1;      // image ops support a minimum of 1 channel (grayscale)
constexpr int MIN_IMAGE_DIMENSION = 2;     // images are at least 2-dimensional (<H,W>)
namespace mindspore {
namespace dataset {
void JpegErrorExitCustom(j_common_ptr cinfo);