!28458 [MSLITE][GPU] Clean up logging in OpenCL op CheckSpecs functions (downgrade MS_LOG(ERROR) to MS_LOG(WARNING))

Merge pull request !28458 from Greatpan/master_code_check
This commit is contained in:
i-robot 2021-12-31 09:16:06 +00:00 committed by Gitee
commit 0ca3207994
8 changed files with 26 additions and 25 deletions

View File

@ -53,24 +53,24 @@ int ArithmeticOpenCLKernel::CheckSpecs() {
}
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto *param = reinterpret_cast<const ArithmeticParameter *>(op_parameter_);
if (!IsArithmetic(type())) {
MS_LOG(ERROR) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
MS_LOG(WARNING) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
return RET_ERROR;
}
if (type() == schema::PrimitiveType_Eltwise) {
auto mode = param->eltwise_mode_;
if (mode != EltwiseMode_PROD && mode != EltwiseMode_SUM && mode != EltwiseMode_MAXIMUM) {
MS_LOG(ERROR) << "Eltwise mode not support, mode:" << mode;
MS_LOG(WARNING) << "Eltwise mode not support, mode:" << mode;
return RET_ERROR;
}
}
if (!(param->activation_type_ == ActivationType_NO_ACTIVATION || param->activation_type_ == ActivationType_RELU ||
param->activation_type_ == ActivationType_RELU6)) {
MS_LOG(ERROR) << "Unsupported activation type " << param->activation_type_;
MS_LOG(WARNING) << "Unsupported activation type " << param->activation_type_;
return RET_ERROR;
}
return RET_OK;

View File

@ -40,7 +40,7 @@ int ConcatOpenCLKernel::RunAxis0() {
for (size_t i = 0; i < in_tensors_.size(); i++) {
auto src_data = weight_ptrs_.at(i) == nullptr ? in_tensors_[i]->data() : weight_ptrs_.at(i);
if (allocator_->GetImageSize(src_data, &img_size) != RET_OK) {
MS_LOG(ERROR) << "GetImageSize failed.";
MS_LOG(WARNING) << "GetImageSize failed.";
return RET_ERROR;
}
auto src_origin = cl::array<cl::size_type, 3U>{0, 0, 0};

View File

@ -446,7 +446,7 @@ kernel::InnerKernel *OpenCLMatMulKernelCreator(const std::vector<lite::Tensor *>
new (std::nothrow) MatMulOpenCLKernel(opParameter, inputs, outputs, static_cast<const lite::InnerContext *>(ctx));
}
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr.";
MS_LOG(WARNING) << "kernel " << opParameter->name_ << "is nullptr.";
free(opParameter);
return nullptr;
}
@ -454,14 +454,14 @@ kernel::InnerKernel *OpenCLMatMulKernelCreator(const std::vector<lite::Tensor *>
MS_LOG(WARNING) << "kernel don't infer shape yet!";
auto ret = reinterpret_cast<MatMulOpenCLKernel *>(kernel)->StoreConstData();
if (ret != mindspore::lite::RET_OK) {
MS_LOG(ERROR) << "Store " << opParameter->name_ << " const data failed!";
MS_LOG(WARNING) << "Store " << opParameter->name_ << " const data failed!";
delete kernel;
return nullptr;
}
return kernel;
}
if (kernel->CheckSpecs() != RET_OK || kernel->OpenCLKernel::CheckSpecs() != RET_OK) {
MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!";
MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification failed!";
delete kernel;
return nullptr;
}

View File

@ -110,7 +110,7 @@ int ReduceOpenCLKernel::SetShapeSizeIs0Axes() {
reduction_indices = reduction_indices + (C4NUM % input_shape_size);
reduce_axes_[reduction_indices] = true;
} else {
MS_LOG(ERROR) << "in Reduce: axes tensor's reduction_indices should be -1, 1, 2, 3";
MS_LOG(WARNING) << "in Reduce: axes tensor's reduction_indices should be -1, 1, 2, 3";
return RET_ERROR;
}
return RET_OK;
@ -132,7 +132,7 @@ int ReduceOpenCLKernel::SetShapeSizeIs1Axes() {
axes_[i] = reinterpret_cast<int *>(axes_tensor->data())[i];
}
if (num_axes > 2 || num_axes < 1) {
MS_LOG(ERROR) << "Unsupported reduce num axes " << num_axes;
MS_LOG(WARNING) << "Unsupported reduce num axes " << num_axes;
return RET_PARAM_INVALID;
}
@ -165,7 +165,7 @@ int ReduceOpenCLKernel::SetAxes() {
} else if (axes_tensor->shape().size() == 1) {
return SetShapeSizeIs1Axes();
} else {
MS_LOG(ERROR) << "in Reduce: axes tensor's ndim should be 0 or 1.";
MS_LOG(WARNING) << "in Reduce: axes tensor's ndim should be 0 or 1.";
return RET_ERROR;
}

View File

@ -34,20 +34,20 @@ namespace mindspore::kernel {
int ReshapeOpenCLKernel::CheckSpecs() {
if ((in_tensors_.size() != INPUT_TENSOR_SIZE_1 && in_tensors_.size() != INPUT_TENSOR_SIZE_2) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "Reshape input output size unsupported.";
MS_LOG(WARNING) << "Reshape input output size unsupported.";
return RET_ERROR;
}
if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16 &&
in_tensors_[0]->data_type() != kNumberTypeInt32) {
MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type();
return RET_ERROR;
}
if (in_tensors_[0]->shape().size() > DIMENSION_4D) {
MS_LOG(ERROR) << "Reshape input size should in 0-4, actual: " << in_tensors_[0]->shape().size();
MS_LOG(WARNING) << "Reshape input size should in 0-4, actual: " << in_tensors_[0]->shape().size();
return RET_ERROR;
}
if (out_tensors_[0]->shape().size() > OUTPUT_TENSOR_SIZE_4) {
MS_LOG(ERROR) << "Reshape output size should in 0-4, actual: " << out_tensors_[0]->shape().size();
MS_LOG(WARNING) << "Reshape output size should in 0-4, actual: " << out_tensors_[0]->shape().size();
return RET_ERROR;
}
return RET_OK;

View File

@ -37,6 +37,7 @@ int ScaleOpenCLKernel::CheckSpecs() {
auto *param = reinterpret_cast<const ScaleParameter *>(op_parameter_);
if (param->activation_type_ != ActType_No && param->activation_type_ != ActType_Relu &&
param->activation_type_ != ActType_Relu6) {
MS_LOG(WARNING) << "unsupported scale activation type " << param->activation_type_;
return RET_ERROR;
}
auto *scale_param = reinterpret_cast<const ScaleParameter *>(op_parameter_);

View File

@ -258,14 +258,14 @@ kernel::InnerKernel *OpenCLKernelCreator(const std::vector<lite::Tensor *> &inpu
auto *kernel = new (std::nothrow)
T(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs, static_cast<const lite::InnerContext *>(ctx));
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr.";
MS_LOG(WARNING) << "kernel " << opParameter->name_ << "is nullptr.";
free(opParameter);
return nullptr;
}
auto ret = kernel->CheckSpecsWithoutShape();
if (ret != mindspore::lite::RET_OK) {
MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification Without shape failed!";
MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification Without shape failed!";
delete kernel;
return nullptr;
}
@ -281,19 +281,19 @@ kernel::InnerKernel *OpenCLKernelCreator(const std::vector<lite::Tensor *> &inpu
}
ret = kernel->CheckSpecs();
if (ret != mindspore::lite::RET_OK) {
MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!";
MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification failed!";
delete kernel;
return nullptr;
}
ret = kernel->OpenCLKernel::CheckSpecs();
if (ret != mindspore::lite::RET_OK) {
MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!";
MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification failed!";
delete kernel;
return nullptr;
}
ret = reinterpret_cast<OpenCLKernel *>(kernel)->StoreConstData();
if (ret != mindspore::lite::RET_OK) {
MS_LOG(ERROR) << "Store " << opParameter->name_ << " const data failed!";
MS_LOG(WARNING) << "Store " << opParameter->name_ << " const data failed!";
delete kernel;
return nullptr;
}

View File

@ -338,11 +338,11 @@ void PackNCHWToNHWC4(void *src, void *dst, bool src_is_fp16, bool dst_is_fp16, c
int CheckParamLikeTensor(const std::string &kernel_name, const std::string &tensor_name, lite::Tensor *tensor,
TypeId expect_data_type, const std::vector<int> &expect_shape) {
if (!tensor->IsConst()) {
MS_LOG(ERROR) << "in " << kernel_name << ": tensor " << tensor_name << " must be Const.";
MS_LOG(WARNING) << "in " << kernel_name << ": tensor " << tensor_name << " must be Const.";
return RET_ERROR;
}
if (tensor->data_type() != expect_data_type) {
MS_LOG(ERROR) << "in " << kernel_name << ": tensor's data_type must be " << expect_data_type;
MS_LOG(WARNING) << "in " << kernel_name << ": tensor's data_type must be " << expect_data_type;
return RET_ERROR;
}
if (tensor->shape() != expect_shape) {
@ -358,9 +358,9 @@ int CheckParamLikeTensor(const std::string &kernel_name, const std::string &tens
}
tensor_shape_str += ")";
MS_LOG(ERROR) << "in " << kernel_name
<< ": tensor's shape is error. expect_shape: " + expect_shape_str +
" tensor->shape(): " + tensor_shape_str;
MS_LOG(WARNING) << "in " << kernel_name
<< ": tensor's shape is error. expect_shape: " + expect_shape_str +
" tensor->shape(): " + tensor_shape_str;
return RET_ERROR;
}
return RET_OK;