From 28f7cee8787f3a7382b25d5e548cfda2279e6736 Mon Sep 17 00:00:00 2001 From: Fazzie <1240419984@qq.com> Date: Sat, 18 Sep 2021 14:33:47 +0800 Subject: [PATCH] fix opencl code --- .../runtime/kernel/opencl/kernel/activation.cc | 4 ++-- .../runtime/kernel/opencl/kernel/argminmax.cc | 12 ++++++------ .../runtime/kernel/opencl/kernel/arithmetic.cc | 4 ++-- .../kernel/opencl/kernel/arithmetic_self.cc | 6 +++--- .../kernel/opencl/kernel/batch_to_space_nd.cc | 12 ++++++------ .../runtime/kernel/opencl/kernel/batchnorm.cc | 6 +++--- .../src/runtime/kernel/opencl/kernel/cast.cc | 8 ++++---- .../src/runtime/kernel/opencl/kernel/concat.cc | 12 ++++++------ .../src/runtime/kernel/opencl/kernel/conv2d.cc | 16 ++++++++-------- .../kernel/opencl/kernel/conv2d_transpose.cc | 8 ++++---- .../kernel/opencl/kernel/depthwise_conv2d.cc | 8 ++++---- .../src/runtime/kernel/opencl/kernel/fill.cc | 6 +++--- .../kernel/opencl/kernel/fullconnection.cc | 18 +++++++++--------- .../src/runtime/kernel/opencl/kernel/gather.cc | 16 ++++++++-------- .../opencl/kernel/int8/arithmetic_int8.cc | 12 ++++++------ .../runtime/kernel/opencl/kernel/layer_norm.cc | 8 ++++---- .../src/runtime/kernel/opencl/kernel/matmul.cc | 6 +++--- .../runtime/kernel/opencl/kernel/one_hot.cc | 2 +- .../src/runtime/kernel/opencl/kernel/pad.cc | 14 +++++++------- .../runtime/kernel/opencl/kernel/pooling2d.cc | 8 ++++---- .../src/runtime/kernel/opencl/kernel/power.cc | 8 ++++---- .../src/runtime/kernel/opencl/kernel/prelu.cc | 10 +++++----- .../src/runtime/kernel/opencl/kernel/reduce.cc | 10 +++++----- .../src/runtime/kernel/opencl/kernel/resize.cc | 6 +++--- .../src/runtime/kernel/opencl/kernel/scale.cc | 3 ++- .../runtime/kernel/opencl/kernel/softmax.cc | 6 +++--- .../kernel/opencl/kernel/space_to_batch_nd.cc | 12 ++++++------ .../kernel/opencl/kernel/space_to_depth.cc | 2 +- .../kernel/opencl/kernel/sparse_to_dense.cc | 8 ++++---- .../src/runtime/kernel/opencl/kernel/split.cc | 14 +++++++------- .../src/runtime/kernel/opencl/kernel/stack.cc | 12 ++++++------ .../kernel/opencl/kernel/strided_slice.cc | 14 +++++++------- .../runtime/kernel/opencl/kernel/to_format.cc | 4 ++-- .../runtime/kernel/opencl/kernel/transpose.cc | 6 +++--- .../lite/tools/anf_exporter/anf_exporter.cc | 12 ++++++++++-- .../lite/tools/anf_exporter/fetch_content.cc | 10 ++++++---- .../lite/tools/anf_exporter/fetch_content.h | 4 ++-- 37 files changed, 169 insertions(+), 158 deletions(-) diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc index 02b3a5fe3e7..bda8a526b48 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc @@ -52,11 +52,11 @@ std::string ActivationOpenCLKernel::GetActTypeString(int act_type) { int ActivationOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (GetActTypeString(type_).empty()) { - MS_LOG(ERROR) << "schema::ActivationType:" << type_ << "not found"; + MS_LOG(WARNING) << "schema::ActivationType:" << type_ << "not found"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc index 
37f755ebb69..d03d98059a5 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc @@ -32,21 +32,21 @@ using mindspore::schema::PrimitiveType_ArgMinFusion; namespace mindspore::kernel { int ArgMinMaxOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if ((in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) || (out_tensors_[0]->data_type() != kNumberTypeFloat32 && out_tensors_[0]->data_type() != kNumberTypeFloat16)) { - MS_LOG(ERROR) << "Unsupported input/output data type. input data type is " << in_tensors_[0]->data_type() - << " output data type is " << out_tensors_[0]->data_type(); + MS_LOG(WARNING) << "Unsupported input/output data type. input data type is " << in_tensors_[0]->data_type() + << " output data type is " << out_tensors_[0]->data_type(); return RET_ERROR; } if (in_tensors_[0]->shape().size() < DIMENSION_1D || in_tensors_[0]->shape().size() > DIMENSION_4D) { - MS_LOG(ERROR) << "input shape size must be (1-4), actual: " << in_tensors_[0]->shape().size(); + MS_LOG(WARNING) << "input shape size must be (1-4), actual: " << in_tensors_[0]->shape().size(); return RET_ERROR; } if (out_tensors_[0]->shape().size() != DIMENSION_1D) { - MS_LOG(ERROR) << "output shape size must be 1, actual" << out_tensors_[0]->shape().size(); + MS_LOG(WARNING) << "output shape size must be 1, actual: " << out_tensors_[0]->shape().size(); return RET_ERROR; } auto *param = reinterpret_cast<ArgMinMaxParameter *>(this->op_parameter_); @@ -55,7 +55,7 @@ int ArgMinMaxOpenCLKernel::CheckSpecs() { CHECK_LESS_RETURN(dims_size, 1); auto axis = (param->axis_ + dims_size) % dims_size; if (axis < 0 || axis >= dims_size) { - MS_LOG(ERROR) << "Invalid axis " << axis; + MS_LOG(WARNING) << "Invalid axis " << axis; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc index 060f5a25929..8c110b14d13 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc @@ -41,13 +41,13 @@ namespace mindspore::kernel { int ArithmeticOpenCLKernel::CheckSpecs() { for (auto &tensor : in_tensors_) { if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) { - MS_LOG(ERROR) << "ArithmeticOpenCLKernel only support fp32/fp16 input"; + MS_LOG(WARNING) << "ArithmeticOpenCLKernel only support fp32/fp16 input"; return RET_ERROR; } } for (auto &tensor : out_tensors_) { if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) { - MS_LOG(ERROR) << "ArithmeticOpenCLKernel only support fp32/fp16 output"; + MS_LOG(WARNING) << "ArithmeticOpenCLKernel only support fp32/fp16 output"; return RET_ERROR; } } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc index 339bda33abb..ad4eaba5aa4 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc @@ -29,15 +29,15 @@ using mindspore::lite::RET_OK; namespace
mindspore::kernel { int ArithmeticSelfOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (!IsArithmeticSelf(type())) { - MS_LOG(ERROR) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type()); + MS_LOG(WARNING) << "Unsupported Operator: " << schema::EnumNamePrimitiveType(type()); return RET_ERROR; } if (in_tensors_[0]->shape().size() != DIMENSION_4D && in_tensors_[0]->shape().size() != DIMENSION_2D) { - MS_LOG(ERROR) << " only support dim = 4 or 2 but your dim = " << in_tensors_[0]->shape().size(); + MS_LOG(WARNING) << " only support dim = 4 or 2 but your dim = " << in_tensors_[0]->shape().size(); return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc index 95873ad1e82..202b9b259e6 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc @@ -30,26 +30,26 @@ using mindspore::schema::PrimitiveType_BatchToSpaceND; namespace mindspore::kernel { int BatchToSpaceNDOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) { - MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type(); + MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type(); return RET_ERROR; } if (in_tensors_[0]->shape().size() != DIMENSION_4D && out_tensors_[0]->shape().size() != DIMENSION_4D) { - MS_LOG(ERROR) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", " - << out_tensors_[0]->shape().size(); + MS_LOG(WARNING) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", " + << out_tensors_[0]->shape().size(); return RET_ERROR; } auto *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_); if (param->block_shape_[0] < 1 || param->block_shape_[1] < 1) { - MS_LOG(ERROR) << "block_sizes_ must > 1, actual " << param->block_shape_[0] << ", " << param->block_shape_[1]; + MS_LOG(WARNING) << "block_sizes_ must be >= 1, actual " << param->block_shape_[0] << ", " << param->block_shape_[1]; return RET_ERROR; } if (in_tensors_[0]->shape()[kNHWC_H] * param->block_shape_[0] <= (param->crops_[0] + param->crops_[1]) || in_tensors_[0]->shape()[kNHWC_W] * param->block_shape_[1] <= (param->crops_[2] + param->crops_[3])) { - MS_LOG(ERROR) << "crop shape error!"; + MS_LOG(WARNING) << "crop shape error!"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc index 8ebbdfaebdf..ffa7ff94740 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc @@ -37,15 +37,15 @@ constexpr int kNumInput4 = 4; namespace mindspore::kernel { int
BatchNormOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_5 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_.at(0)->shape().size() != DIMENSION_4D) { - MS_LOG(ERROR) << "The dim of in_tensors->shape must be 4 but your dim is : " << in_tensors_.at(0)->shape().size(); + MS_LOG(WARNING) << "The dim of in_tensors->shape must be 4 but your dim is : " << in_tensors_.at(0)->shape().size(); return RET_ERROR; } if (in_tensors_.at(0)->shape()[0] > 1) { - MS_LOG(ERROR) << " Unsupported batch_size >1 "; + MS_LOG(WARNING) << " Unsupported batch_size >1 "; return RET_ERROR; } CHECK_NULL_RETURN(in_tensors_[kNumInput0]); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc index 7775a8b7e96..211be2420a8 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc @@ -32,22 +32,22 @@ namespace mindspore::kernel { int CastOpenCLKernel::CheckSpecs() { // the 2nd tensor is DstType if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_.front()->shape() != out_tensors_.front()->shape()) { - MS_LOG(ERROR) << "input shape must be equal to output shape"; + MS_LOG(WARNING) << "input shape must be equal to output shape"; return RET_ERROR; } auto input_dtype = in_tensors_.front()->data_type(); if (input_dtype != kNumberTypeFloat32 && input_dtype != kNumberTypeFloat16) { - MS_LOG(ERROR) << "input dtype must be float32/float16"; + MS_LOG(WARNING) << "input dtype must be float32/float16"; return RET_ERROR; } auto output_dtype = out_tensors_.front()->data_type(); if (output_dtype != kNumberTypeFloat32 && output_dtype != kNumberTypeFloat16) { - MS_LOG(ERROR) << "output dtype must be float32/float16"; + MS_LOG(WARNING) << "output dtype must be float32/float16"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc index 5a60627ad09..82d5fe243eb 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc @@ -78,20 +78,20 @@ void ConcatGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> * int ConcatOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() < INPUT_TENSOR_SIZE_2 || in_tensors_.size() > INPUT_TENSOR_SIZE_6) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto param = reinterpret_cast<ConcatParameter *>(this->op_parameter_); auto out_tensors_shape_size = out_tensors_[0]->shape().size(); MS_LOG(DEBUG) << " concat at axis=: " << param->axis_; if (out_tensors_shape_size > DIMENSION_4D) { - MS_LOG(ERROR) << " GPU Unsupported shape.size > 4 "; + MS_LOG(WARNING) << " GPU Unsupported shape.size > 4 "; return RET_ERROR; } for (auto &in_tensor : in_tensors_) { auto in_tensors_shape_size =
in_tensor->shape().size(); if (in_tensors_shape_size > DIMENSION_4D) { - MS_LOG(ERROR) << " GPU Unsupported in_tensor shape.size > 4 "; + MS_LOG(WARNING) << " GPU Unsupported in_tensor shape.size > 4 "; return RET_ERROR; } } @@ -100,7 +100,7 @@ int ConcatOpenCLKernel::CheckSpecs() { axis_ += in_tensors_.front()->shape().size(); } if (axis_ < 0 || axis_ > 3) { - MS_LOG(ERROR) << " only support axis >= 0 and axis <= 3 "; + MS_LOG(WARNING) << " only support axis >= 0 and axis <= 3 "; return RET_ERROR; } if (out_tensors_shape_size < 4 && type() == PrimitiveType_Concat && axis_ != 0) { @@ -109,12 +109,12 @@ int ConcatOpenCLKernel::CheckSpecs() { } else if (out_tensors_shape_size == DIMENSION_3D) { axis_ = axis_ + 1; } else { - MS_LOG(ERROR) << " Unsupported axis =: " << axis_ << " shape().size()=: " << out_tensors_shape_size; + MS_LOG(WARNING) << " Unsupported axis =: " << axis_ << " shape().size()=: " << out_tensors_shape_size; return RET_ERROR; } } if (in_tensors_.size() < INPUT_TENSOR_SIZE_2 || in_tensors_.size() > INPUT_TENSOR_SIZE_6) { - MS_LOG(ERROR) << "unsupported input size :" << in_tensors_.size(); + MS_LOG(WARNING) << "unsupported input size :" << in_tensors_.size(); return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc index e5c83521668..9ead66026c1 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc @@ -44,24 +44,24 @@ namespace mindspore::kernel { int Conv2DOpenCLKernel::CheckSpecs() { int inputs_num = in_tensors_.size(); if (inputs_num != INPUT_TENSOR_SIZE_2 && inputs_num != INPUT_TENSOR_SIZE_3) { - MS_LOG(ERROR) << "Conv2D only supports 2 or 3 input Tensor but get " << inputs_num; + MS_LOG(WARNING) << "Conv2D only supports 2 or 3 input Tensor but get " << inputs_num; return RET_ERROR; } int outputs_num = out_tensors_.size(); if (outputs_num != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "Conv2D only supports 1 output Tensor but get " << outputs_num; + MS_LOG(WARNING) << "Conv2D only supports 1 output Tensor but get " << outputs_num; return RET_ERROR; } CHECK_NULL_RETURN(in_tensors_.at(kInputIndex)); int input_ndim = in_tensors_.at(kInputIndex)->shape().size(); if (input_ndim != DIMENSION_4D) { - MS_LOG(ERROR) << "Conv2D only supports 4D input Tensor but get " << input_ndim << "D."; + MS_LOG(WARNING) << "Conv2D only supports 4D input Tensor but get " << input_ndim << "D."; return RET_ERROR; } CHECK_NULL_RETURN(out_tensors_.at(kInputIndex)); int output_ndim = out_tensors_.at(kOutputIndex)->shape().size(); if (output_ndim != DIMENSION_4D) { - MS_LOG(ERROR) << "Conv2D only supports 4D output Tensor but get " << output_ndim << "D."; + MS_LOG(WARNING) << "Conv2D only supports 4D output Tensor but get " << output_ndim << "D."; return RET_ERROR; } @@ -69,17 +69,17 @@ int Conv2DOpenCLKernel::CheckSpecs() { CHECK_NULL_RETURN(filter_tensor); int filter_ndim = filter_tensor->shape().size(); if (filter_ndim != DIMENSION_4D) { - MS_LOG(ERROR) << "Conv2D only supports 4D filter Tensor but get " << filter_ndim << "D."; + MS_LOG(WARNING) << "Conv2D only supports 4D filter Tensor but get " << filter_ndim << "D."; return RET_ERROR; } if (!filter_tensor->IsConst()) { - MS_LOG(ERROR) << "Conv2D don't support non-constant filter yet."; + MS_LOG(WARNING) << "Conv2D don't support non-constant filter yet."; return RET_ERROR; } auto *bias_tensor = in_tensors_.size() >= INPUT_TENSOR_SIZE_3 ? 
in_tensors_.at(kBiasIndex) : nullptr; if (bias_tensor != nullptr && !bias_tensor->IsConst()) { - MS_LOG(ERROR) << "Conv2D don't support non-constant bias yet."; + MS_LOG(WARNING) << "Conv2D don't support non-constant bias yet."; return RET_ERROR; } @@ -92,7 +92,7 @@ int Conv2DOpenCLKernel::CheckSpecs() { case ActivationType_TANH: break; default: { - MS_LOG(ERROR) << "Unsupported activation type " << param_->act_type_; + MS_LOG(WARNING) << "Unsupported activation type " << param_->act_type_; return RET_ERROR; } } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc index fbc69961442..22be53aa467 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc @@ -35,22 +35,22 @@ namespace mindspore::kernel { int Conv2dTransposeOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto *param = reinterpret_cast<ConvParameter *>(op_parameter_); if (param->act_type_ != ActType_No && param->act_type_ != ActType_Relu && param->act_type_ != ActType_Relu6) { - MS_LOG(ERROR) << "Unsupported activation type " << param->act_type_; + MS_LOG(WARNING) << "Unsupported activation type " << param->act_type_; return RET_ERROR; } if (!in_tensors_.at(1)->IsConst()) { - MS_LOG(ERROR) << "Conv2dTranspose doesn't support non-constant filter yet."; + MS_LOG(WARNING) << "Conv2dTranspose doesn't support non-constant filter yet."; return RET_ERROR; } if (in_tensors_.size() == INPUT_TENSOR_SIZE_3 && in_tensors_.at(C2NUM) != nullptr && !in_tensors_.at(C2NUM)->IsConst()) { - MS_LOG(ERROR) << "Conv2dTranspose doesn't support non-constant bias yet."; + MS_LOG(WARNING) << "Conv2dTranspose doesn't support non-constant bias yet."; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc index dd099cc9009..3ea0335bec0 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc @@ -38,19 +38,19 @@ namespace mindspore::kernel { int DepthwiseConv2dOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) { - MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type(); + MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type(); return RET_ERROR; } if (!in_tensors_.at(kWeightIndex)->IsConst()) { - MS_LOG(ERROR) << "DepthwiseConv2d don't support non-constant weight yet."; + MS_LOG(WARNING) << "DepthwiseConv2d don't support non-constant weight yet."; return RET_ERROR; } if (in_tensors_.size() == INPUT_TENSOR_SIZE_3 && !in_tensors_.at(kBiasIndex)->IsConst()) { -
MS_LOG(ERROR) << "DepthwiseConv2d don't support non-constant bias yet."; + MS_LOG(WARNING) << "DepthwiseConv2d don't support non-constant bias yet."; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc index 883c8d6418f..5f6ec3428bd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc @@ -81,7 +81,7 @@ void FillOpenCLKernel::SetGlobalLocal() {} int FillOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto param = this->op_parameter_; @@ -89,13 +89,13 @@ int FillOpenCLKernel::CheckSpecs() { auto input = in_tensors_.at(0); CHECK_NULL_RETURN(input); if (input->shape().size() > DIMENSION_1D && param->type_ == PrimitiveType_Fill) { - MS_LOG(ERROR) << " fill only support dim = 1"; + MS_LOG(WARNING) << " fill only support dim = 1"; return RET_ERROR; } auto output = out_tensors_.at(0); CHECK_NULL_RETURN(output); if (output->shape().size() > OUTPUT_TENSOR_SIZE_4) { - MS_LOG(ERROR) << " only support dim <= 4"; + MS_LOG(WARNING) << " only support dim <= 4"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc index 608b5fa4716..dcc6a931a23 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc @@ -37,23 +37,23 @@ namespace mindspore::kernel { int FullConnectionOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto param = reinterpret_cast<MatMulParameter *>(op_parameter_); if (param->a_transpose_) { - MS_LOG(ERROR) << "fullconnection only support a_transpose_=false yet."; + MS_LOG(WARNING) << "fullconnection only support a_transpose_=false yet."; return RET_ERROR; } auto out_gpu_info = GpuTensorInfo(out_tensors_[0]); if (out_gpu_info.H != 1 || out_gpu_info.W != 1) { - MS_LOG(ERROR) << "fullconnection only support 2d output shape or 4d output but H=W=1"; + MS_LOG(WARNING) << "fullconnection only support 2d output shape or 4d output but H=W=1"; return RET_ERROR; } // for fusion: ActivationType_TANH if (param->act_type_ != ActType_No && param->act_type_ != ActType_Relu && param->act_type_ != ActType_Relu6 && static_cast<schema::ActivationType>(param->act_type_) != ActivationType_TANH) { - MS_LOG(ERROR) << "Unsupported activation type " << param->act_type_; + MS_LOG(WARNING) << "Unsupported activation type " << param->act_type_; return RET_ERROR; } N_ = out_gpu_info.N; @@ -61,26 +61,26 @@ int FullConnectionOpenCLKernel::CheckSpecs() { auto intensor_shape = GpuTensorInfo(in_tensors_[0]); int input_nhw = intensor_shape.N * intensor_shape.H * intensor_shape.W; if (input_nhw < N_) { - MS_LOG(ERROR) << "Unsupported fullconnection shape"; + MS_LOG(WARNING) << "Unsupported fullconnection shape"; } if
(!in_tensors_.at(kWeightIndex)->IsConst()) { weight_var_ = true; if (!param->b_transpose_) { - MS_LOG(ERROR) << "If fullconnection input weight is not constant, b_transpose_ should be true."; + MS_LOG(WARNING) << "If fullconnection input weight is not constant, b_transpose_ should be true."; return RET_ERROR; } if (in_tensors_.at(kWeightIndex)->shape().size() != DIMENSION_2D) { - MS_LOG(ERROR) << "If fullconnection input weight is not constant, it should be 2d."; + MS_LOG(WARNING) << "If fullconnection input weight is not constant, it should be 2d."; return RET_ERROR; } if (intensor_shape.C != in_tensors_.at(kWeightIndex)->shape()[1]) { - MS_LOG(ERROR) + MS_LOG(WARNING) << "If fullconnection input weight is not constant, input channel should equal to weight in_channel."; return RET_ERROR; } } if (in_tensors_.size() == INPUT_TENSOR_SIZE_3 && !in_tensors_.at(2)->IsConst()) { - MS_LOG(ERROR) << "FullConnection don't support non-constant bias yet."; + MS_LOG(WARNING) << "FullConnection don't support non-constant bias yet."; return RET_ERROR; } CI_remainder_ = input_nhw / N_; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc index 70b771fa3ca..98442d4fc4a 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc @@ -32,33 +32,33 @@ using mindspore::schema::PrimitiveType_Gather; namespace mindspore::kernel { int GatherOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_3) { - MS_LOG(ERROR) << "GatherOpenCLKernel only supports 3 input Tensor but get " << in_tensors_.size(); + MS_LOG(WARNING) << "GatherOpenCLKernel only supports 3 input Tensor but get " << in_tensors_.size(); return RET_ERROR; } if (out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "GatherOpenCLKernel only supports 1 output Tensor but get " << out_tensors_.size(); + MS_LOG(WARNING) << "GatherOpenCLKernel only supports 1 output Tensor but get " << out_tensors_.size(); return RET_ERROR; } enable_fp16_ = ocl_runtime_->GetFp16Enable(); if (!in_tensors_.at(1)->IsConst() && enable_fp16_) { - MS_LOG(ERROR) << "GatherOpenCLKernel Unsupportted intensor1 = tensor and datatype = fp16 "; + MS_LOG(WARNING) << "GatherOpenCLKernel Unsupported intensor1 = tensor and datatype = fp16 "; return RET_ERROR; } int input_ndim = in_tensors_.front()->shape().size(); if (input_ndim < 0 || input_ndim > DIMENSION_4D) { - MS_LOG(ERROR) << "GatherOpenCLKernel only supports 1-4D input Tensor but get " << input_ndim << "D."; + MS_LOG(WARNING) << "GatherOpenCLKernel only supports 1-4D input Tensor but get " << input_ndim << "D."; return RET_ERROR; } int indices_ndim = in_tensors_.at(1)->shape().size(); if (indices_ndim > DIMENSION_1D) { - MS_LOG(ERROR) << "GatherOpenCLKernel only supports 1D indices Tensor but get " << indices_ndim << "D."; + MS_LOG(WARNING) << "GatherOpenCLKernel only supports 1D indices Tensor but get " << indices_ndim << "D."; return RET_ERROR; } TypeId data_type = in_tensors_.at(1)->data_type(); if (data_type != kNumberTypeInt32 && data_type != kNumberTypeInt64 && data_type != kNumberTypeFloat32 && data_type != kNumberTypeFloat16) { - MS_LOG(ERROR) << "GatherOpenCLKernel only supports Int32/Int64/Float32/Float16 indices Tensor."; + MS_LOG(WARNING) << "GatherOpenCLKernel only supports Int32/Int64/Float32/Float16 indices Tensor."; return RET_ERROR; } @@ -67,14 +67,14 @@ int GatherOpenCLKernel::CheckSpecs() { } axis_ =
*reinterpret_cast<int *>(in_tensors_.at(2)->data()); if (in_tensors_.at(2)->data() == nullptr) { - MS_LOG(ERROR) << "GatherOpenCLKernel need Axis."; + MS_LOG(WARNING) << "GatherOpenCLKernel need Axis."; return RET_ERROR; } if (axis_ < 0) { axis_ += input_ndim; } if (axis_ < 0 || axis_ >= input_ndim) { - MS_LOG(ERROR) << "axis is invalid: axis=" << axis_ << "."; + MS_LOG(WARNING) << "axis is invalid: axis=" << axis_ << "."; return RET_ERROR; } else { return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/int8/arithmetic_int8.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/int8/arithmetic_int8.cc index 6b4d137bc2f..705d95501dd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/int8/arithmetic_int8.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/int8/arithmetic_int8.cc @@ -41,37 +41,37 @@ namespace mindspore::kernel { int ArithmeticInt8OpenCLKernel::CheckSpecs() { for (auto &tensor : in_tensors_) { if (tensor->data_type() != kNumberTypeInt8) { - MS_LOG(ERROR) << "ArithmeticInt8OpenCLKernel only support int8 input"; + MS_LOG(WARNING) << "ArithmeticInt8OpenCLKernel only support int8 input"; return RET_ERROR; } } for (auto &tensor : out_tensors_) { if (tensor->data_type() != kNumberTypeInt8) { - MS_LOG(ERROR) << "ArithmeticInt8OpenCLKernel only support int8 output"; + MS_LOG(WARNING) << "ArithmeticInt8OpenCLKernel only support int8 output"; return RET_ERROR; } } if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto *param = reinterpret_cast<ArithmeticParameter *>(op_parameter_); CHECK_NULL_RETURN(param); if (!IsArithmetic(type())) { - MS_LOG(ERROR) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type()); + MS_LOG(WARNING) << "Unsupported Operator: " << schema::EnumNamePrimitiveType(type()); return RET_ERROR; } if (type() == schema::PrimitiveType_Eltwise) { auto mode = param->eltwise_mode_; if (mode != EltwiseMode_PROD && mode != EltwiseMode_SUM && mode != EltwiseMode_MAXIMUM) { - MS_LOG(ERROR) << "Eltwise mode not support, mode:" << mode; + MS_LOG(WARNING) << "Eltwise mode not support, mode:" << mode; return RET_ERROR; } } if (!(param->activation_type_ == ActivationType_NO_ACTIVATION || param->activation_type_ == ActivationType_RELU || param->activation_type_ == ActivationType_RELU6)) { - MS_LOG(ERROR) << "Unsupported activation type " << param->activation_type_; + MS_LOG(WARNING) << "Unsupported activation type " << param->activation_type_; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc index 36a432280f8..8cccacf625b 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc @@ -34,8 +34,8 @@ int LayerNormOpenCLKernel::CheckSpecs() { auto param = reinterpret_cast<LayerNormParameter *>(this->op_parameter_); CHECK_NULL_RETURN(param); if (in_tensors_.size() != INPUT_TENSOR_SIZE_3 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "UnSupported in_tensors_.size: " << in_tensors_.size() - << " out_tensors_.size(): " << out_tensors_.size(); + MS_LOG(WARNING) << "Unsupported in_tensors_.size: " << in_tensors_.size() + << " out_tensors_.size(): " << out_tensors_.size(); return RET_ERROR; } auto *input =
in_tensors_.at(0); @@ -43,7 +43,7 @@ int LayerNormOpenCLKernel::CheckSpecs() { auto *output = out_tensors_.at(0); CHECK_NULL_RETURN(output); if (input->shape().size() != DIMENSION_4D) { - MS_LOG(ERROR) << "UnSupported in_tensors_.shape.size: " << input->shape().size(); + MS_LOG(WARNING) << "Unsupported in_tensors_.shape.size: " << input->shape().size(); return RET_ERROR; } normalized_axis_ = param->begin_params_axis_; @@ -52,7 +52,7 @@ int LayerNormOpenCLKernel::CheckSpecs() { normalized_axis_ += input->shape().size(); } if (normalized_axis_ != 3) { - MS_LOG(ERROR) << "UnSupported normalized_axis_ : " << param->normalized_dims_; + MS_LOG(WARNING) << "Unsupported normalized_axis_ : " << param->normalized_dims_; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc index 745f7c7ba80..9ec76a29986 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc @@ -47,13 +47,13 @@ bool IsUseStrassenMatmul(const std::vector<lite::Tensor *> &in_tensors_) { int MatMulOpenCLKernel::CheckSpecs() { if (!(in_tensors_.size() == INPUT_TENSOR_SIZE_2 || in_tensors_.size() == INPUT_TENSOR_SIZE_3) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto param = reinterpret_cast<MatMulParameter *>(op_parameter_); transposeA = param->a_transpose_; if (transposeA) { - MS_LOG(ERROR) << "matmul only support a_transpose_=false yet."; + MS_LOG(WARNING) << "matmul only support a_transpose_=false yet."; return RET_ERROR; } transposeB = param->b_transpose_; @@ -61,7 +61,7 @@ int MatMulOpenCLKernel::CheckSpecs() { enable_fp16_ = ocl_runtime_->GetFp16Enable(); if (in_tensors_[0]->shape().size() != out_tensors_[0]->shape().size() || in_tensors_[0]->shape().size() < DIMENSION_2D || in_tensors_[0]->shape().size() > DIMENSION_4D) { - MS_LOG(ERROR) << "matmul only support input shape size= 2, 3 or 4."; + MS_LOG(WARNING) << "matmul only support input shape size= 2, 3 or 4."; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc index 6a03347b828..e6a4ac8bcdb 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc @@ -30,7 +30,7 @@ namespace mindspore::kernel { int OneHotOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() < INPUT_TENSOR_SIZE_2 || in_tensors_.size() > INPUT_TENSOR_SIZE_4) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc index 7e686640fff..c4c763e52fc 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc @@ -35,29 +35,29 @@ int PadOpenCLKernel::CheckSpecs() { auto param = reinterpret_cast<PadParameter *>(op_parameter_); MS_ASSERT(param); if (in_tensors_.size() != INPUT_TENSOR_SIZE_2) { - MS_LOG(ERROR) <<
"Pad only support 1 input Tensor."; + MS_LOG(WARNING) << "Pad only support 2 input Tensor."; return RET_ERROR; } if (out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "Pad only support 1 output Tensor."; + MS_LOG(WARNING) << "Pad only support 1 output Tensor."; return RET_ERROR; } auto in_ndim = in_tensors_.front()->shape().size(); if (in_ndim < DIMENSION_1D || in_ndim > DIMENSION_4D) { - MS_LOG(ERROR) << "Pad only supports 1D-4D input Tensor but get " << in_ndim << "D."; + MS_LOG(WARNING) << "Pad only supports 1D-4D input Tensor but get " << in_ndim << "D."; return RET_ERROR; } auto out_ndim = in_tensors_.front()->shape().size(); if (out_ndim < DIMENSION_1D || out_ndim > DIMENSION_4D) { - MS_LOG(ERROR) << "Pad only supports 1D-4D output Tensor but get " << out_ndim << "D."; + MS_LOG(WARNING) << "Pad only supports 1D-4D output Tensor but get " << out_ndim << "D."; return RET_ERROR; } if (in_ndim != out_ndim) { - MS_LOG(ERROR) << "Pad: input ndim != output ndim."; + MS_LOG(WARNING) << "Pad: input ndim != output ndim."; return RET_ERROR; } if (param->pad_mode_ != PaddingMode_CONSTANT) { - MS_LOG(ERROR) << "Pad only support CONSTANT MODE."; + MS_LOG(WARNING) << "Pad only support CONSTANT MODE."; return RET_ERROR; } // Compatibility code @@ -66,7 +66,7 @@ int PadOpenCLKernel::CheckSpecs() { } auto pad_shape = in_tensors_.at(1)->shape(); if (pad_shape.size() != DIMENSION_2D || pad_shape[0] != in_ndim || pad_shape[1] != DIMENSION_2D) { - MS_LOG(ERROR) << "pad tensor shape invalid."; + MS_LOG(WARNING) << "pad tensor shape invalid."; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc index 7e37510949b..5933c89215e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc @@ -35,19 +35,19 @@ namespace mindspore { namespace kernel { int PoolingOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_[0]->shape().size() != DIMENSION_4D) { - MS_LOG(ERROR) << "Only support 4d tensor."; + MS_LOG(WARNING) << "Only support 4d tensor."; return RET_ERROR; } if (parameter_->pool_mode_ != PoolMode_MaxPool && parameter_->pool_mode_ != PoolMode_AvgPool) { - MS_LOG(ERROR) << "Init `Pooling2d` kernel failed, unsupported pool mode!"; + MS_LOG(WARNING) << "Init `Pooling2d` kernel failed, unsupported pool mode!"; return RET_ERROR; } if (parameter_->act_type_ != ActType_No && parameter_->act_type_ != ActType_Relu) { - MS_LOG(ERROR) << "Unsupported activation type " << parameter_->act_type_; + MS_LOG(WARNING) << "Unsupported activation type " << parameter_->act_type_; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc index 7664ad0d8b2..08d27cf931a 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc @@ -33,17 +33,17 @@ namespace mindspore::kernel { int PowerOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_1 && in_tensors_.size() != INPUT_TENSOR_SIZE_2) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size()
<< "out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_.size() == INPUT_TENSOR_SIZE_2 && in_tensors_.at(0)->shape().size() != in_tensors_.at(1)->shape().size()) { - MS_LOG(ERROR) << "Unsupported input->shape.size " << in_tensors_.at(0)->shape().size() - << "!=" << in_tensors_.at(1)->shape().size(); + MS_LOG(WARNING) << "Unsupported input->shape.size " << in_tensors_.at(0)->shape().size() + << "!=" << in_tensors_.at(1)->shape().size(); return RET_ERROR; } if (in_tensors_.at(0)->shape().size() > DIMENSION_4D) { - MS_LOG(ERROR) << "in_tensors_->shape.size must be less than 4"; + MS_LOG(WARNING) << "in_tensors_->shape.size must not be greater than 4"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc index 0552645dde9..3337180ef4a 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc @@ -87,20 +87,20 @@ int PReluOpenCLKernel::InitWeights() { int PReluOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "PRelu Only supported in_tensors_.size=2 and out_tensors_.size()=1 but your in_tensors_.size=" - << in_tensors_.size() << " out_tensors_.size()=" << out_tensors_.size(); + MS_LOG(WARNING) << "PRelu Only supported in_tensors_.size=2 and out_tensors_.size()=1 but your in_tensors_.size=" + << in_tensors_.size() << " out_tensors_.size()=" << out_tensors_.size(); return RET_ERROR; } auto weight_tensor = in_tensors_.at(1); auto in_tensor_channel = GpuTensorInfo(in_tensors_[0]).C; auto weight_channel = GpuTensorInfo(in_tensors_[1]).C; if (weight_channel != 1 && weight_channel != in_tensor_channel) { - MS_LOG(ERROR) << "PRelu weight must be equal with in_teneors channel size, but your weight size is " - << weight_channel << " and your input channel size is " << in_tensor_channel; + MS_LOG(WARNING) << "PRelu weight must be equal with in_tensors channel size, but your weight size is " + << weight_channel << " and your input channel size is " << in_tensor_channel; return mindspore::lite::RET_ERROR; } if (weight_tensor->data_type() != kNumberTypeFloat16 && weight_tensor->data_type() != kNumberTypeFloat32) { - MS_LOG(ERROR) << "PRelu weight must be float32 or float16"; + MS_LOG(WARNING) << "PRelu weight must be float32 or float16"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc index 4e37b384e15..e601c896cd4 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc @@ -126,20 +126,20 @@ int ReduceOpenCLKernel::SetAxes() { int ReduceOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto input = in_tensors_.at(0); CHECK_NULL_RETURN(input); if (input->shape()[0] > DIMENSION_1D) { - MS_LOG(ERROR) << "reduce op only support n = 1"; + MS_LOG(WARNING) << "reduce op only support n = 1"; return RET_PARAM_INVALID; } inShape = GpuTensorInfo(in_tensors_[0]);
auto reduce_param = reinterpret_cast<ReduceParameter *>(op_parameter_); CHECK_NULL_RETURN(reduce_param); if (GetReduceTypeStr(reduce_param->mode_).empty()) { - MS_LOG(ERROR) << "not supported reduce type:" << reduce_param->mode_; + MS_LOG(WARNING) << "not supported reduce type:" << reduce_param->mode_; return RET_PARAM_INVALID; } auto ret = SetAxes(); @@ -150,11 +150,11 @@ int ReduceOpenCLKernel::CheckSpecs() { wc_reduce_ = IsWCReduce(reduce_axes_); c_reduce_ = IsCReduce(reduce_axes_); if (!hw_reduce_ && !wc_reduce_ && !c_reduce_) { - MS_LOG(ERROR) << "Unsupported reduce axes"; + MS_LOG(WARNING) << "Unsupported reduce axes"; return RET_PARAM_INVALID; } if ((c_reduce_ || wc_reduce_) && !reduce_param->keep_dims_) { - MS_LOG(ERROR) << "reduce axis (2,3) should keep dims"; + MS_LOG(WARNING) << "reduce axis (2,3) should keep dims"; return RET_PARAM_INVALID; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc index 33c3da03886..aa5d879ac26 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc @@ -32,20 +32,20 @@ namespace mindspore::kernel { int ResizeOpenCLKernel::CheckSpecs() { if (!(in_tensors_.size() == INPUT_TENSOR_SIZE_1 || in_tensors_.size() == INPUT_TENSOR_SIZE_2) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto in_shape = in_tensors_[0]->shape(); auto out_shape = out_tensors_[0]->shape(); if (in_shape.size() != DIMENSION_4D || out_shape.size() != DIMENSION_4D || in_shape[0] != out_shape[0] || in_shape[3] != out_shape[3]) { - MS_LOG(ERROR) << "resize op only support 4D and axes HW"; + MS_LOG(WARNING) << "resize op only support 4D and axes HW"; return RET_PARAM_INVALID; } auto resize_param = reinterpret_cast<ResizeParameter *>(op_parameter_); CHECK_NULL_RETURN(resize_param); if (resize_param->method_ != schema::ResizeMethod_LINEAR && resize_param->method_ != schema::ResizeMethod_NEAREST) { - MS_LOG(ERROR) << "unsupported resize method:" << resize_param->method_; + MS_LOG(WARNING) << "unsupported resize method:" << resize_param->method_; return RET_PARAM_INVALID; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc index 5855ae7763e..c71dd1fef5d 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc @@ -54,7 +54,8 @@ int ScaleOpenCLKernel::CheckSpecs() { bool isScaleC = (in_shape.size() == 4 && axis == 3) || (in_shape.size() == 2 && axis == 1); bool isScaleH = in_shape.size() == 4 && axis == 1; if (isScalar || !(isScaleC || isScaleH)) { - MS_LOG(ERROR) << "unsupported scale axis " << axis << ", in shape " << in_shape << ", scale shape" << scale_shape; + MS_LOG(WARNING) << "unsupported scale axis " << axis << ", in shape " << in_shape << ", scale shape " + << scale_shape; return RET_ERROR; } } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc index 41e55f15a15..41cda1f8f59 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc @@ -40,14 +40,14 @@ std::vector<float> SoftmaxOpenCLKernel::GetMaskForLastChannel(int
channels) { int SoftmaxOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } SoftmaxParameter *parameter = reinterpret_cast<SoftmaxParameter *>(op_parameter_); axis_ = parameter->axis_; auto in_shape = in_tensors_[0]->shape(); if (in_shape.size() > DIMENSION_4D) { - MS_LOG(ERROR) << "Init Softmax kernel failed: Unsupported shape size: " << in_shape.size(); + MS_LOG(WARNING) << "Init Softmax kernel failed: Unsupported shape size: " << in_shape.size(); return RET_ERROR; } if (axis_ < 0) { @@ -55,7 +55,7 @@ int SoftmaxOpenCLKernel::CheckSpecs() { } axis_ += DIMENSION_4D - in_shape.size(); if (axis_ != 1 && axis_ != 2 && axis_ != 3) { - MS_LOG(ERROR) << "Init Softmax kernel failed: softmax axis should be H W or C"; + MS_LOG(WARNING) << "Init Softmax kernel failed: softmax axis should be H W or C"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc index 52f671fb7fa..74d23d0cec8 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc @@ -29,16 +29,16 @@ using mindspore::schema::PrimitiveType_SpaceToBatchND; namespace mindspore::kernel { int SpaceToBatchNDOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) { - MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type(); + MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type(); return RET_ERROR; } if (in_tensors_[0]->shape().size() != DIMENSION_4D && out_tensors_[0]->shape().size() != DIMENSION_4D) { - MS_LOG(ERROR) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", " - << out_tensors_[0]->shape().size(); + MS_LOG(WARNING) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", " + << out_tensors_[0]->shape().size(); return RET_ERROR; } auto *param = reinterpret_cast<SpaceToBatchParameter *>(this->op_parameter_); @@ -48,14 +48,14 @@ int SpaceToBatchNDOpenCLKernel::CheckSpecs() { param->padded_in_shape_[kNHWC_W] = in_tensors_[0]->shape().at(kNHWC_W) + param->paddings_[2] + param->paddings_[3]; param->padded_in_shape_[kNHWC_C] = in_tensors_[0]->shape().at(kNHWC_C); if (param->block_sizes_[0] < 1 || param->block_sizes_[1] < 1) { - MS_LOG(ERROR) << "block_sizes_ must > 1, actual " << param->block_sizes_[0] << ", " << param->block_sizes_[1]; + MS_LOG(WARNING) << "block_sizes_ must be >= 1, actual " << param->block_sizes_[0] << ", " << param->block_sizes_[1]; return RET_ERROR; } MS_ASSERT(param->block_sizes_[0]); MS_ASSERT(param->block_sizes_[1]); if (param->padded_in_shape_[kNHWC_H] % param->block_sizes_[0] || param->padded_in_shape_[kNHWC_W] % param->block_sizes_[1]) { - MS_LOG(ERROR) << "padded shape must be multiple of block!"; + MS_LOG(WARNING) << "padded shape must be multiple of block!"; return
RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc index fcfbdc8e13a..a1bad4531df 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc @@ -32,7 +32,7 @@ using mindspore::schema::PrimitiveType_SpaceToDepth; namespace mindspore::kernel { int SpaceToDepthOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.cc index 5922f71611c..2382c519b20 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.cc @@ -112,12 +112,12 @@ int SparseToDenseOpenCLKernel::InitWeights() { int SparseToDenseOpenCLKernel::CheckSpecs() { if (in_tensors_.size() < DIMENSION_3D || out_tensors_.at(0)->shape().size() > DIMENSION_4D) { - MS_LOG(ERROR) << " only support out_tensors_ dim <= 4 and in_tensors_.size >= 3"; + MS_LOG(WARNING) << " only support out_tensors_ dim <= 4 and in_tensors_.size >= 3"; return RET_ERROR; } if (in_tensors_.at(0)->shape().size() > DIMENSION_4D || out_tensors_.at(0)->shape().size() > DIMENSION_4D) { - MS_LOG(ERROR) << "Unsupported inputdim: " << in_tensors_[0]->shape().size() << "outdim" - << out_tensors_[0]->shape().size(); + MS_LOG(WARNING) << "Unsupported inputdim: " << in_tensors_[0]->shape().size() << " outdim: " + << out_tensors_[0]->shape().size(); return RET_ERROR; } if (input_dim_ == DIMENSION_2D) { @@ -129,7 +129,7 @@ int SparseToDenseOpenCLKernel::CheckSpecs() { } auto param = reinterpret_cast<SparseToDenseParameter *>(op_parameter_); if (param->validate_indices_) { - MS_LOG(ERROR) << "Unsupported unordered for in_tensors_indices"; + MS_LOG(WARNING) << "Unsupported unordered for in_tensors_indices"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/split.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/split.cc index 6669b54386a..e140f1da40f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/split.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/split.cc @@ -69,34 +69,34 @@ int SplitOpenCLKernel::CheckSpecs() { if ((out_tensors_.size() != OUTPUT_TENSOR_SIZE_2 || (out_tensors_.size() != OUTPUT_TENSOR_SIZE_3 && param->split_dim_ == 0)) && in_tensors_.size() != INPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } if (in_tensors_.at(0)->IsConst()) { - MS_LOG(ERROR) << "in_tensors_ must be tensor"; + MS_LOG(WARNING) << "in_tensors_ must be tensor"; return RET_ERROR; } for (auto &out_tensor : out_tensors_) { if (out_tensor->IsConst()) { - MS_LOG(ERROR) << "out_tensor must be tensor"; + MS_LOG(WARNING) << "out_tensor must be tensor"; return RET_ERROR; } } if (!(param->num_split_ == 2 || param->split_dim_ == 0)) { - MS_LOG(ERROR) << "num_split_ only supported = 2 or split_dim_ = 0 yet"; + MS_LOG(WARNING) << "num_split_ only
supported = 2 or split_dim_ = 0 yet"; return RET_ERROR; } if (param->split_dim_ < 0 || param->split_dim_ > 3) { - MS_LOG(ERROR) << "split_dim_ must between 0~3"; + MS_LOG(WARNING) << "split_dim_ must be between 0~3"; return RET_ERROR; } if (param->split_sizes_ == nullptr) { - MS_LOG(ERROR) << "split_sizes_ can not nullptr"; + MS_LOG(WARNING) << "split_sizes_ can not be nullptr"; return RET_ERROR; } if (param->num_split_ == 1 && param->split_sizes_[0] == 0) { - MS_LOG(ERROR) << "param->split_sizes_[0] is zero."; + MS_LOG(WARNING) << "param->split_sizes_[0] is zero."; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/stack.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/stack.cc index ff2f0acf526..76007901dc6 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/stack.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/stack.cc @@ -72,32 +72,32 @@ int StackOpenCLKernel::CheckSpecs() { auto param = reinterpret_cast<StackParameter *>(this->op_parameter_); axis_ = param->axis_; if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 && out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << " only support input size = 2 and output size = 1"; + MS_LOG(WARNING) << " only support input size = 2 and output size = 1"; return RET_ERROR; } for (auto &tensor : in_tensors_) { if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) { - MS_LOG(ERROR) << " only support fp32/fp16 input"; + MS_LOG(WARNING) << " only support fp32/fp16 input"; return RET_ERROR; } } for (auto &tensor : out_tensors_) { if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) { - MS_LOG(ERROR) << " only support fp32/fp16 output"; + MS_LOG(WARNING) << " only support fp32/fp16 output"; return RET_ERROR; } } if (in_tensors_[0]->shape().size() > DIMENSION_4D || in_tensors_[0]->shape().size() <= 0) { - MS_LOG(ERROR) << " only support 0 < in_tensors_[0]->shape().size() <= 4"; + MS_LOG(WARNING) << " only support 0 < in_tensors_[0]->shape().size() <= 4"; return RET_ERROR; } axis_ = axis_ < 0 ? axis_ + in_tensors_[0]->shape().size() : axis_; if (axis_ > 3) { - MS_LOG(ERROR) << " only support axis <= 3 "; + MS_LOG(WARNING) << " only support axis <= 3 "; return RET_ERROR; } if (axis_ > in_tensors_[0]->shape().size()) { - MS_LOG(ERROR) << " stack axis must been <= in_tensors_[0]->shape().size() "; + MS_LOG(WARNING) << " stack axis must be <= in_tensors_[0]->shape().size() "; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc index a6c337427aa..3656bec61b0 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc @@ -33,7 +33,7 @@ namespace mindspore::kernel { int StridedSliceOpenCLKernel::CheckSpecs() { if (type() == PrimitiveType_SliceFusion) { if (in_tensors_.size() != INPUT_TENSOR_SIZE_3) { - MS_LOG(ERROR) << "Slice only supports 3 input Tensor."; + MS_LOG(WARNING) << "Slice only supports 3 input Tensor."; return RET_ERROR; } int in_ndim = in_tensors_.front()->shape().size(); @@ -45,7 +45,7 @@ int StridedSliceOpenCLKernel::CheckSpecs() { } } else if (type() == PrimitiveType_StridedSlice) { if (in_tensors_.size() != INPUT_TENSOR_SIZE_4) { - MS_LOG(ERROR) << "StridedSlice only supports 4 input Tensor."; + MS_LOG(WARNING) << "StridedSlice only supports 4 input Tensor."; return RET_ERROR; } int in_ndim = in_tensors_.front()->shape().size(); @@ -59,26 +59,26 @@ int StridedSliceOpenCLKernel::CheckSpecs() { return RET_ERROR; } } else { - MS_LOG(ERROR) << "type
error."; + MS_LOG(WARNING) << "type error."; return RET_ERROR; } const std::string kernel_name = type() == PrimitiveType_SliceFusion ? "Slice" : "StridedSlice"; if (out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << kernel_name + " only supports 1 output Tensor."; + MS_LOG(WARNING) << kernel_name + " only supports 1 output Tensor."; return RET_ERROR; } auto in_ndim = in_tensors_.front()->shape().size(); if (in_ndim == 0 || in_ndim > DIMENSION_4D) { - MS_LOG(ERROR) << kernel_name + " only supports 1D-4D input tensor"; + MS_LOG(WARNING) << kernel_name + " only supports 1D-4D input tensor"; return RET_ERROR; } auto out_ndim = out_tensors_.front()->shape().size(); if (out_ndim > DIMENSION_4D) { - MS_LOG(ERROR) << kernel_name + " only supports 0D-4D output tensor"; + MS_LOG(WARNING) << kernel_name + " only supports 0D-4D output tensor"; return RET_ERROR; } if (InitConstArgs() != RET_OK) { - MS_LOG(ERROR) << "call InitConstArgs() failed"; + MS_LOG(WARNING) << "call InitConstArgs() failed"; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc index 609dff1e571..60fb702cdcc 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc @@ -30,13 +30,13 @@ using mindspore::lite::opencl::MemType; namespace mindspore::kernel { int ToFormatOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); + MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } auto data_type = in_tensors_.front()->data_type(); if (data_type != kNumberTypeFloat32 && data_type != kNumberTypeFloat16 && data_type != kNumberTypeInt32 && data_type != kNumberTypeInt8) { - MS_LOG(ERROR) << "Unsupported data type " << data_type; + MS_LOG(WARNING) << "Unsupported data type " << data_type; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc index 6ee00efa612..94d8f02c5b7 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc @@ -30,17 +30,17 @@ using mindspore::schema::PrimitiveType_Transpose; namespace mindspore::kernel { int TransposeOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { - MS_LOG(ERROR) << "Transpose input output size unsupported."; + MS_LOG(WARNING) << "Transpose input output size unsupported."; return RET_ERROR; } int in_ndim = in_tensors_.at(0)->shape().size(); int out_ndim = out_tensors_.at(0)->shape().size(); if (in_ndim != out_ndim) { - MS_LOG(ERROR) << "Transpose only support in_ndim equal to out_ndim."; + MS_LOG(WARNING) << "Transpose only support in_ndim equal to out_ndim."; return RET_ERROR; } if (in_ndim > DIMENSION_4D) { - MS_LOG(ERROR) << "Transpose don't support 5d tensor or higher."; + MS_LOG(WARNING) << "Transpose don't support 5d tensor or higher."; return RET_ERROR; } if (CheckParamLikeTensor("Transpose", "perm", in_tensors_.at(1), kNumberTypeInt32, {in_ndim}) != RET_OK) { diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc index 4cdabceb384..b027a2eb028 100644 ---
a/mindspore/lite/tools/anf_exporter/anf_exporter.cc +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc @@ -381,7 +381,11 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<schema::MetaGraphT> &meta_graphT) { - RemoveIfDepend(cnode); + ret = RemoveIfDepend(cnode); + if (ret != RET_OK) { + MS_LOG(ERROR) << "RemoveIfDepend failed"; + break; + } if (prim->name() == mindspore::ops::kNameDepend || prim->name() == mindspore::lite::kNameTupleGetItem || prim->name() == mindspore::lite::kNameMakeTuple) { continue; @@ -389,7 +393,11 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<schema::MetaGraphT> &meta_graphT) { if (prim->name() == "make_tuple") { continue; } - RemoveIfMakeTuple(cnode); + ret = RemoveIfMakeTuple(cnode); + if (ret != RET_OK) { + MS_LOG(ERROR) << "RemoveIfMakeTuple failed"; + break; + } auto node = std::make_unique<schema::CNodeT>(); if (node == nullptr) { MS_LOG(ERROR) << "object failed to be constructed"; diff --git a/mindspore/lite/tools/anf_exporter/fetch_content.cc b/mindspore/lite/tools/anf_exporter/fetch_content.cc index f7a56f77bf4..8ea4112b5b4 100644 --- a/mindspore/lite/tools/anf_exporter/fetch_content.cc +++ b/mindspore/lite/tools/anf_exporter/fetch_content.cc @@ -402,7 +402,7 @@ int FetchDataFromCNode(const CNodePtr &cnode, size_t index, converter::FmkType f return RET_OK; } -void RemoveIfDepend(const CNodePtr &cnode) { +int RemoveIfDepend(const CNodePtr &cnode) { bool has_depend = false; std::vector<AnfNodePtr> inputs; inputs.clear(); @@ -418,7 +418,7 @@ void RemoveIfDepend(const CNodePtr &cnode) { auto value_node = depend_node->input(0)->cast<ValueNodePtr>(); if (value_node == nullptr) { MS_LOG(ERROR) << "value node is invalid."; - return; + return RET_ERROR; } if (value_node->value() != nullptr && opt::CheckPrimitiveType(depend_node, prim::kPrimDepend)) { has_depend = true; @@ -439,9 +439,10 @@ void RemoveIfDepend(const CNodePtr &cnode) { if (has_depend) { cnode->set_inputs(inputs); } + return RET_OK; } -void RemoveIfMakeTuple(const CNodePtr &cnode) { +int RemoveIfMakeTuple(const CNodePtr &cnode) { bool has_make_tuple = false; std::vector<AnfNodePtr> inputs; inputs.clear(); @@ -457,7 +458,7 @@ void RemoveIfMakeTuple(const CNodePtr &cnode) { auto value_node = make_tuple_node->input(0)->cast<ValueNodePtr>(); if (value_node == nullptr) { MS_LOG(ERROR) << "value node is invalid."; - return; + return RET_ERROR; } if (value_node->value() != nullptr && (opt::CheckPrimitiveType(make_tuple_node, prim::kPrimMakeTuple) || opt::CheckPrimitiveType(make_tuple_node, opt::kPrimMakeTupleV2))) { has_make_tuple = true; @@ -472,6 +473,7 @@ if (has_make_tuple) { cnode->set_inputs(inputs); } + return RET_OK; } } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/anf_exporter/fetch_content.h b/mindspore/lite/tools/anf_exporter/fetch_content.h index ef75c1f65ef..e59bc0fd3f2 100644 --- a/mindspore/lite/tools/anf_exporter/fetch_content.h +++ b/mindspore/lite/tools/anf_exporter/fetch_content.h @@ -44,9 +44,9 @@ int FetchDataFromValueNode(const CNodePtr &cnode, size_t index, converter::FmkTy DataInfo *data_info); int FetchDataFromCNode(const CNodePtr &cnode, size_t index, converter::FmkType fmk_type, bool train_flag, DataInfo *data_info); -void RemoveIfDepend(const CNodePtr &cnode); +int RemoveIfDepend(const CNodePtr &cnode); -void RemoveIfMakeTuple(const CNodePtr &cnode); +int RemoveIfMakeTuple(const CNodePtr &cnode); } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_ANF_EXPORTER_FETCH_CONTENT_H_
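
The ERROR-to-WARNING demotion in every CheckSpecs() above reads as a fallback contract: a GPU kernel that rejects its specs is expected to be skipped in favor of another backend, so the rejection is logged as a warning rather than a failure. Below is a minimal, self-contained C++ sketch of that pattern under that assumption; Tensor, CheckSpecs, Ret, and the fallback caller are illustrative stand-ins, not MindSpore's real scheduler API.

#include <iostream>
#include <vector>

enum Ret { RET_OK = 0, RET_ERROR = 1 };

struct Tensor {
  std::vector<int> shape;
};

// Mirrors the CheckSpecs() contract in the patch: log at WARNING severity and
// return RET_ERROR when the GPU kernel cannot handle this configuration.
int CheckSpecs(const std::vector<Tensor> &in, const std::vector<Tensor> &out) {
  if (in.size() != 1 || out.size() != 1) {
    std::cerr << "WARNING: in size: " << in.size() << ", out size: " << out.size() << "\n";
    return RET_ERROR;
  }
  if (in[0].shape.size() != 4) {
    std::cerr << "WARNING: only 4D input is supported\n";
    return RET_ERROR;
  }
  return RET_OK;
}

int main() {
  std::vector<Tensor> in = {Tensor{{1, 16, 16}}};  // 3D input: rejected
  std::vector<Tensor> out = {Tensor{{1, 16, 16}}};
  // A rejected spec is an expected outcome, not a fatal error: the caller
  // falls back to another backend instead of aborting the whole graph.
  if (CheckSpecs(in, out) != RET_OK) {
    std::cout << "GPU kernel rejected, falling back to CPU kernel\n";
  }
  return 0;
}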
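
The anf_exporter/fetch_content change works the other way around: RemoveIfDepend and RemoveIfMakeTuple previously returned void, so an invalid value node was logged and then silently swallowed; returning int lets Anf2Fb stop the export on failure. A standalone sketch of the same refactor, with a hypothetical Node type standing in for MindSpore's CNodePtr:

#include <iostream>
#include <vector>

enum Ret { RET_OK = 0, RET_ERROR = 1 };

struct Node {
  bool valid = true;
};

// Before the patch this kind of helper returned void, so the error below was
// invisible to the caller. Returning an int status makes it checkable.
int RemoveIfMakeTuple(Node *node) {
  if (node == nullptr || !node->valid) {
    std::cerr << "ERROR: value node is invalid.\n";
    return RET_ERROR;
  }
  // ... rewrite the node's inputs in place ...
  return RET_OK;
}

int main() {
  std::vector<Node> graph(3);
  graph[1].valid = false;  // simulate a malformed node mid-export
  int ret = RET_OK;
  for (auto &node : graph) {
    // Caller pattern from Anf2Fb: check the status and break out of the
    // export loop instead of emitting a graph built from a broken node.
    ret = RemoveIfMakeTuple(&node);
    if (ret != RET_OK) {
      std::cerr << "ERROR: RemoveIfMakeTuple failed\n";
      break;
    }
  }
  return ret == RET_OK ? 0 : 1;
}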