From d1f37486ca26104fe7aabe9e80a3fb62056363f0 Mon Sep 17 00:00:00 2001
From: greatpanc
Date: Fri, 31 Dec 2021 10:32:48 +0800
Subject: [PATCH] opencl op log clean

---
 .../src/runtime/kernel/opencl/kernel/arithmetic.cc | 8 ++++----
 .../lite/src/runtime/kernel/opencl/kernel/concat.cc | 2 +-
 .../lite/src/runtime/kernel/opencl/kernel/matmul.cc | 6 +++---
 .../lite/src/runtime/kernel/opencl/kernel/reduce.cc | 6 +++---
 .../lite/src/runtime/kernel/opencl/kernel/reshape.cc | 8 ++++----
 .../lite/src/runtime/kernel/opencl/kernel/scale.cc | 1 +
 .../lite/src/runtime/kernel/opencl/opencl_kernel.h | 10 +++++-----
 mindspore/lite/src/runtime/kernel/opencl/utils.cc | 10 +++++-----
 8 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
index 27105f52f60..61d0b2848ff 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
@@ -53,24 +53,24 @@ int ArithmeticOpenCLKernel::CheckSpecs() {
   }
   if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
-    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
     return RET_ERROR;
   }
   auto *param = reinterpret_cast<ArithmeticParameter *>(op_parameter_);
   if (!IsArithmetic(type())) {
-    MS_LOG(ERROR) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
+    MS_LOG(WARNING) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
     return RET_ERROR;
   }
   if (type() == schema::PrimitiveType_Eltwise) {
     auto mode = param->eltwise_mode_;
     if (mode != EltwiseMode_PROD && mode != EltwiseMode_SUM && mode != EltwiseMode_MAXIMUM) {
-      MS_LOG(ERROR) << "Eltwise mode not support, mode:" << mode;
+      MS_LOG(WARNING) << "Eltwise mode not support, mode:" << mode;
       return RET_ERROR;
     }
   }
   if (!(param->activation_type_ == ActivationType_NO_ACTIVATION || param->activation_type_ == ActivationType_RELU ||
         param->activation_type_ == ActivationType_RELU6)) {
-    MS_LOG(ERROR) << "Unsupported activation type " << param->activation_type_;
+    MS_LOG(WARNING) << "Unsupported activation type " << param->activation_type_;
     return RET_ERROR;
   }
   return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc
index 8a8f3ea9107..b1358bc2641 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc
@@ -40,7 +40,7 @@ int ConcatOpenCLKernel::RunAxis0() {
   for (size_t i = 0; i < in_tensors_.size(); i++) {
     auto src_data = weight_ptrs_.at(i) == nullptr ? in_tensors_[i]->data() : weight_ptrs_.at(i);
     if (allocator_->GetImageSize(src_data, &img_size) != RET_OK) {
-      MS_LOG(ERROR) << "GetImageSize failed.";
+      MS_LOG(WARNING) << "GetImageSize failed.";
       return RET_ERROR;
     }
     auto src_origin = cl::array<cl::size_type, 3U>{0, 0, 0};
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
index d0f54ab05ab..6705bf8210a 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
@@ -446,7 +446,7 @@ kernel::InnerKernel *OpenCLMatMulKernelCreator(const std::vector<lite::Tensor *
       new (std::nothrow) MatMulOpenCLKernel(opParameter, inputs, outputs, static_cast<const lite::InnerContext *>(ctx));
   }
   if (kernel == nullptr) {
-    MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr.";
+    MS_LOG(WARNING) << "kernel " << opParameter->name_ << "is nullptr.";
     free(opParameter);
     return nullptr;
   }
@@ -454,14 +454,14 @@ kernel::InnerKernel *OpenCLMatMulKernelCreator(const std::vector<lite::Tensor *
     MS_LOG(WARNING) << "kernel don't infer shape yet!";
     auto ret = reinterpret_cast<MatMulOpenCLKernel *>(kernel)->StoreConstData();
     if (ret != mindspore::lite::RET_OK) {
-      MS_LOG(ERROR) << "Store " << opParameter->name_ << " const data failed!";
+      MS_LOG(WARNING) << "Store " << opParameter->name_ << " const data failed!";
       delete kernel;
       return nullptr;
     }
     return kernel;
   }
   if (kernel->CheckSpecs() != RET_OK || kernel->OpenCLKernel::CheckSpecs() != RET_OK) {
-    MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!";
+    MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification failed!";
     delete kernel;
     return nullptr;
   }
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc
index 25cfaa5c355..96959383e8d 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc
@@ -110,7 +110,7 @@ int ReduceOpenCLKernel::SetShapeSizeIs0Axes() {
     reduction_indices = reduction_indices + (C4NUM % input_shape_size);
     reduce_axes_[reduction_indices] = true;
   } else {
-    MS_LOG(ERROR) << "in Reduce: axes tensor's reduction_indices should be -1, 1, 2, 3";
+    MS_LOG(WARNING) << "in Reduce: axes tensor's reduction_indices should be -1, 1, 2, 3";
     return RET_ERROR;
   }
   return RET_OK;
@@ -132,7 +132,7 @@ int ReduceOpenCLKernel::SetShapeSizeIs1Axes() {
     axes_[i] = reinterpret_cast<int *>(axes_tensor->data())[i];
   }
   if (num_axes > 2 || num_axes < 1) {
-    MS_LOG(ERROR) << "Unsupported reduce num axes " << num_axes;
+    MS_LOG(WARNING) << "Unsupported reduce num axes " << num_axes;
     return RET_PARAM_INVALID;
   }
 
@@ -165,7 +165,7 @@ int ReduceOpenCLKernel::SetAxes() {
   } else if (axes_tensor->shape().size() == 1) {
     return SetShapeSizeIs1Axes();
   } else {
-    MS_LOG(ERROR) << "in Reduce: axes tensor's ndim should be 0 or 1.";
+    MS_LOG(WARNING) << "in Reduce: axes tensor's ndim should be 0 or 1.";
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
index ca115ac9ba4..7eac4069d17 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
@@ -34,20 +34,20 @@ namespace mindspore::kernel {
 int ReshapeOpenCLKernel::CheckSpecs() {
   if ((in_tensors_.size() != INPUT_TENSOR_SIZE_1 && in_tensors_.size() != INPUT_TENSOR_SIZE_2) ||
       out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
-    MS_LOG(ERROR) << "Reshape input output size unsupported.";
MS_LOG(WARNING) << "Reshape input output size unsupported."; return RET_ERROR; } if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16 && in_tensors_[0]->data_type() != kNumberTypeInt32) { - MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type(); + MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type(); return RET_ERROR; } if (in_tensors_[0]->shape().size() > DIMENSION_4D) { - MS_LOG(ERROR) << "Reshape input size should in 0-4, actual: " << in_tensors_[0]->shape().size(); + MS_LOG(WARNING) << "Reshape input size should in 0-4, actual: " << in_tensors_[0]->shape().size(); return RET_ERROR; } if (out_tensors_[0]->shape().size() > OUTPUT_TENSOR_SIZE_4) { - MS_LOG(ERROR) << "Reshape output size should in 0-4, actual: " << out_tensors_[0]->shape().size(); + MS_LOG(WARNING) << "Reshape output size should in 0-4, actual: " << out_tensors_[0]->shape().size(); return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc index ab93a27b7e6..4be56e35762 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc @@ -37,6 +37,7 @@ int ScaleOpenCLKernel::CheckSpecs() { auto *param = reinterpret_cast(op_parameter_); if (param->activation_type_ != ActType_No && param->activation_type_ != ActType_Relu && param->activation_type_ != ActType_Relu6) { + MS_LOG(WARNING) << "unsupported scale activation type " << param->activation_type_; return RET_ERROR; } auto *scale_param = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h index e86e96e97f3..21a733c791b 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h +++ b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h @@ -258,14 +258,14 @@ kernel::InnerKernel *OpenCLKernelCreator(const std::vector &inpu auto *kernel = new (std::nothrow) T(reinterpret_cast(opParameter), inputs, outputs, static_cast(ctx)); if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr."; + MS_LOG(WARNING) << "kernel " << opParameter->name_ << "is nullptr."; free(opParameter); return nullptr; } auto ret = kernel->CheckSpecsWithoutShape(); if (ret != mindspore::lite::RET_OK) { - MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification Without shape failed!"; + MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification Without shape failed!"; delete kernel; return nullptr; } @@ -281,19 +281,19 @@ kernel::InnerKernel *OpenCLKernelCreator(const std::vector &inpu } ret = kernel->CheckSpecs(); if (ret != mindspore::lite::RET_OK) { - MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!"; + MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification failed!"; delete kernel; return nullptr; } ret = kernel->OpenCLKernel::CheckSpecs(); if (ret != mindspore::lite::RET_OK) { - MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!"; + MS_LOG(WARNING) << "Check " << opParameter->name_ << " specification failed!"; delete kernel; return nullptr; } ret = reinterpret_cast(kernel)->StoreConstData(); if (ret != mindspore::lite::RET_OK) { - MS_LOG(ERROR) << "Store " << opParameter->name_ << " const data failed!"; + MS_LOG(WARNING) << "Store " << opParameter->name_ << " const data failed!"; delete kernel; return 
   }
diff --git a/mindspore/lite/src/runtime/kernel/opencl/utils.cc b/mindspore/lite/src/runtime/kernel/opencl/utils.cc
index da3569bd5d6..29ae2d13cc1 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/utils.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/utils.cc
@@ -338,11 +338,11 @@ void PackNCHWToNHWC4(void *src, void *dst, bool src_is_fp16, bool dst_is_fp16, c
 int CheckParamLikeTensor(const std::string &kernel_name, const std::string &tensor_name, lite::Tensor *tensor,
                          TypeId expect_data_type, const std::vector<int> &expect_shape) {
   if (!tensor->IsConst()) {
-    MS_LOG(ERROR) << "in " << kernel_name << ": tensor " << tensor_name << " must be Const.";
+    MS_LOG(WARNING) << "in " << kernel_name << ": tensor " << tensor_name << " must be Const.";
     return RET_ERROR;
   }
   if (tensor->data_type() != expect_data_type) {
-    MS_LOG(ERROR) << "in " << kernel_name << ": tensor's data_type must be " << expect_data_type;
+    MS_LOG(WARNING) << "in " << kernel_name << ": tensor's data_type must be " << expect_data_type;
     return RET_ERROR;
   }
   if (tensor->shape() != expect_shape) {
@@ -358,9 +358,9 @@ int CheckParamLikeTensor(const std::string &kernel_name, const std::string &tens
     }
     tensor_shape_str += ")";
 
-    MS_LOG(ERROR) << "in " << kernel_name
-                  << ": tensor's shape is error. expect_shape: " + expect_shape_str +
-                       " tensor->shape(): " + tensor_shape_str;
+    MS_LOG(WARNING) << "in " << kernel_name
+                    << ": tensor's shape is error. expect_shape: " + expect_shape_str +
+                         " tensor->shape(): " + tensor_shape_str;
     return RET_ERROR;
   }
   return RET_OK;
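
Note on the pattern this patch enforces: every rejected spec check now logs at WARNING rather than ERROR while still returning a non-OK code (or nullptr from the creator). A plausible rationale, which is an assumption on my part since the commit message does not state it, is that a failed CheckSpecs is recoverable: the runtime can fall back to another kernel implementation, so the log should not read as fatal. The standalone C++ sketch below illustrates that warn-then-fall-back flow; Log, LogLevel, CheckSpecs, CreateGpuKernel, and the CPU fallback are hypothetical stand-ins, not MindSpore APIs.

// Warn-then-fall-back sketch (hypothetical names, not MindSpore APIs).
#include <iostream>
#include <memory>
#include <string>

enum class LogLevel { kWarning, kError };

void Log(LogLevel level, const std::string &msg) {
  std::cerr << (level == LogLevel::kWarning ? "[WARNING] " : "[ERROR] ") << msg << '\n';
}

struct Kernel {
  virtual ~Kernel() = default;
  virtual int CheckSpecs() = 0;  // returns 0 on success, non-zero if unsupported
  virtual const char *Name() const = 0;
};

struct GpuKernel : Kernel {
  int CheckSpecs() override {
    // Unsupported configuration: a WARNING, not an ERROR, because the caller
    // still has a working fallback path.
    Log(LogLevel::kWarning, "unsupported activation type, rejecting GPU kernel");
    return 1;
  }
  const char *Name() const override { return "gpu"; }
};

struct CpuKernel : Kernel {
  int CheckSpecs() override { return 0; }
  const char *Name() const override { return "cpu"; }
};

// Mirrors the creator functions in the patch: on a failed spec check, clean up
// and return nullptr so the framework can try the next provider.
std::unique_ptr<Kernel> CreateGpuKernel() {
  auto kernel = std::make_unique<GpuKernel>();
  if (kernel->CheckSpecs() != 0) {
    return nullptr;
  }
  return kernel;
}

int main() {
  std::unique_ptr<Kernel> kernel = CreateGpuKernel();
  if (kernel == nullptr) {
    kernel = std::make_unique<CpuKernel>();  // fallback path
  }
  std::cout << "selected kernel: " << kernel->Name() << '\n';
  return 0;
}

Under this reading, ERROR stays reserved for failures with no fallback, which is consistent with the patch leaving the non-spec-check error paths untouched.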