From fc370b3e48a9f7d9d89780027f6571959f6e5918 Mon Sep 17 00:00:00 2001
From: wandongdong
Date: Tue, 1 Dec 2020 23:50:46 -0800
Subject: [PATCH] support 2-3dim reshape op of opencl

---
 .../kernel/opencl/kernel/activation.cc        |  4 ++++
 .../runtime/kernel/opencl/kernel/argminmax.cc |  4 ++++
 .../kernel/opencl/kernel/arithmetic.cc        |  4 ++++
 .../kernel/opencl/kernel/arithmetic_self.cc   |  4 ++++
 .../kernel/opencl/kernel/batch_to_space_nd.cc |  4 ++++
 .../runtime/kernel/opencl/kernel/batchnorm.cc |  4 ++++
 .../runtime/kernel/opencl/kernel/biasadd.cc   |  4 ++++
 .../src/runtime/kernel/opencl/kernel/cast.cc  |  8 +++++++-
 .../runtime/kernel/opencl/kernel/concat.cc    |  4 ++++
 .../kernel/opencl/kernel/conv2d_transpose.cc  |  4 ++++
 .../kernel/opencl/kernel/depthwise_conv2d.cc  |  4 ++++
 .../src/runtime/kernel/opencl/kernel/fill.cc  |  4 ++++
 .../kernel/opencl/kernel/fullconnection.cc    |  4 ++++
 .../runtime/kernel/opencl/kernel/matmul.cc    |  4 ++++
 .../runtime/kernel/opencl/kernel/one_hot.cc   | 11 +++++++----
 .../runtime/kernel/opencl/kernel/pooling2d.cc |  4 ++++
 .../runtime/kernel/opencl/kernel/reduce.cc    |  4 ++++
 .../runtime/kernel/opencl/kernel/reshape.cc   | 12 ++++++++++--
 .../runtime/kernel/opencl/kernel/resize.cc    |  4 ++++
 .../runtime/kernel/opencl/kernel/softmax.cc   |  4 ++++
 .../kernel/opencl/kernel/space_to_batch_nd.cc |  4 ++++
 .../kernel/opencl/kernel/space_to_depth.cc    |  8 +++++++-
 .../runtime/kernel/opencl/kernel/to_format.cc |  4 ++++
 .../lite/src/runtime/opencl/opencl_runtime.cc |  2 +-
 .../runtime/kernel/opencl/reshape_tests.cc    | 20 +++++++++++++++++++
 25 files changed, 128 insertions(+), 9 deletions(-)

diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc
index 7e761999014..614855db870 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc
@@ -56,6 +56,10 @@ std::string ActivationOpenCLKernel::GetActTypeString(int act_type) {
 }
 
 int ActivationOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (GetActTypeString(type_).empty()) {
     MS_LOG(ERROR) << "schema::ActivationType:" << type_ << "not found";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc
index 796dc180a81..3e4a081e70b 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc
@@ -34,6 +34,10 @@ using mindspore::schema::PrimitiveType_ArgMin;
 namespace mindspore::kernel {
 
 int ArgMinMaxOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
index 1dccb1acf6e..c579672fd1e 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
@@ -58,6 +58,10 @@ std::set<schema::PrimitiveType> SupportedOpenCLArithmetics =
   {PrimitiveType_Mul, PrimitiveType_Eltwise};
 int ArithmeticOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 2 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto *param = reinterpret_cast<ArithmeticParameter *>(op_parameter_);
   if (param->broadcasting_ && out_tensors_[0]->shape()[0] > 1) {
     MS_LOG(ERROR) << "Broadcasting don't support N > 1";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc
index e8b6c5b12c0..3b78a33ff13 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc
@@ -90,6 +90,10 @@ void ArithmeticSelfOpenCLKernel::GetKernelName(std::string *kernel_name, Arithme
 }
 
 int ArithmeticSelfOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->shape().size() != 4 && in_tensors_[0]->shape().size() != 2) {
     MS_LOG(ERROR) << " only support dim = 4 or 2 but your dim = " << in_tensors_[0]->shape().size();
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc
index 531de236d36..224e61da377 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/batch_to_space_nd.cc
@@ -32,6 +32,10 @@ using mindspore::schema::PrimitiveType_BatchToSpaceND;
 namespace mindspore::kernel {
 
 int BatchToSpaceNDOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc
index 459962cbdb1..364f4d4235b 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc
@@ -31,6 +31,10 @@ using mindspore::schema::PrimitiveType_BatchNorm;
 namespace mindspore::kernel {
 
 int BatchNormOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 5 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_.at(0)->shape()[0] > 1) {
     MS_LOG(ERROR) << " Unsupported batch_size >1 ";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc
index 681d68779d8..acf2f8afb46 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc
@@ -36,6 +36,10 @@ using mindspore::schema::PrimitiveType_BiasAdd;
 namespace mindspore::kernel {
 
 int BiasAddOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 2 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_.size() == 0) {
     MS_LOG(ERROR) << "Input data size must be greater than 0, but your size is " << in_tensors_.size();
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc
index 7e571861f60..1a641ec6a7d 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc
@@ -43,7 +43,13 @@ int CastOpenCLKernel::GetKernelName(std::string *kernel_name, CastParameter *param) {
   return RET_OK;
 }
 
-int CastOpenCLKernel::CheckSpecs() { return RET_OK; }
+int CastOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 
 void CastOpenCLKernel::SetConstArgs() {
   auto input_shape = in_tensors_[0]->shape();
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc
index c6749d36b05..e3383e684fa 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc
@@ -64,6 +64,10 @@ void ConcatGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> *
 }
 
 int ConcatOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() < 2 || in_tensors_.size() > 6) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = reinterpret_cast<ConcatParameter *>(this->op_parameter_);
   MS_LOG(DEBUG) << " concat at axis=: " << param->axis_;
   if (out_tensors_[0]->shape().size() > 4) {
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc
index 953d3e4c08d..6cd6af9fda1 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc
@@ -35,6 +35,10 @@ using mindspore::schema::PrimitiveType_DeConv2D;
 namespace mindspore::kernel {
 
 int Conv2dTransposeOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() != 2 && in_tensors_.size() != 3) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_);
   if (param->pad_l_ != param->pad_r_ || param->kernel_h_ - param->stride_h_ != 2 * param->pad_l_ ||
       param->pad_u_ != param->pad_d_ || param->kernel_w_ - param->stride_w_ != 2 * param->pad_u_) {
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc
index 81cb66a5cf2..90e33983d91 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc
@@ -43,6 +43,10 @@ using mindspore::schema::PrimitiveType_DepthwiseConv2D;
 namespace mindspore::kernel {
 
 int DepthwiseConv2dOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() != 2 && in_tensors_.size() != 3) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc
index 584f85449f6..7126b1c9aa6 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.cc
@@ -67,6 +67,10 @@ void FillOpenCLKernel::SetConstArgs() {}
 void FillOpenCLKernel::SetGlobalLocal() {}
 
 int FillOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = this->op_parameter_;
 
   if (out_tensors_[0]->shape().size() > 4) {
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc
index 1d3f2cbadd9..58b0078da1c 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/fullconnection.cc
@@ -37,6 +37,10 @@ using mindspore::schema::PrimitiveType_FullConnection;
 namespace mindspore::kernel {
 
 int FullConnectionOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() != 2 && in_tensors_.size() != 3) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = reinterpret_cast<MatMulParameter *>(op_parameter_);
   if (param->a_transpose_) {
     MS_LOG(ERROR) << "fullconnection only support a_transpose_=false yet.";
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
index c220679314b..1e9dcf936ed 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
@@ -31,6 +31,10 @@ using mindspore::schema::PrimitiveType_MatMul;
 namespace mindspore::kernel {
 
 int MatMulOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 2 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = reinterpret_cast<MatMulParameter *>(op_parameter_);
   transposeA = param->a_transpose_;
   if (transposeA) {
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc
index 78e203449e0..4b1d79cd39b 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/one_hot.cc
@@ -29,7 +29,13 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_OneHot;
 
 namespace mindspore::kernel {
-int OneHotOpenCLKernel::CheckSpecs() { return RET_OK; }
+int OneHotOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() < 2 || in_tensors_.size() > 4) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 
 int OneHotOpenCLKernel::Prepare() {
   std::string kernel_name = "OneHot";
@@ -59,9 +65,6 @@ int OneHotOpenCLKernel::Prepare() {
 }
 
 int OneHotOpenCLKernel::InitWeights() {
-  if (in_tensors_.size() <= 1) {
-    return RET_ERROR;
-  }
   depth_ = static_cast<int *>(in_tensors_[1]->data_c())[0];
   if (in_tensors_.size() > 2) {
     on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc
index 3839fba8fb8..044b8d0b986 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc
@@ -37,6 +37,10 @@ namespace mindspore {
 namespace kernel {
 
 int PoolingOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (parameter_->pool_mode_ != PoolMode_MaxPool && parameter_->pool_mode_ != PoolMode_AvgPool) {
     MS_LOG(ERROR) << "Init `Pooling2d` kernel failed, unsupported pool mode!";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc
index af585261902..93e3f359f62 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc
@@ -67,6 +67,10 @@ cl_float4 ReduceOpenCLKernel::GenC4Mask() {
 }
 
 int ReduceOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->shape()[0] > 1) {
     MS_LOG(ERROR) << "reduce op only support n = 1";
     return RET_PARAM_INVALID;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
index 4b39d0931bd..cb23e6964b8 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
@@ -31,12 +31,20 @@ using mindspore::schema::PrimitiveType_Squeeze;
 namespace mindspore::kernel {
 
 int ReshapeOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "Reshape in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;
   }
-  if (out_tensors_[0]->shape().size() != 2 && out_tensors_[0]->shape().size() != 4) {
-    MS_LOG(ERROR) << "Reshape output size should in 2,4";
+  if (in_tensors_[0]->shape().size() == 0 || in_tensors_[0]->shape().size() > 4) {
+    MS_LOG(ERROR) << "Reshape input size should be in 1-4, actual: " << in_tensors_[0]->shape().size();
+    return RET_ERROR;
+  }
+  if (out_tensors_[0]->shape().size() == 0 || out_tensors_[0]->shape().size() > 4) {
+    MS_LOG(ERROR) << "Reshape output size should be in 1-4, actual: " << out_tensors_[0]->shape().size();
     return RET_ERROR;
   }
   return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc
index a2e491f3663..c25d1ca8c08 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/resize.cc
@@ -33,6 +33,10 @@ using mindspore::schema::PrimitiveType_Resize;
 namespace mindspore::kernel {
 
 int ResizeOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto in_shape = in_tensors_[0]->shape();
   auto out_shape = out_tensors_[0]->shape();
   if (in_shape.size() != 4 || out_shape.size() != 4 || in_shape[0] != out_shape[0] || in_shape[3] != out_shape[3]) {
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc
index 7fca4c09edc..f007feeb95d 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc
@@ -43,6 +43,10 @@ std::vector<float> SoftmaxOpenCLKernel::GetMaskForLastChannel(int channels) {
 }
 
 int SoftmaxOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   axis_ = parameter_->axis_;
   auto in_shape = in_tensors_[0]->shape();
   if (in_shape.size() > 4) {
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc
index 5c2c4031f5b..8cef7957972 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_batch_nd.cc
@@ -32,6 +32,10 @@ using mindspore::schema::PrimitiveType_SpaceToBatchND;
 namespace mindspore::kernel {
 
 int SpaceToBatchNDOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc
index e24e58e5e72..686664eb296 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/space_to_depth.cc
@@ -31,7 +31,13 @@ using mindspore::lite::RET_PARAM_INVALID;
 using mindspore::schema::PrimitiveType_SpaceToDepth;
 
 namespace mindspore::kernel {
-int SpaceToDepthOpenCLKernel::CheckSpecs() { return RET_OK; }
+int SpaceToDepthOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 
 int SpaceToDepthOpenCLKernel::Prepare() {
   std::string kernel_name;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc
index 363341fea48..2a786b92dd7 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc
@@ -33,6 +33,10 @@ using mindspore::schema::PrimitiveType_ToFormat;
 namespace mindspore::kernel {
 
 int ToFormatOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto data_type = in_tensors_.front()->data_type();
   if (data_type != kNumberTypeFloat32 && data_type != kNumberTypeFloat16 && data_type != kNumberTypeInt32) {
     MS_LOG(ERROR) << "Unsupported data type " << data_type;
diff --git a/mindspore/lite/src/runtime/opencl/opencl_runtime.cc b/mindspore/lite/src/runtime/opencl/opencl_runtime.cc
index 129d0c512c7..73422bad331 100644
--- a/mindspore/lite/src/runtime/opencl/opencl_runtime.cc
+++ b/mindspore/lite/src/runtime/opencl/opencl_runtime.cc
@@ -271,7 +271,7 @@ int OpenCLRuntime::Init() {
 }
 
 int OpenCLRuntime::Uninit() {
-  if (enable_cache_) {
+  if (enable_cache_ && !binary_map_.empty()) {
     StoreCache();
   }
   binary_map_.clear();
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc
index e95af71b0d4..f5b950497b8 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc
@@ -135,4 +135,24 @@ TEST_F(TestOpenCL_Reshape, 4D_4D_test5) {
   }
 }
 
+TEST_F(TestOpenCL_Reshape, 3D_2D_test6) {
+  std::vector<int> shape_in = {5, 3, 8};
+  std::vector<int> shape_out = {8, 15};
+  float input_data[] = {
+    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+    24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+    72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
+  float output_data[] = {
+    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+    24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+    72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
+
+  for (auto fp16_enable : {false, true}) {
+    TestMain({{shape_in, input_data, VAR}}, {shape_out, output_data}, CreateParameter(), fp16_enable);
+  }
+}
 }  // namespace mindspore::lite::opencl::test
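
Note: after this patch, every kernel's CheckSpecs() opens with the same tensor-count guard before its op-specific validation. Below is a minimal standalone sketch of that pattern for reference only; the free function TensorCountsOk and its parameters are illustrative stand-ins, not names from this patch:

    // Illustrative sketch of the guard each CheckSpecs() now begins with:
    // reject the kernel when the tensor counts do not match the op's arity.
    #include <cstddef>
    #include <iostream>

    bool TensorCountsOk(std::size_t in_size, std::size_t out_size,
                        std::size_t want_in, std::size_t want_out) {
      if (in_size != want_in || out_size != want_out) {
        std::cerr << "in size: " << in_size << ", out size: " << out_size << '\n';
        return false;  // the kernels return RET_ERROR here
      }
      return true;  // the kernels fall through to op-specific checks
    }

    int main() {
      // Reshape-style arity: exactly one input and one output tensor.
      return TensorCountsOk(1, 1, 1, 1) ? 0 : 1;
    }

The expected counts vary per op: 1/1 for unary kernels, 2/1 for binary ones, 2-3/1 where a bias tensor is optional (conv2d_transpose, depthwise_conv2d, fullconnection), 2-6/1 for concat, and 5/1 for batchnorm.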