!23759 optimize opencl code

Merge pull request !23759 from QianliMa/logfix
i-robot 2021-09-22 03:51:00 +00:00 committed by Gitee
commit d9e992ac8b
37 changed files with 169 additions and 158 deletions
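
Most hunks below make one recurring change: inside an OpenCL kernel's CheckSpecs(), messages about unsupported specs are downgraded from MS_LOG(ERROR) to MS_LOG(WARNING), while the function keeps returning RET_ERROR so the spec check still fails. A minimal sketch of the resulting shape, assuming the kernel members and macros used in the hunks (SomeOpenCLKernel is a placeholder class name, not one from this change):

int SomeOpenCLKernel::CheckSpecs() {
  // Reject unsupported tensor counts, but report them as a warning rather than an error.
  if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
    MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
    return RET_ERROR;
  }
  return RET_OK;
}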


@ -52,11 +52,11 @@ std::string ActivationOpenCLKernel::GetActTypeString(int act_type) {
int ActivationOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (GetActTypeString(type_).empty()) {
MS_LOG(ERROR) << "schema::ActivationType:" << type_ << "not found";
MS_LOG(WARNING) << "schema::ActivationType:" << type_ << "not found";
return RET_ERROR;
}
return RET_OK;


@ -32,21 +32,21 @@ using mindspore::schema::PrimitiveType_ArgMinFusion;
namespace mindspore::kernel {
int ArgMinMaxOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if ((in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) ||
(out_tensors_[0]->data_type() != kNumberTypeFloat32 && out_tensors_[0]->data_type() != kNumberTypeFloat16)) {
MS_LOG(ERROR) << "Unsupported input/output data type. input data type is " << in_tensors_[0]->data_type()
<< " output data type is " << out_tensors_[0]->data_type();
MS_LOG(WARNING) << "Unsupported input/output data type. input data type is " << in_tensors_[0]->data_type()
<< " output data type is " << out_tensors_[0]->data_type();
return RET_ERROR;
}
if (in_tensors_[0]->shape().size() < DIMENSION_1D || in_tensors_[0]->shape().size() > DIMENSION_4D) {
MS_LOG(ERROR) << "input shape size must be (1-4), actual: " << in_tensors_[0]->shape().size();
MS_LOG(WARNING) << "input shape size must be (1-4), actual: " << in_tensors_[0]->shape().size();
return RET_ERROR;
}
if (out_tensors_[0]->shape().size() != DIMENSION_1D) {
MS_LOG(ERROR) << "output shape size must be 1, actual" << out_tensors_[0]->shape().size();
MS_LOG(WARNING) << "output shape size must be 1, actual" << out_tensors_[0]->shape().size();
return RET_ERROR;
}
auto *param = reinterpret_cast<ArgMinMaxParameter *>(this->op_parameter_);
@ -55,7 +55,7 @@ int ArgMinMaxOpenCLKernel::CheckSpecs() {
CHECK_LESS_RETURN(dims_size, 1);
auto axis = (param->axis_ + dims_size) % dims_size;
if (axis < 0 || axis >= dims_size) {
MS_LOG(ERROR) << "Invalid axis " << axis;
MS_LOG(WARNING) << "Invalid axis " << axis;
return RET_ERROR;
}
return RET_OK;


@ -41,13 +41,13 @@ namespace mindspore::kernel {
int ArithmeticOpenCLKernel::CheckSpecs() {
for (auto &tensor : in_tensors_) {
if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) {
MS_LOG(ERROR) << "ArithmeticOpenCLKernel only support fp32/fp16 input";
MS_LOG(WARNING) << "ArithmeticOpenCLKernel only support fp32/fp16 input";
return RET_ERROR;
}
}
for (auto &tensor : out_tensors_) {
if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) {
MS_LOG(ERROR) << "ArithmeticOpenCLKernel only support fp32/fp16 output";
MS_LOG(WARNING) << "ArithmeticOpenCLKernel only support fp32/fp16 output";
return RET_ERROR;
}
}


@ -29,15 +29,15 @@ using mindspore::lite::RET_OK;
namespace mindspore::kernel {
int ArithmeticSelfOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (!IsArithmeticSelf(type())) {
MS_LOG(ERROR) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
MS_LOG(WARNING) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
return RET_ERROR;
}
if (in_tensors_[0]->shape().size() != DIMENSION_4D && in_tensors_[0]->shape().size() != DIMENSION_2D) {
MS_LOG(ERROR) << " only support dim = 4 or 2 but your dim = " << in_tensors_[0]->shape().size();
MS_LOG(WARNING) << " only support dim = 4 or 2 but your dim = " << in_tensors_[0]->shape().size();
return RET_ERROR;
}
return RET_OK;


@ -30,26 +30,26 @@ using mindspore::schema::PrimitiveType_BatchToSpaceND;
namespace mindspore::kernel {
int BatchToSpaceNDOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type();
return RET_ERROR;
}
if (in_tensors_[0]->shape().size() != DIMENSION_4D && out_tensors_[0]->shape().size() != DIMENSION_4D) {
MS_LOG(ERROR) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", "
<< out_tensors_[0]->shape().size();
MS_LOG(WARNING) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", "
<< out_tensors_[0]->shape().size();
return RET_ERROR;
}
auto *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);
if (param->block_shape_[0] < 1 || param->block_shape_[1] < 1) {
MS_LOG(ERROR) << "block_sizes_ must > 1, actual " << param->block_shape_[0] << ", " << param->block_shape_[1];
MS_LOG(WARNING) << "block_sizes_ must > 1, actual " << param->block_shape_[0] << ", " << param->block_shape_[1];
return RET_ERROR;
}
if (in_tensors_[0]->shape()[kNHWC_H] * param->block_shape_[0] <= (param->crops_[0] + param->crops_[1]) ||
in_tensors_[0]->shape()[kNHWC_W] * param->block_shape_[1] <= (param->crops_[2] + param->crops_[3])) {
MS_LOG(ERROR) << "crop shape error!";
MS_LOG(WARNING) << "crop shape error!";
return RET_ERROR;
}
return RET_OK;


@ -37,15 +37,15 @@ constexpr int kNumInput4 = 4;
namespace mindspore::kernel {
int BatchNormOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_5 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_.at(0)->shape().size() != DIMENSION_4D) {
MS_LOG(ERROR) << "The dim of in_tensors->shape must be 4 but your dim is : " << in_tensors_.at(0)->shape().size();
MS_LOG(WARNING) << "The dim of in_tensors->shape must be 4 but your dim is : " << in_tensors_.at(0)->shape().size();
return RET_ERROR;
}
if (in_tensors_.at(0)->shape()[0] > 1) {
MS_LOG(ERROR) << " Unsupported batch_size >1 ";
MS_LOG(WARNING) << " Unsupported batch_size >1 ";
return RET_ERROR;
}
CHECK_NULL_RETURN(in_tensors_[kNumInput0]);


@ -32,22 +32,22 @@ namespace mindspore::kernel {
int CastOpenCLKernel::CheckSpecs() {
// the 2nd tensor is DstType
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_.front()->shape() != out_tensors_.front()->shape()) {
MS_LOG(ERROR) << "input shape must be equal to output shape";
MS_LOG(WARNING) << "input shape must be equal to output shape";
return RET_ERROR;
}
auto input_dtype = in_tensors_.front()->data_type();
if (input_dtype != kNumberTypeFloat32 && input_dtype != kNumberTypeFloat16) {
MS_LOG(ERROR) << "input dtype must be float32/float16";
MS_LOG(WARNING) << "input dtype must be float32/float16";
return RET_ERROR;
}
auto output_dtype = out_tensors_.front()->data_type();
if (output_dtype != kNumberTypeFloat32 && output_dtype != kNumberTypeFloat16) {
MS_LOG(ERROR) << "output dtype must be float32/float16";
MS_LOG(WARNING) << "output dtype must be float32/float16";
return RET_ERROR;
}
return RET_OK;


@ -78,20 +78,20 @@ void ConcatGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> *
int ConcatOpenCLKernel::CheckSpecs() {
if ((in_tensors_.size() < INPUT_TENSOR_SIZE_2 || in_tensors_.size() > INPUT_TENSOR_SIZE_6) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto param = reinterpret_cast<ConcatParameter *>(this->op_parameter_);
auto out_tensors_shape_size = out_tensors_[0]->shape().size();
MS_LOG(DEBUG) << " concat at axis=: " << param->axis_;
if (out_tensors_shape_size > DIMENSION_4D) {
MS_LOG(ERROR) << " GPU Unsupported shape.size > 4 ";
MS_LOG(WARNING) << " GPU Unsupported shape.size > 4 ";
return RET_ERROR;
}
for (auto &in_tensor : in_tensors_) {
auto in_tensors_shape_size = in_tensor->shape().size();
if (in_tensors_shape_size > DIMENSION_4D) {
MS_LOG(ERROR) << " GPU Unsupported in_tensor shape.size > 4 ";
MS_LOG(WARNING) << " GPU Unsupported in_tensor shape.size > 4 ";
return RET_ERROR;
}
}
@ -100,7 +100,7 @@ int ConcatOpenCLKernel::CheckSpecs() {
axis_ += in_tensors_.front()->shape().size();
}
if (axis_ < 0 || axis_ > 3) {
MS_LOG(ERROR) << " only support axis >= 0 and axis <= 3 ";
MS_LOG(WARNING) << " only support axis >= 0 and axis <= 3 ";
return RET_ERROR;
}
if (out_tensors_shape_size < 4 && type() == PrimitiveType_Concat && axis_ != 0) {
@ -109,12 +109,12 @@ int ConcatOpenCLKernel::CheckSpecs() {
} else if (out_tensors_shape_size == DIMENSION_3D) {
axis_ = axis_ + 1;
} else {
MS_LOG(ERROR) << " Unsupported axis =: " << axis_ << " shape().size()=: " << out_tensors_shape_size;
MS_LOG(WARNING) << " Unsupported axis =: " << axis_ << " shape().size()=: " << out_tensors_shape_size;
return RET_ERROR;
}
}
if (in_tensors_.size() < INPUT_TENSOR_SIZE_2 || in_tensors_.size() > INPUT_TENSOR_SIZE_6) {
MS_LOG(ERROR) << "unsupported input size :" << in_tensors_.size();
MS_LOG(WARNING) << "unsupported input size :" << in_tensors_.size();
return RET_ERROR;
}
return RET_OK;


@ -44,24 +44,24 @@ namespace mindspore::kernel {
int Conv2DOpenCLKernel::CheckSpecs() {
int inputs_num = in_tensors_.size();
if (inputs_num != INPUT_TENSOR_SIZE_2 && inputs_num != INPUT_TENSOR_SIZE_3) {
MS_LOG(ERROR) << "Conv2D only supports 2 or 3 input Tensor but get " << inputs_num;
MS_LOG(WARNING) << "Conv2D only supports 2 or 3 input Tensor but get " << inputs_num;
return RET_ERROR;
}
int outputs_num = out_tensors_.size();
if (outputs_num != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "Conv2D only supports 1 output Tensor but get " << outputs_num;
MS_LOG(WARNING) << "Conv2D only supports 1 output Tensor but get " << outputs_num;
return RET_ERROR;
}
CHECK_NULL_RETURN(in_tensors_.at(kInputIndex));
int input_ndim = in_tensors_.at(kInputIndex)->shape().size();
if (input_ndim != DIMENSION_4D) {
MS_LOG(ERROR) << "Conv2D only supports 4D input Tensor but get " << input_ndim << "D.";
MS_LOG(WARNING) << "Conv2D only supports 4D input Tensor but get " << input_ndim << "D.";
return RET_ERROR;
}
CHECK_NULL_RETURN(out_tensors_.at(kInputIndex));
int output_ndim = out_tensors_.at(kOutputIndex)->shape().size();
if (output_ndim != DIMENSION_4D) {
MS_LOG(ERROR) << "Conv2D only supports 4D output Tensor but get " << output_ndim << "D.";
MS_LOG(WARNING) << "Conv2D only supports 4D output Tensor but get " << output_ndim << "D.";
return RET_ERROR;
}
@ -69,17 +69,17 @@ int Conv2DOpenCLKernel::CheckSpecs() {
CHECK_NULL_RETURN(filter_tensor);
int filter_ndim = filter_tensor->shape().size();
if (filter_ndim != DIMENSION_4D) {
MS_LOG(ERROR) << "Conv2D only supports 4D filter Tensor but get " << filter_ndim << "D.";
MS_LOG(WARNING) << "Conv2D only supports 4D filter Tensor but get " << filter_ndim << "D.";
return RET_ERROR;
}
if (!filter_tensor->IsConst()) {
MS_LOG(ERROR) << "Conv2D don't support non-constant filter yet.";
MS_LOG(WARNING) << "Conv2D don't support non-constant filter yet.";
return RET_ERROR;
}
auto *bias_tensor = in_tensors_.size() >= INPUT_TENSOR_SIZE_3 ? in_tensors_.at(kBiasIndex) : nullptr;
if (bias_tensor != nullptr && !bias_tensor->IsConst()) {
MS_LOG(ERROR) << "Conv2D don't support non-constant bias yet.";
MS_LOG(WARNING) << "Conv2D don't support non-constant bias yet.";
return RET_ERROR;
}
@ -92,7 +92,7 @@ int Conv2DOpenCLKernel::CheckSpecs() {
case ActivationType_TANH:
break;
default: {
MS_LOG(ERROR) << "Unsupported activation type " << param_->act_type_;
MS_LOG(WARNING) << "Unsupported activation type " << param_->act_type_;
return RET_ERROR;
}
}


@ -35,22 +35,22 @@ namespace mindspore::kernel {
int Conv2dTransposeOpenCLKernel::CheckSpecs() {
if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto *param = reinterpret_cast<ConvParameter *>(op_parameter_);
if (param->act_type_ != ActType_No && param->act_type_ != ActType_Relu && param->act_type_ != ActType_Relu6) {
MS_LOG(ERROR) << "Unsupported activation type " << param->act_type_;
MS_LOG(WARNING) << "Unsupported activation type " << param->act_type_;
return RET_ERROR;
}
if (!in_tensors_.at(1)->IsConst()) {
MS_LOG(ERROR) << "Conv2dTranspose doesn't support non-constant filter yet.";
MS_LOG(WARNING) << "Conv2dTranspose doesn't support non-constant filter yet.";
return RET_ERROR;
}
if (in_tensors_.size() == INPUT_TENSOR_SIZE_3 && in_tensors_.at(C2NUM) != nullptr &&
!in_tensors_.at(C2NUM)->IsConst()) {
MS_LOG(ERROR) << "Conv2dTranspose doesn't support non-constant bias yet.";
MS_LOG(WARNING) << "Conv2dTranspose doesn't support non-constant bias yet.";
return RET_ERROR;
}
return RET_OK;


@ -38,19 +38,19 @@ namespace mindspore::kernel {
int DepthwiseConv2dOpenCLKernel::CheckSpecs() {
if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type();
return RET_ERROR;
}
if (!in_tensors_.at(kWeightIndex)->IsConst()) {
MS_LOG(ERROR) << "DepthwiseConv2d don't support non-constant weight yet.";
MS_LOG(WARNING) << "DepthwiseConv2d don't support non-constant weight yet.";
return RET_ERROR;
}
if (in_tensors_.size() == INPUT_TENSOR_SIZE_3 && !in_tensors_.at(kBiasIndex)->IsConst()) {
MS_LOG(ERROR) << "DepthwiseConv2d don't support non-constant bias yet.";
MS_LOG(WARNING) << "DepthwiseConv2d don't support non-constant bias yet.";
return RET_ERROR;
}
return RET_OK;


@ -81,7 +81,7 @@ void FillOpenCLKernel::SetGlobalLocal() {}
int FillOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto param = this->op_parameter_;
@ -89,13 +89,13 @@ int FillOpenCLKernel::CheckSpecs() {
auto input = in_tensors_.at(0);
CHECK_NULL_RETURN(input);
if (input->shape().size() > DIMENSION_1D && param->type_ == PrimitiveType_Fill) {
MS_LOG(ERROR) << " fill only support dim = 1";
MS_LOG(WARNING) << " fill only support dim = 1";
return RET_ERROR;
}
auto output = out_tensors_.at(0);
CHECK_NULL_RETURN(output);
if (output->shape().size() > OUTPUT_TENSOR_SIZE_4) {
MS_LOG(ERROR) << " only support dim <= 4";
MS_LOG(WARNING) << " only support dim <= 4";
return RET_ERROR;
}
return RET_OK;


@ -37,23 +37,23 @@ namespace mindspore::kernel {
int FullConnectionOpenCLKernel::CheckSpecs() {
if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto param = reinterpret_cast<MatMulParameter *>(op_parameter_);
if (param->a_transpose_) {
MS_LOG(ERROR) << "fullconnection only support a_transpose_=false yet.";
MS_LOG(WARNING) << "fullconnection only support a_transpose_=false yet.";
return RET_ERROR;
}
auto out_gpu_info = GpuTensorInfo(out_tensors_[0]);
if (out_gpu_info.H != 1 || out_gpu_info.W != 1) {
MS_LOG(ERROR) << "fullconnection only support 2d output shape or 4d output but H=W=1";
MS_LOG(WARNING) << "fullconnection only support 2d output shape or 4d output but H=W=1";
return RET_ERROR;
}
// for fusion: ActivationType_TANH
if (param->act_type_ != ActType_No && param->act_type_ != ActType_Relu && param->act_type_ != ActType_Relu6 &&
static_cast<schema::ActivationType>(param->act_type_) != ActivationType_TANH) {
MS_LOG(ERROR) << "Unsupported activation type " << param->act_type_;
MS_LOG(WARNING) << "Unsupported activation type " << param->act_type_;
return RET_ERROR;
}
N_ = out_gpu_info.N;
@ -61,26 +61,26 @@ int FullConnectionOpenCLKernel::CheckSpecs() {
auto intensor_shape = GpuTensorInfo(in_tensors_[0]);
int input_nhw = intensor_shape.N * intensor_shape.H * intensor_shape.W;
if (input_nhw < N_) {
MS_LOG(ERROR) << "Unsupported fullconnection shape";
MS_LOG(WARNING) << "Unsupported fullconnection shape";
}
if (!in_tensors_.at(kWeightIndex)->IsConst()) {
weight_var_ = true;
if (!param->b_transpose_) {
MS_LOG(ERROR) << "If fullconnection input weight is not constant, b_transpose_ should be true.";
MS_LOG(WARNING) << "If fullconnection input weight is not constant, b_transpose_ should be true.";
return RET_ERROR;
}
if (in_tensors_.at(kWeightIndex)->shape().size() != DIMENSION_2D) {
MS_LOG(ERROR) << "If fullconnection input weight is not constant, it should be 2d.";
MS_LOG(WARNING) << "If fullconnection input weight is not constant, it should be 2d.";
return RET_ERROR;
}
if (intensor_shape.C != in_tensors_.at(kWeightIndex)->shape()[1]) {
MS_LOG(ERROR)
MS_LOG(WARNING)
<< "If fullconnection input weight is not constant, input channel should equal to weight in_channel.";
return RET_ERROR;
}
}
if (in_tensors_.size() == INPUT_TENSOR_SIZE_3 && !in_tensors_.at(2)->IsConst()) {
MS_LOG(ERROR) << "FullConnection don't support non-constant bias yet.";
MS_LOG(WARNING) << "FullConnection don't support non-constant bias yet.";
return RET_ERROR;
}
CI_remainder_ = input_nhw / N_;


@ -32,33 +32,33 @@ using mindspore::schema::PrimitiveType_Gather;
namespace mindspore::kernel {
int GatherOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_3) {
MS_LOG(ERROR) << "GatherOpenCLKernel only supports 3 input Tensor but get " << in_tensors_.size();
MS_LOG(WARNING) << "GatherOpenCLKernel only supports 3 input Tensor but get " << in_tensors_.size();
return RET_ERROR;
}
if (out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "GatherOpenCLKernel only supports 1 output Tensor but get " << out_tensors_.size();
MS_LOG(WARNING) << "GatherOpenCLKernel only supports 1 output Tensor but get " << out_tensors_.size();
return RET_ERROR;
}
enable_fp16_ = ocl_runtime_->GetFp16Enable();
if (!in_tensors_.at(1)->IsConst() && enable_fp16_) {
MS_LOG(ERROR) << "GatherOpenCLKernel Unsupportted intensor1 = tensor and datatype = fp16 ";
MS_LOG(WARNING) << "GatherOpenCLKernel Unsupportted intensor1 = tensor and datatype = fp16 ";
return RET_ERROR;
}
int input_ndim = in_tensors_.front()->shape().size();
if (input_ndim < 0 || input_ndim > DIMENSION_4D) {
MS_LOG(ERROR) << "GatherOpenCLKernel only supports 1-4D input Tensor but get " << input_ndim << "D.";
MS_LOG(WARNING) << "GatherOpenCLKernel only supports 1-4D input Tensor but get " << input_ndim << "D.";
return RET_ERROR;
}
int indices_ndim = in_tensors_.at(1)->shape().size();
if (indices_ndim > DIMENSION_1D) {
MS_LOG(ERROR) << "GatherOpenCLKernel only supports 1D indices Tensor but get " << indices_ndim << "D.";
MS_LOG(WARNING) << "GatherOpenCLKernel only supports 1D indices Tensor but get " << indices_ndim << "D.";
return RET_ERROR;
}
TypeId data_type = in_tensors_.at(1)->data_type();
if (data_type != kNumberTypeInt32 && data_type != kNumberTypeInt64 && data_type != kNumberTypeFloat32 &&
data_type != kNumberTypeFloat16) {
MS_LOG(ERROR) << "GatherOpenCLKernel only supports Int32/Int64/Float32/Float16 indices Tensor.";
MS_LOG(WARNING) << "GatherOpenCLKernel only supports Int32/Int64/Float32/Float16 indices Tensor.";
return RET_ERROR;
}
@ -67,14 +67,14 @@ int GatherOpenCLKernel::CheckSpecs() {
}
axis_ = *reinterpret_cast<int32_t *>(in_tensors_.at(2)->data());
if (in_tensors_.at(2)->data() == nullptr) {
MS_LOG(ERROR) << "GatherOpenCLKernel need Axis.";
MS_LOG(WARNING) << "GatherOpenCLKernel need Axis.";
return RET_ERROR;
}
if (axis_ < 0) {
axis_ += input_ndim;
}
if (axis_ < 0 || axis_ >= input_ndim) {
MS_LOG(ERROR) << "axis is invalid: axis=" << axis_ << ".";
MS_LOG(WARNING) << "axis is invalid: axis=" << axis_ << ".";
return RET_ERROR;
} else {
return RET_OK;


@ -41,37 +41,37 @@ namespace mindspore::kernel {
int ArithmeticInt8OpenCLKernel::CheckSpecs() {
for (auto &tensor : in_tensors_) {
if (tensor->data_type() != kNumberTypeInt8) {
MS_LOG(ERROR) << "ArithmeticInt8OpenCLKernel only support int8 input";
MS_LOG(WARNING) << "ArithmeticInt8OpenCLKernel only support int8 input";
return RET_ERROR;
}
}
for (auto &tensor : out_tensors_) {
if (tensor->data_type() != kNumberTypeInt8) {
MS_LOG(ERROR) << "ArithmeticInt8OpenCLKernel only support int8 output";
MS_LOG(WARNING) << "ArithmeticInt8OpenCLKernel only support int8 output";
return RET_ERROR;
}
}
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto *param = reinterpret_cast<const ArithmeticParameter *>(op_parameter_);
CHECK_NULL_RETURN(param);
if (!IsArithmetic(type())) {
MS_LOG(ERROR) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
MS_LOG(WARNING) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(type());
return RET_ERROR;
}
if (type() == schema::PrimitiveType_Eltwise) {
auto mode = param->eltwise_mode_;
if (mode != EltwiseMode_PROD && mode != EltwiseMode_SUM && mode != EltwiseMode_MAXIMUM) {
MS_LOG(ERROR) << "Eltwise mode not support, mode:" << mode;
MS_LOG(WARNING) << "Eltwise mode not support, mode:" << mode;
return RET_ERROR;
}
}
if (!(param->activation_type_ == ActivationType_NO_ACTIVATION || param->activation_type_ == ActivationType_RELU ||
param->activation_type_ == ActivationType_RELU6)) {
MS_LOG(ERROR) << "Unsupported activation type " << param->activation_type_;
MS_LOG(WARNING) << "Unsupported activation type " << param->activation_type_;
return RET_ERROR;
}
return RET_OK;


@ -34,8 +34,8 @@ int LayerNormOpenCLKernel::CheckSpecs() {
auto param = reinterpret_cast<LayerNormParameter *>(this->op_parameter_);
CHECK_NULL_RETURN(param);
if (in_tensors_.size() != INPUT_TENSOR_SIZE_3 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "UnSupported in_tensors_.size: " << in_tensors_.size()
<< " out_tensors_.size(): " << out_tensors_.size();
MS_LOG(WARNING) << "UnSupported in_tensors_.size: " << in_tensors_.size()
<< " out_tensors_.size(): " << out_tensors_.size();
return RET_ERROR;
}
auto *input = in_tensors_.at(0);
@ -43,7 +43,7 @@ int LayerNormOpenCLKernel::CheckSpecs() {
auto *output = out_tensors_.at(0);
CHECK_NULL_RETURN(output);
if (input->shape().size() != DIMENSION_4D) {
MS_LOG(ERROR) << "UnSupported in_tensors_.shape.size: " << input->shape().size();
MS_LOG(WARNING) << "UnSupported in_tensors_.shape.size: " << input->shape().size();
return RET_ERROR;
}
normalized_axis_ = param->begin_params_axis_;
@ -52,7 +52,7 @@ int LayerNormOpenCLKernel::CheckSpecs() {
normalized_axis_ += input->shape().size();
}
if (normalized_axis_ != 3) {
MS_LOG(ERROR) << "UnSupported normalized_axis_ : " << param->normalized_dims_;
MS_LOG(WARNING) << "UnSupported normalized_axis_ : " << param->normalized_dims_;
return RET_ERROR;
}
return RET_OK;


@ -47,13 +47,13 @@ bool IsUseStrassenMatmul(const std::vector<lite::Tensor *> &in_tensors_) {
int MatMulOpenCLKernel::CheckSpecs() {
if (!(in_tensors_.size() == INPUT_TENSOR_SIZE_2 || in_tensors_.size() == INPUT_TENSOR_SIZE_3) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto param = reinterpret_cast<MatMulParameter *>(op_parameter_);
transposeA = param->a_transpose_;
if (transposeA) {
MS_LOG(ERROR) << "matmul only support a_transpose_=false yet.";
MS_LOG(WARNING) << "matmul only support a_transpose_=false yet.";
return RET_ERROR;
}
transposeB = param->b_transpose_;
@ -61,7 +61,7 @@ int MatMulOpenCLKernel::CheckSpecs() {
enable_fp16_ = ocl_runtime_->GetFp16Enable();
if (in_tensors_[0]->shape().size() != out_tensors_[0]->shape().size() ||
in_tensors_[0]->shape().size() < DIMENSION_2D || in_tensors_[0]->shape().size() > DIMENSION_4D) {
MS_LOG(ERROR) << "matmul only support input shape size= 2, 3 or 4.";
MS_LOG(WARNING) << "matmul only support input shape size= 2, 3 or 4.";
return RET_ERROR;
}
return RET_OK;


@ -30,7 +30,7 @@ namespace mindspore::kernel {
int OneHotOpenCLKernel::CheckSpecs() {
if ((in_tensors_.size() < INPUT_TENSOR_SIZE_2 || in_tensors_.size() > INPUT_TENSOR_SIZE_4) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
return RET_OK;


@ -35,29 +35,29 @@ int PadOpenCLKernel::CheckSpecs() {
auto param = reinterpret_cast<PadParameter *>(op_parameter_);
MS_ASSERT(param);
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2) {
MS_LOG(ERROR) << "Pad only support 1 input Tensor.";
MS_LOG(WARNING) << "Pad only support 1 input Tensor.";
return RET_ERROR;
}
if (out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "Pad only support 1 output Tensor.";
MS_LOG(WARNING) << "Pad only support 1 output Tensor.";
return RET_ERROR;
}
auto in_ndim = in_tensors_.front()->shape().size();
if (in_ndim < DIMENSION_1D || in_ndim > DIMENSION_4D) {
MS_LOG(ERROR) << "Pad only supports 1D-4D input Tensor but get " << in_ndim << "D.";
MS_LOG(WARNING) << "Pad only supports 1D-4D input Tensor but get " << in_ndim << "D.";
return RET_ERROR;
}
auto out_ndim = in_tensors_.front()->shape().size();
if (out_ndim < DIMENSION_1D || out_ndim > DIMENSION_4D) {
MS_LOG(ERROR) << "Pad only supports 1D-4D output Tensor but get " << out_ndim << "D.";
MS_LOG(WARNING) << "Pad only supports 1D-4D output Tensor but get " << out_ndim << "D.";
return RET_ERROR;
}
if (in_ndim != out_ndim) {
MS_LOG(ERROR) << "Pad: input ndim != output ndim.";
MS_LOG(WARNING) << "Pad: input ndim != output ndim.";
return RET_ERROR;
}
if (param->pad_mode_ != PaddingMode_CONSTANT) {
MS_LOG(ERROR) << "Pad only support CONSTANT MODE.";
MS_LOG(WARNING) << "Pad only support CONSTANT MODE.";
return RET_ERROR;
}
// Compatibility code
@ -66,7 +66,7 @@ int PadOpenCLKernel::CheckSpecs() {
}
auto pad_shape = in_tensors_.at(1)->shape();
if (pad_shape.size() != DIMENSION_2D || pad_shape[0] != in_ndim || pad_shape[1] != DIMENSION_2D) {
MS_LOG(ERROR) << "pad tensor shape invalid.";
MS_LOG(WARNING) << "pad tensor shape invalid.";
return RET_ERROR;
}
return RET_OK;


@ -35,19 +35,19 @@ namespace mindspore {
namespace kernel {
int PoolingOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_[0]->shape().size() != DIMENSION_4D) {
MS_LOG(ERROR) << "Only support 4d tensor.";
MS_LOG(WARNING) << "Only support 4d tensor.";
return RET_ERROR;
}
if (parameter_->pool_mode_ != PoolMode_MaxPool && parameter_->pool_mode_ != PoolMode_AvgPool) {
MS_LOG(ERROR) << "Init `Pooling2d` kernel failed, unsupported pool mode!";
MS_LOG(WARNING) << "Init `Pooling2d` kernel failed, unsupported pool mode!";
return RET_ERROR;
}
if (parameter_->act_type_ != ActType_No && parameter_->act_type_ != ActType_Relu) {
MS_LOG(ERROR) << "Unsupported activation type " << parameter_->act_type_;
MS_LOG(WARNING) << "Unsupported activation type " << parameter_->act_type_;
return RET_ERROR;
}
return RET_OK;


@ -33,17 +33,17 @@ namespace mindspore::kernel {
int PowerOpenCLKernel::CheckSpecs() {
if ((in_tensors_.size() != INPUT_TENSOR_SIZE_1 && in_tensors_.size() != INPUT_TENSOR_SIZE_2) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << "out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << "out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_.size() == INPUT_TENSOR_SIZE_2 &&
in_tensors_.at(0)->shape().size() != in_tensors_.at(1)->shape().size()) {
MS_LOG(ERROR) << "Unsupported input->shape.size " << in_tensors_.at(0)->shape().size()
<< "!=" << in_tensors_.at(1)->shape().size();
MS_LOG(WARNING) << "Unsupported input->shape.size " << in_tensors_.at(0)->shape().size()
<< "!=" << in_tensors_.at(1)->shape().size();
return RET_ERROR;
}
if (in_tensors_.at(0)->shape().size() > DIMENSION_4D) {
MS_LOG(ERROR) << "in_tensors_->shape.size must be less than 4";
MS_LOG(WARNING) << "in_tensors_->shape.size must be less than 4";
return RET_ERROR;
}
return RET_OK;


@ -87,20 +87,20 @@ int PReluOpenCLKernel::InitWeights() {
int PReluOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "PRelu Only supported in_tensors_.size=2 and out_tensors_.size()=1 but your in_tensors_.size="
<< in_tensors_.size() << " out_tensors_.size()=" << out_tensors_.size();
MS_LOG(WARNING) << "PRelu Only supported in_tensors_.size=2 and out_tensors_.size()=1 but your in_tensors_.size="
<< in_tensors_.size() << " out_tensors_.size()=" << out_tensors_.size();
return RET_ERROR;
}
auto weight_tensor = in_tensors_.at(1);
auto in_tensor_channel = GpuTensorInfo(in_tensors_[0]).C;
auto weight_channel = GpuTensorInfo(in_tensors_[1]).C;
if (weight_channel != 1 && weight_channel != in_tensor_channel) {
MS_LOG(ERROR) << "PRelu weight must be equal with in_teneors channel size, but your weight size is "
<< weight_channel << " and your input channel size is " << in_tensor_channel;
MS_LOG(WARNING) << "PRelu weight must be equal with in_teneors channel size, but your weight size is "
<< weight_channel << " and your input channel size is " << in_tensor_channel;
return mindspore::lite::RET_ERROR;
}
if (weight_tensor->data_type() != kNumberTypeFloat16 && weight_tensor->data_type() != kNumberTypeFloat32) {
MS_LOG(ERROR) << "PRelu weight must be float32 or float16";
MS_LOG(WARNING) << "PRelu weight must be float32 or float16";
return RET_ERROR;
}
return RET_OK;


@ -126,20 +126,20 @@ int ReduceOpenCLKernel::SetAxes() {
int ReduceOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto input = in_tensors_.at(0);
CHECK_NULL_RETURN(input);
if (input->shape()[0] > DIMENSION_1D) {
MS_LOG(ERROR) << "reduce op only support n = 1";
MS_LOG(WARNING) << "reduce op only support n = 1";
return RET_PARAM_INVALID;
}
inShape = GpuTensorInfo(in_tensors_[0]);
auto reduce_param = reinterpret_cast<ReduceParameter *>(op_parameter_);
CHECK_NULL_RETURN(reduce_param);
if (GetReduceTypeStr(reduce_param->mode_).empty()) {
MS_LOG(ERROR) << "not supported reduce type:" << reduce_param->mode_;
MS_LOG(WARNING) << "not supported reduce type:" << reduce_param->mode_;
return RET_PARAM_INVALID;
}
auto ret = SetAxes();
@ -150,11 +150,11 @@ int ReduceOpenCLKernel::CheckSpecs() {
wc_reduce_ = IsWCReduce(reduce_axes_);
c_reduce_ = IsCReduce(reduce_axes_);
if (!hw_reduce_ && !wc_reduce_ && !c_reduce_) {
MS_LOG(ERROR) << "Unsupported reduce axes";
MS_LOG(WARNING) << "Unsupported reduce axes";
return RET_PARAM_INVALID;
}
if ((c_reduce_ || wc_reduce_) && !reduce_param->keep_dims_) {
MS_LOG(ERROR) << "reduce axis (2,3) should keep dims";
MS_LOG(WARNING) << "reduce axis (2,3) should keep dims";
return RET_PARAM_INVALID;
}
return RET_OK;


@ -32,20 +32,20 @@ namespace mindspore::kernel {
int ResizeOpenCLKernel::CheckSpecs() {
if (!(in_tensors_.size() == INPUT_TENSOR_SIZE_1 || in_tensors_.size() == INPUT_TENSOR_SIZE_2) ||
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto in_shape = in_tensors_[0]->shape();
auto out_shape = out_tensors_[0]->shape();
if (in_shape.size() != DIMENSION_4D || out_shape.size() != DIMENSION_4D || in_shape[0] != out_shape[0] ||
in_shape[3] != out_shape[3]) {
MS_LOG(ERROR) << "resize op only support 4D and axes HW";
MS_LOG(WARNING) << "resize op only support 4D and axes HW";
return RET_PARAM_INVALID;
}
auto resize_param = reinterpret_cast<ResizeParameter *>(op_parameter_);
CHECK_NULL_RETURN(resize_param);
if (resize_param->method_ != schema::ResizeMethod_LINEAR && resize_param->method_ != schema::ResizeMethod_NEAREST) {
MS_LOG(ERROR) << "unsupported resize method:" << resize_param->method_;
MS_LOG(WARNING) << "unsupported resize method:" << resize_param->method_;
return RET_PARAM_INVALID;
}
return RET_OK;


@ -54,7 +54,8 @@ int ScaleOpenCLKernel::CheckSpecs() {
bool isScaleC = (in_shape.size() == 4 && axis == 3) || (in_shape.size() == 2 && axis == 1);
bool isScaleH = in_shape.size() == 4 && axis == 1;
if (isScalar || !(isScaleC || isScaleH)) {
MS_LOG(ERROR) << "unsupported scale axis " << axis << ", in shape " << in_shape << ", scale shape" << scale_shape;
MS_LOG(WARNING) << "unsupported scale axis " << axis << ", in shape " << in_shape << ", scale shape"
<< scale_shape;
return RET_ERROR;
}
}


@ -40,14 +40,14 @@ std::vector<float> SoftmaxOpenCLKernel::GetMaskForLastChannel(int channels) {
int SoftmaxOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
SoftmaxParameter *parameter = reinterpret_cast<SoftmaxParameter *>(op_parameter_);
axis_ = parameter->axis_;
auto in_shape = in_tensors_[0]->shape();
if (in_shape.size() > DIMENSION_4D) {
MS_LOG(ERROR) << "Init Softmax kernel failed: Unsupported shape size: " << in_shape.size();
MS_LOG(WARNING) << "Init Softmax kernel failed: Unsupported shape size: " << in_shape.size();
return RET_ERROR;
}
if (axis_ < 0) {
@ -55,7 +55,7 @@ int SoftmaxOpenCLKernel::CheckSpecs() {
}
axis_ += DIMENSION_4D - in_shape.size();
if (axis_ != 1 && axis_ != 2 && axis_ != 3) {
MS_LOG(ERROR) << "Init Softmax kernel failed: softmax axis should be H W or C";
MS_LOG(WARNING) << "Init Softmax kernel failed: softmax axis should be H W or C";
return RET_ERROR;
}
return RET_OK;


@ -29,16 +29,16 @@ using mindspore::schema::PrimitiveType_SpaceToBatchND;
namespace mindspore::kernel {
int SpaceToBatchNDOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
MS_LOG(WARNING) << "Unsupported data type " << in_tensors_[0]->data_type();
return RET_ERROR;
}
if (in_tensors_[0]->shape().size() != DIMENSION_4D && out_tensors_[0]->shape().size() != DIMENSION_4D) {
MS_LOG(ERROR) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", "
<< out_tensors_[0]->shape().size();
MS_LOG(WARNING) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", "
<< out_tensors_[0]->shape().size();
return RET_ERROR;
}
auto *param = reinterpret_cast<SpaceToBatchParameter *>(this->op_parameter_);
@ -48,14 +48,14 @@ int SpaceToBatchNDOpenCLKernel::CheckSpecs() {
param->padded_in_shape_[kNHWC_W] = in_tensors_[0]->shape().at(kNHWC_W) + param->paddings_[2] + param->paddings_[3];
param->padded_in_shape_[kNHWC_C] = in_tensors_[0]->shape().at(kNHWC_C);
if (param->block_sizes_[0] < 1 || param->block_sizes_[1] < 1) {
MS_LOG(ERROR) << "block_sizes_ must > 1, actual " << param->block_sizes_[0] << ", " << param->block_sizes_[1];
MS_LOG(WARNING) << "block_sizes_ must > 1, actual " << param->block_sizes_[0] << ", " << param->block_sizes_[1];
return RET_ERROR;
}
MS_ASSERT(param->block_sizes_[0]);
MS_ASSERT(param->block_sizes_[1]);
if (param->padded_in_shape_[kNHWC_H] % param->block_sizes_[0] ||
param->padded_in_shape_[kNHWC_W] % param->block_sizes_[1]) {
MS_LOG(ERROR) << "padded shape must be multiple of block!";
MS_LOG(WARNING) << "padded shape must be multiple of block!";
return RET_ERROR;
}
return RET_OK;


@ -32,7 +32,7 @@ using mindspore::schema::PrimitiveType_SpaceToDepth;
namespace mindspore::kernel {
int SpaceToDepthOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
return RET_OK;


@ -112,12 +112,12 @@ int SparseToDenseOpenCLKernel::InitWeights() {
int SparseToDenseOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() < DIMENSION_3D || out_tensors_.at(0)->shape().size() > DIMENSION_4D) {
MS_LOG(ERROR) << " only support out_tensors_ dim <= 4 and in_tensors_.size >= 3";
MS_LOG(WARNING) << " only support out_tensors_ dim <= 4 and in_tensors_.size >= 3";
return RET_ERROR;
}
if (in_tensors_.at(0)->shape().size() > DIMENSION_4D || out_tensors_.at(0)->shape().size() > DIMENSION_4D) {
MS_LOG(ERROR) << "Unsupported inputdim: " << in_tensors_[0]->shape().size() << "outdim"
<< out_tensors_[0]->shape().size();
MS_LOG(WARNING) << "Unsupported inputdim: " << in_tensors_[0]->shape().size() << "outdim"
<< out_tensors_[0]->shape().size();
return RET_ERROR;
}
if (input_dim_ == DIMENSION_2D) {
@ -129,7 +129,7 @@ int SparseToDenseOpenCLKernel::CheckSpecs() {
}
auto param = reinterpret_cast<SparseToDenseParameter *>(op_parameter_);
if (param->validate_indices_) {
MS_LOG(ERROR) << "Unsupported unordered for in_tensors_indices";
MS_LOG(WARNING) << "Unsupported unordered for in_tensors_indices";
return RET_ERROR;
}
return RET_OK;


@ -69,34 +69,34 @@ int SplitOpenCLKernel::CheckSpecs() {
if ((out_tensors_.size() != OUTPUT_TENSOR_SIZE_2 ||
(out_tensors_.size() != OUTPUT_TENSOR_SIZE_3 && param->split_dim_ == 0)) &&
in_tensors_.size() != INPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
if (in_tensors_.at(0)->IsConst()) {
MS_LOG(ERROR) << "in_tensors_ must be tensor";
MS_LOG(WARNING) << "in_tensors_ must be tensor";
return RET_ERROR;
}
for (auto &out_tensor : out_tensors_) {
if (out_tensor->IsConst()) {
MS_LOG(ERROR) << "out_tensor must be tensor";
MS_LOG(WARNING) << "out_tensor must be tensor";
return RET_ERROR;
}
}
if (!(param->num_split_ == 2 || param->split_dim_ == 0)) {
MS_LOG(ERROR) << "num_split_ only supported = 2 or split_dim_ = 0 yet";
MS_LOG(WARNING) << "num_split_ only supported = 2 or split_dim_ = 0 yet";
return RET_ERROR;
}
if (param->split_dim_ < 0 || param->split_dim_ > 3) {
MS_LOG(ERROR) << "split_dim_ must between 0~3";
MS_LOG(WARNING) << "split_dim_ must between 0~3";
return RET_ERROR;
}
if (param->split_sizes_ == nullptr) {
MS_LOG(ERROR) << "split_sizes_ can not nullptr";
MS_LOG(WARNING) << "split_sizes_ can not nullptr";
return RET_ERROR;
}
if (param->num_split_ == 1 && param->split_sizes_[0] == 0) {
MS_LOG(ERROR) << "param->split_sizes_[0] is zero.";
MS_LOG(WARNING) << "param->split_sizes_[0] is zero.";
return RET_ERROR;
}
return RET_OK;


@ -72,32 +72,32 @@ int StackOpenCLKernel::CheckSpecs() {
auto param = reinterpret_cast<StackParameter *>(this->op_parameter_);
axis_ = param->axis_;
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 && out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << " only support input size = 2 and output size = 1";
MS_LOG(WARNING) << " only support input size = 2 and output size = 1";
return RET_ERROR;
}
for (auto &tensor : in_tensors_) {
if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) {
MS_LOG(ERROR) << " only support fp32/fp16 input";
MS_LOG(WARNING) << " only support fp32/fp16 input";
return RET_ERROR;
}
}
for (auto &tensor : out_tensors_) {
if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) {
MS_LOG(ERROR) << " only support fp32/fp16 output";
MS_LOG(WARNING) << " only support fp32/fp16 output";
return RET_ERROR;
}
}
if (in_tensors_[0]->shape().size() > DIMENSION_4D || in_tensors_[0]->shape().size() <= 0) {
MS_LOG(ERROR) << " only support 0<dim<=4";
MS_LOG(WARNING) << " only support 0<dim<=4";
return RET_ERROR;
}
axis_ = axis_ < 0 ? axis_ + in_tensors_[0]->shape().size() : axis_;
if (axis_ > 3) {
MS_LOG(ERROR) << " only support axis <= 3 ";
MS_LOG(WARNING) << " only support axis <= 3 ";
return RET_ERROR;
}
if (axis_ > in_tensors_[0]->shape().size()) {
MS_LOG(ERROR) << " stack axis must been <= in_tensors_[0]->shape().size() ";
MS_LOG(WARNING) << " stack axis must been <= in_tensors_[0]->shape().size() ";
return RET_ERROR;
}
return RET_OK;


@ -33,7 +33,7 @@ namespace mindspore::kernel {
int StridedSliceOpenCLKernel::CheckSpecs() {
if (type() == PrimitiveType_SliceFusion) {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_3) {
MS_LOG(ERROR) << "Slice only supports 3 input Tensor.";
MS_LOG(WARNING) << "Slice only supports 3 input Tensor.";
return RET_ERROR;
}
int in_ndim = in_tensors_.front()->shape().size();
@ -45,7 +45,7 @@ int StridedSliceOpenCLKernel::CheckSpecs() {
}
} else if (type() == PrimitiveType_StridedSlice) {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_4) {
MS_LOG(ERROR) << "StridedSlice only supports 4 input Tensor.";
MS_LOG(WARNING) << "StridedSlice only supports 4 input Tensor.";
return RET_ERROR;
}
int in_ndim = in_tensors_.front()->shape().size();
@ -59,26 +59,26 @@ int StridedSliceOpenCLKernel::CheckSpecs() {
return RET_ERROR;
}
} else {
MS_LOG(ERROR) << "type error.";
MS_LOG(WARNING) << "type error.";
return RET_ERROR;
}
const std::string kernel_name = type() == PrimitiveType_SliceFusion ? "Slice" : "StridedSlice";
if (out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << kernel_name + " only supports 1 output Tensor.";
MS_LOG(WARNING) << kernel_name + " only supports 1 output Tensor.";
return RET_ERROR;
}
auto in_ndim = in_tensors_.front()->shape().size();
if (in_ndim == 0 || in_ndim > DIMENSION_4D) {
MS_LOG(ERROR) << kernel_name + " only supports 1D-4D input tensor";
MS_LOG(WARNING) << kernel_name + " only supports 1D-4D input tensor";
return RET_ERROR;
}
auto out_ndim = out_tensors_.front()->shape().size();
if (out_ndim > DIMENSION_4D) {
MS_LOG(ERROR) << kernel_name + " only supports 0D-4D output tensor";
MS_LOG(WARNING) << kernel_name + " only supports 0D-4D output tensor";
return RET_ERROR;
}
if (InitConstArgs() != RET_OK) {
MS_LOG(ERROR) << "call InitConstArgs() failed";
MS_LOG(WARNING) << "call InitConstArgs() failed";
return RET_ERROR;
}
return RET_OK;


@ -30,13 +30,13 @@ using mindspore::lite::opencl::MemType;
namespace mindspore::kernel {
int ToFormatOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
auto data_type = in_tensors_.front()->data_type();
if (data_type != kNumberTypeFloat32 && data_type != kNumberTypeFloat16 && data_type != kNumberTypeInt32 &&
data_type != kNumberTypeInt8) {
MS_LOG(ERROR) << "Unsupported data type " << data_type;
MS_LOG(WARNING) << "Unsupported data type " << data_type;
return RET_ERROR;
}
return RET_OK;


@ -30,17 +30,17 @@ using mindspore::schema::PrimitiveType_Transpose;
namespace mindspore::kernel {
int TransposeOpenCLKernel::CheckSpecs() {
if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) {
MS_LOG(ERROR) << "Transpose input output size unsupported.";
MS_LOG(WARNING) << "Transpose input output size unsupported.";
return RET_ERROR;
}
int in_ndim = in_tensors_.at(0)->shape().size();
int out_ndim = out_tensors_.at(0)->shape().size();
if (in_ndim != out_ndim) {
MS_LOG(ERROR) << "Transpose only support in_ndim equal to out_ndim.";
MS_LOG(WARNING) << "Transpose only support in_ndim equal to out_ndim.";
return RET_ERROR;
}
if (in_ndim > DIMENSION_4D) {
MS_LOG(ERROR) << "Transpose don't support 5d tensor or higher.";
MS_LOG(WARNING) << "Transpose don't support 5d tensor or higher.";
return RET_ERROR;
}
if (CheckParamLikeTensor("Transpose", "perm", in_tensors_.at(1), kNumberTypeInt32, {in_ndim}) != RET_OK) {


@ -381,7 +381,11 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<sc
return RET_ERROR;
}
RemoveIfDepend(cnode);
ret = RemoveIfDepend(cnode);
if (ret != RET_OK) {
MS_LOG(ERROR) << "RemoveIfDepend failed";
break;
}
if (prim->name() == mindspore::ops::kNameDepend || prim->name() == mindspore::lite::kNameTupleGetItem ||
prim->name() == mindspore::lite::kNameMakeTuple) {
continue;
@ -389,7 +393,11 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<sc
if (prim->name() == "make_tuple") {
continue;
}
RemoveIfMakeTuple(cnode);
ret = RemoveIfMakeTuple(cnode);
if (ret != RET_OK) {
MS_LOG(ERROR) << "RemoveIfMakeTuple failed";
break;
}
auto node = std::make_unique<schema::CNodeT>();
if (node == nullptr) {
MS_LOG(ERROR) << "object failed to be constructed";


@ -402,7 +402,7 @@ int FetchDataFromCNode(const CNodePtr &cnode, size_t index, converter::FmkType f
return RET_OK;
}
void RemoveIfDepend(const CNodePtr &cnode) {
int RemoveIfDepend(const CNodePtr &cnode) {
bool has_depend = false;
std::vector<AnfNodePtr> inputs;
inputs.clear();
@ -418,7 +418,7 @@ void RemoveIfDepend(const CNodePtr &cnode) {
auto value_node = depend_node->input(0)->cast<ValueNodePtr>();
if (value_node == nullptr) {
MS_LOG(ERROR) << "value node is invalid.";
return;
return RET_ERROR;
}
if (value_node->value() != nullptr && opt::CheckPrimitiveType(depend_node, prim::kPrimDepend)) {
has_depend = true;
@ -439,9 +439,10 @@ void RemoveIfDepend(const CNodePtr &cnode) {
if (has_depend) {
cnode->set_inputs(inputs);
}
return RET_OK;
}
void RemoveIfMakeTuple(const CNodePtr &cnode) {
int RemoveIfMakeTuple(const CNodePtr &cnode) {
bool has_make_tuple = false;
std::vector<AnfNodePtr> inputs;
inputs.clear();
@ -457,7 +458,7 @@ void RemoveIfMakeTuple(const CNodePtr &cnode) {
auto value_node = make_tuple_node->input(0)->cast<ValueNodePtr>();
if (value_node == nullptr) {
MS_LOG(ERROR) << "value node is invalid.";
return;
return RET_ERROR;
}
if (value_node->value() != nullptr && (opt::CheckPrimitiveType(make_tuple_node, prim::kPrimMakeTuple) ||
opt::CheckPrimitiveType(make_tuple_node, opt::kPrimMakeTupleV2))) {
@ -472,6 +473,7 @@ void RemoveIfMakeTuple(const CNodePtr &cnode) {
if (has_make_tuple) {
cnode->set_inputs(inputs);
}
return RET_OK;
}
} // namespace lite
} // namespace mindspore


@ -44,9 +44,9 @@ int FetchDataFromValueNode(const CNodePtr &cnode, size_t index, converter::FmkTy
DataInfo *data_info);
int FetchDataFromCNode(const CNodePtr &cnode, size_t index, converter::FmkType fmk_type, bool train_flag,
DataInfo *data_info);
void RemoveIfDepend(const CNodePtr &cnode);
int RemoveIfDepend(const CNodePtr &cnode);
void RemoveIfMakeTuple(const CNodePtr &cnode);
int RemoveIfMakeTuple(const CNodePtr &cnode);
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_ANF_EXPORTER_FETCH_CONTENT_H_
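
The converter hunks above apply a second pattern: RemoveIfDepend and RemoveIfMakeTuple change from void to int, so a failure (an invalid value node) propagates to Anf2Fb instead of being dropped by a bare return. A self-contained sketch of that refactoring under generic names (Prune and ExportNode are illustrative only, not functions in the MindSpore tree; the constants stand in for the lite return codes):

#include <iostream>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;

// After the change: the helper reports a status instead of returning void,
// so the caller can stop when the node it inspects is invalid.
int Prune(bool node_is_valid) {
  if (!node_is_valid) {
    std::cerr << "value node is invalid." << std::endl;
    return RET_ERROR;
  }
  // ... rewrite the node's inputs here ...
  return RET_OK;
}

int ExportNode(bool node_is_valid) {
  int ret = Prune(node_is_valid);
  if (ret != RET_OK) {
    std::cerr << "Prune failed" << std::endl;
    return RET_ERROR;
  }
  return RET_OK;
}

int main() { return ExportNode(false) == RET_OK ? 0 : 1; }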