From 8e3fe59f24f2c1b80e6e6c9830a94200676f530e Mon Sep 17 00:00:00 2001
From: xuanyue
Date: Thu, 17 Sep 2020 21:01:47 +0800
Subject: [PATCH] fix bugs

---
 mindspore/lite/schema/ops.fbs                 |   5 +-
 mindspore/lite/src/lite_session.cc            |   2 +-
 mindspore/lite/src/ops/conv2d.cc              |   8 +-
 mindspore/lite/src/ops/conv2d_grad_filter.cc  |   2 +-
 mindspore/lite/src/ops/conv2d_grad_input.cc   |   2 +-
 mindspore/lite/src/ops/deconv2d.cc            |  12 +-
 mindspore/lite/src/ops/depthwise_conv2d.cc    |   6 +-
 mindspore/lite/src/ops/equal.cc               |  10 ++
 mindspore/lite/src/ops/equal.h                |   1 +
 mindspore/lite/src/ops/gather.cc              |   2 +-
 mindspore/lite/src/ops/pooling.cc             |   4 +-
 mindspore/lite/src/ops/pooling_grad.cc        |   4 +-
 mindspore/lite/test/ut/src/infer_test.cc      | 151 +-----------------
 .../parser/tflite/tflite_conv_parser_test.cc  |   2 +-
 .../tflite/tflite_deconv_parser_test.cc       |   2 +-
 .../tflite_depthwise_conv_parser_test.cc      |   4 +-
 .../tflite/tflite_pooling_parser_test.cc      |   2 +-
 .../fusion/conv_activation_fusion_test.cc     |   4 +-
 .../fusion/conv_biasadd_fusion_test.cc        |   4 +-
 .../optimizer/fusion/conv_bn_fusion_test.cc   |   4 +-
 .../fusion/conv_scale_fusion_test.cc          |   4 +-
 .../parser/onnx/onnx_deconv_parser.cc         |  10 ++
 .../converter/parser/onnx/onnx_node_parser.cc |   6 +-
 .../parser/tflite/tflite_cast_parser.cc       |   3 +
 .../parser/tflite/tflite_pad_parser.cc        |   1 +
 .../converter/parser/tflite/tflite_util.cc    |   4 +-
 26 files changed, 66 insertions(+), 193 deletions(-)

diff --git a/mindspore/lite/schema/ops.fbs b/mindspore/lite/schema/ops.fbs
index c728751791..803646a639 100644
--- a/mindspore/lite/schema/ops.fbs
+++ b/mindspore/lite/schema/ops.fbs
@@ -103,9 +103,10 @@ enum EltwiseMode : byte {
 
 enum PadMode : byte {
     NOTSET = 0,
-    SAME = 1,
+    SAME_UPPER = 1,
     VALID = 2,
-    CAFFE = 4
+    CAFFE = 4,
+    SAME_LOWER = 5
 }
 
 enum RoundMode : byte {
diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc
index 0af5b7d992..141563bff3 100644
--- a/mindspore/lite/src/lite_session.cc
+++ b/mindspore/lite/src/lite_session.cc
@@ -350,7 +350,7 @@ int LiteSession::Init(Context *context) {
     MS_LOG(INFO) << "Init OpenCL runtime.";
   }
 #endif
-  executor = new Executor();
+  executor = new (std::nothrow) Executor();
   if (nullptr == executor) {
     MS_LOG(ERROR) << "New Executor failed";
     is_running_.store(false);
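Note on the lite_session.cc hunk above: a plain `new` never returns nullptr (it throws std::bad_alloc on failure), so the existing `nullptr == executor` check could never fire; `new (std::nothrow)` is what makes that check meaningful. A minimal standalone sketch of the pattern, using a stand-in Executor type rather than the real class:

#include <cstdio>
#include <new>  // std::nothrow

struct Executor {  // stand-in for the real class
  char buf[64];
};

int main() {
  // Throwing form: `new Executor()` would throw std::bad_alloc on failure,
  // so a nullptr check after it is dead code.
  // Non-throwing form: returns nullptr on allocation failure, making the
  // follow-up check reachable.
  Executor *executor = new (std::nothrow) Executor();
  if (executor == nullptr) {
    std::fprintf(stderr, "New Executor failed\n");
    return 1;
  }
  delete executor;
  return 0;
}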
diff --git a/mindspore/lite/src/ops/conv2d.cc b/mindspore/lite/src/ops/conv2d.cc
index a9c350ebc2..1dccb681a3 100644
--- a/mindspore/lite/src/ops/conv2d.cc
+++ b/mindspore/lite/src/ops/conv2d.cc
@@ -156,7 +156,7 @@ void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -221,7 +221,7 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -233,8 +233,6 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
     attr->activationType = schema::ActivationType_NO_ACTIVATION;
   }
 
-  // attr->padMode = schema::PadMode_SAME;
-  // attr->activationType = schema::ActivationType_RELU;
   primitive->value.type = schema::PrimitiveType_Conv2D;
   primitive->value.value = attr.release();
 }
@@ -319,7 +317,7 @@ void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output_w) {
   pad_u_ = GetPadUp();
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     *output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(stride_w));
     *output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(stride_h));
     auto pad_h_all = ((*output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h);
diff --git a/mindspore/lite/src/ops/conv2d_grad_filter.cc b/mindspore/lite/src/ops/conv2d_grad_filter.cc
index 5004b48ab5..4b5bff7218 100644
--- a/mindspore/lite/src/ops/conv2d_grad_filter.cc
+++ b/mindspore/lite/src/ops/conv2d_grad_filter.cc
@@ -119,7 +119,7 @@ int Conv2DGradFilter::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
diff --git a/mindspore/lite/src/ops/conv2d_grad_input.cc b/mindspore/lite/src/ops/conv2d_grad_input.cc
index cd2c2753ce..e68d9c29ac 100644
--- a/mindspore/lite/src/ops/conv2d_grad_input.cc
+++ b/mindspore/lite/src/ops/conv2d_grad_input.cc
@@ -118,7 +118,7 @@ int Conv2DGradInput::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
diff --git a/mindspore/lite/src/ops/deconv2d.cc b/mindspore/lite/src/ops/deconv2d.cc
index 8f3da2bceb..abfb8b062b 100644
--- a/mindspore/lite/src/ops/deconv2d.cc
+++ b/mindspore/lite/src/ops/deconv2d.cc
@@ -93,7 +93,7 @@ void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::Primi
   if (pad_mode == "valid" || pad_mode == "VALID") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same" || pad_mode == "SAME") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -105,8 +105,6 @@ void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::Primi
     attr->activationType = schema::ActivationType_NO_ACTIVATION;
   }
 
-  // attr->padMode = schema::PadMode_SAME;
-  // attr->activationType = schema::ActivationType_RELU;
   primitive->value.type = schema::PrimitiveType_DeConv2D;
   primitive->value.value = attr.release();
 }
@@ -206,10 +204,10 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   std::vector<int> out_shape = {output_n, output_h, output_w, output_c};
   output->set_shape(out_shape);
 
-  if (pad_mode == schema::PadMode_SAME) {
+  if (pad_mode == schema::PadMode_SAME_UPPER) {
     pad_u_ = ((input_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - output_h) / 2;
     pad_l_ = ((input_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - output_w) / 2;
   } else if (pad_mode == schema::PadMode_VALID) {
     pad_u_ = 0;
     pad_l_ = 0;
-  } else if (pad_mode == schema::PadMode_CAFFE) {
+  } else if (pad_mode == schema::PadMode_CAFFE || pad_mode == schema::PadMode_NOTSET) {
   } else {
     MS_LOG(ERROR) << "unsupported pad mode for deconv";
   }
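Note on the SAME_UPPER arithmetic used in the conv2d.cc and deconv2d.cc hunks above: the output size is ceil(input / stride), and the total padding implied by that output size is split so the larger half lands at the end of the axis. A self-contained sketch of the per-axis formula (names are illustrative; the real code writes the results into pad_u_/pad_d_/pad_l_/pad_r_):

#include <algorithm>
#include <cstdio>

// Mirrors the updated ConvInferShape math for one spatial dimension:
// output = ceil(input / stride); the total padding needed to reach that
// output size gets its smaller half at the start and the extra pixel, if
// any, at the end. That asymmetry is what SAME_UPPER means.
void SameUpper1D(int input, int kernel, int stride, int dilate,
                 int *output, int *pad_begin, int *pad_end) {
  *output = (input + stride - 1) / stride;  // integer ceil(input / stride)
  int pad_all = (*output - 1) * stride + (kernel - 1) * dilate + 1 - input;
  pad_all = std::max(pad_all, 0);
  *pad_begin = pad_all / 2;           // smaller half first (top/left)
  *pad_end = pad_all - pad_all / 2;   // extra pixel at the end (bottom/right)
}

int main() {
  int out, pad_top, pad_bottom;
  SameUpper1D(/*input=*/28, /*kernel=*/3, /*stride=*/2, /*dilate=*/1,
              &out, &pad_top, &pad_bottom);
  // input 28, 3x3 kernel, stride 2: output 14, pad 0 on top, 1 on bottom.
  std::printf("output=%d pad=(%d,%d)\n", out, pad_top, pad_bottom);
  return 0;
}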
diff --git a/mindspore/lite/src/ops/depthwise_conv2d.cc b/mindspore/lite/src/ops/depthwise_conv2d.cc
index 67cf1aef9e..e5864bb24f 100644
--- a/mindspore/lite/src/ops/depthwise_conv2d.cc
+++ b/mindspore/lite/src/ops/depthwise_conv2d.cc
@@ -104,7 +104,7 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -114,8 +114,6 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
     attr->activationType = schema::ActivationType_NO_ACTIVATION;
   }
 
-  // attr->padMode = schema::PadMode_SAME;
-  // attr->activationType = schema::ActivationType_RELU;
   auto channel_multiplier = GetValue<int>(prim.GetAttr("channel_multiplier"));
   attr->channelMultiplier = channel_multiplier;
 
@@ -220,7 +218,7 @@ int DepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   pad_u_ = GetPadUp();
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH()));
     output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
     auto pad_h_all = ((output_h - 1) * GetStrideH() + (GetKernelH() - 1) * GetDilateH() + 1 - input_h);
diff --git a/mindspore/lite/src/ops/equal.cc b/mindspore/lite/src/ops/equal.cc
index 9732211061..1178e6f1c7 100644
--- a/mindspore/lite/src/ops/equal.cc
+++ b/mindspore/lite/src/ops/equal.cc
@@ -29,5 +29,15 @@ int Equal::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::
 }
 #endif
 
+int Equal::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
+  auto input = inputs_.front();
+  MS_ASSERT(input != nullptr);
+  auto output = outputs_.front();
+  MS_ASSERT(output != nullptr);
+  output->set_shape(input->shape());
+  output->set_data_type(TypeId::kNumberTypeUInt8);
+  output->SetFormat(input->GetFormat());
+  return RET_OK;
+}
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/ops/equal.h b/mindspore/lite/src/ops/equal.h
index 69194b5528..34d2d5d7f6 100644
--- a/mindspore/lite/src/ops/equal.h
+++ b/mindspore/lite/src/ops/equal.h
@@ -36,6 +36,7 @@ class Equal : public Arithmetic {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
 
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
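Note on the equal.cc/equal.h hunks above: the new Equal::InferShape gives the output the first input's shape and format but a uint8 element type, this runtime's representation for boolean tensors (the tflite_cast_parser.cc hunk later in this patch applies the same bool-as-uint8 convention). A rough standalone equivalent, with a hypothetical Tensor struct rather than the lite::Tensor API:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for lite::Tensor and the type/format enums.
enum TypeId : uint8_t { kFloat32, kUInt8 };
enum Format : uint8_t { kNHWC, kNCHW };

struct Tensor {
  std::vector<int> shape;
  TypeId data_type;
  Format format;
};

// Same idea as the added Equal::InferShape: an elementwise comparison keeps
// the input's shape and layout but always yields a uint8 (boolean) tensor.
void EqualInferShape(const Tensor &input, Tensor *output) {
  output->shape = input.shape;
  output->data_type = kUInt8;  // bool results stored as uint8
  output->format = input.format;
}

int main() {
  Tensor in{{1, 5, 5, 3}, kFloat32, kNHWC}, out{};
  EqualInferShape(in, &out);
  std::printf("out rank=%zu type=%d\n", out.shape.size(), out.data_type);
  return 0;
}

Like the patch, this copies the first input's shape as-is; it does not compute a broadcast shape between the two operands.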
diff --git a/mindspore/lite/src/ops/gather.cc b/mindspore/lite/src/ops/gather.cc
index 69a29f8ff2..c5575c3c81 100644
--- a/mindspore/lite/src/ops/gather.cc
+++ b/mindspore/lite/src/ops/gather.cc
@@ -91,7 +91,7 @@ int Gather::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   std::vector<int> out_shape{in_shape};
   out_shape.erase(out_shape.begin() + axis);
   for (int i = indices_rank - 1; i >= 0; --i) {
-    out_shape.insert(out_shape.begin() + axis + i, indices_shape[i]);
+    out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
   }
   output->set_shape(out_shape);
   return RET_OK;
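Note on the gather.cc hunk above: the loop walks indices_shape backwards, so the old insertion point begin() + axis + i could lie past end() of the just-shrunk vector (undefined behavior), while the fixed point begin() + axis is always valid and still yields the dimensions in their original order, since each later insert pushes the earlier ones to the right. A small demonstration of the corrected shape logic in plain STL, with made-up shapes:

#include <cstdio>
#include <vector>

// Output shape of Gather: replace in_shape[axis] with indices_shape.
std::vector<int> GatherOutShape(const std::vector<int> &in_shape,
                                const std::vector<int> &indices_shape, int axis) {
  std::vector<int> out_shape{in_shape};
  out_shape.erase(out_shape.begin() + axis);
  // Iterating in reverse and inserting at the fixed offset `axis`
  // reproduces indices_shape in order at that position.
  for (int i = static_cast<int>(indices_shape.size()) - 1; i >= 0; --i) {
    out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
  }
  return out_shape;
}

int main() {
  // params {4, 5, 6}, indices shape {2, 3}, axis 2 -> {4, 5, 2, 3}.
  // With the old begin() + axis + i form, the first insert position here
  // would be begin() + 3 on a vector of size 2, which is past end().
  auto out = GatherOutShape({4, 5, 6}, {2, 3}, 2);
  for (int d : out) std::printf("%d ", d);
  std::printf("\n");
  return 0;
}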
diff --git a/mindspore/lite/src/ops/pooling.cc b/mindspore/lite/src/ops/pooling.cc
index 2e39deb433..7dbeed6fcb 100644
--- a/mindspore/lite/src/ops/pooling.cc
+++ b/mindspore/lite/src/ops/pooling.cc
@@ -97,7 +97,7 @@ int Pooling::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
   if (pad_mode == "VALID") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "SAME") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -187,7 +187,7 @@ int Pooling::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   pad_u_ = GetPadUp();
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
     output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH()));
     auto pad_h_all = ((output_h - 1) * GetStrideH() + (window_h - 1) + 1 - input_h);
diff --git a/mindspore/lite/src/ops/pooling_grad.cc b/mindspore/lite/src/ops/pooling_grad.cc
index eed0f2d5f2..bc82884ea8 100644
--- a/mindspore/lite/src/ops/pooling_grad.cc
+++ b/mindspore/lite/src/ops/pooling_grad.cc
@@ -90,7 +90,7 @@ int PoolingGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
   if (pad_mode == "VALID") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "SAME") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -162,7 +162,7 @@ int PoolingGrad::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   pad_u_ = GetPadUp();
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     int output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
     int output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH()));
     auto pad_h_all = ((output_h - 1) * GetStrideH() + (window_h - 1) + 1 - input_h);
diff --git a/mindspore/lite/test/ut/src/infer_test.cc b/mindspore/lite/test/ut/src/infer_test.cc
index 555bc6fd7f..8ef11d372c 100644
--- a/mindspore/lite/test/ut/src/infer_test.cc
+++ b/mindspore/lite/test/ut/src/infer_test.cc
@@ -42,7 +42,7 @@ TEST_F(InferTest, TestConvNode) {
   node->primitive = std::make_unique<schema::PrimitiveT>();
   node->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto primitive = new schema::Conv2DT;
-  primitive->padMode = schema::PadMode_SAME;
+  primitive->padMode = schema::PadMode_SAME_UPPER;
   primitive->channelIn = 3;
   primitive->channelOut = 32;
   primitive->format = schema::Format_NHWC;
@@ -231,18 +231,6 @@ TEST_F(InferTest, TestAddNode) {
   ASSERT_EQ(TypeId::kNumberTypeFloat32, outTensor->data_type());
   auto *outData = reinterpret_cast<float *>(outTensor->MutableData());
   ASSERT_NE(nullptr, outData);
-  //  //===================================================
-  //  size_t output_size;
-  //  std::string output_path = "./convfp32_out_1_28_28_32.bin";
-  //  ReadFile(output_path.c_str(), &output_size, buf);
-  //  ASSERT_NE(nullptr, buf[0]);
-  //  auto output_data = reinterpret_cast<float *>(buf[0]);
-  //  ASSERT_NE(nullptr, output_data);
-  //  //===================================================
-  //  ASSERT_EQ(output_size, outTensor->Size());
-  //  for (size_t i = 0; i < outTensor->ElementsNum(); i++) {
-  //    ASSERT_EQ(output_data[i], outData[i]);
-  //  }
   MS_LOG(INFO) << "Passed";
 }
 
@@ -366,141 +354,4 @@ TEST_F(InferTest, TestModel) {
   auto outputs = session->GetOutputs();
   MS_LOG(INFO) << "Passed";
 }
-
-// TEST_F(TrainTest, TestMultiNode) {
-//   auto msGraph = std::make_shared();
-//   msGraph->name = "graph";
-//   auto msSubgraph = std::make_unique();
-//   msSubgraph->name = "subGraph";
-//
-//   auto conv = std::make_unique();
-//   conv->inputIndex = {0, 1};
-//   conv->outputIndex = {2};
-//   conv->attr.type = schema::OpT_Conv2D;
-//   auto conv_attr = new schema::Conv2DT;
-//   conv_attr->padMode = schema::PadMode_SAME;
-//   conv_attr->format = schema::Format_NHWC;
-//   conv_attr->strideH = 1;
-//   conv_attr->strideW = 1;
-//   conv_attr->kernelH = 3;
-//   conv_attr->kernelW = 3;
-//   conv_attr->dilateH = 1;
-//   conv_attr->dilateW = 1;
-//
-//   conv->attr.value = conv_attr;
-//   conv->name = "Conv2D";
-//   conv->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(conv));
-//
-//   auto matMul1 = std::make_unique();
-//   matMul1->inputIndex = {2, 3};
-//   matMul1->outputIndex = {4};
-//   matMul1->attr.type = schema::OpT_MatMul;
-//   auto matMul_attr1 = new schema::MatMulT;
-//   matMul_attr1->transposeA = false;
-//   matMul_attr1->transposeB = true;
-//   matMul1->attr.value = matMul_attr1;
-//   matMul1->name = "matmul1";
-//   matMul1->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(matMul1));
-//
-//   auto matMul2 = std::make_unique();
-//   matMul2->inputIndex = {4, 5};
-//   matMul2->outputIndex = {6};
-//   matMul2->attr.type = schema::OpT_MatMul;
-//   auto matMul_attr2 = new schema::MatMulT;
-//   matMul_attr2->transposeA = false;
-//   matMul_attr2->transposeB = true;
-//   matMul2->attr.value = matMul_attr2;
-//   matMul2->name = "matmul2";
-//   matMul2->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(matMul2));
-//
-//   msSubgraph->inputIndex = {0};
-//   msSubgraph->outputIndex = {6};
-//
-//   auto input0 = std::make_unique();
-//   input0->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   input0->format = schema::Format_NHWC;
-//   input0->dataType = TypeId::kNumberTypeFloat32;
-//   input0->dims = {1, 5, 5, 3};
-//   input0->offset = -1;
-//   msSubgraph->allTensors.emplace_back(std::move(input0));
-//
-//   auto conv_weight = std::make_unique();
-//   conv_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   conv_weight->format = schema::Format_KHWC;
-//   conv_weight->dataType = TypeId::kNumberTypeFloat32;
-//   conv_weight->dims = {8, 3, 3, 3};
-//   conv_weight->data.resize(8*3*3*3*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(conv_weight));
-//
-//   auto conv_output = std::make_unique();
-//   conv_output->refCount = 0;
-//   conv_output->format = schema::Format_NHWC;
-//   conv_output->dataType = TypeId::kNumberTypeFloat32;
-//   conv_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(conv_output));
-//
-//   auto add_weight = std::make_unique();
-//   add_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   add_weight->format = schema::Format_NHWC;
-//   add_weight->dataType = TypeId::kNumberTypeFloat32;
-//   add_weight->dims = {1, 5, 5, 8};
-//   add_weight->data.resize(5*5*8*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(add_weight));
-//
-//   auto add_output = std::make_unique();
-//   add_output->refCount = 0;
-//   add_output->format = schema::Format_NHWC;
-//   add_output->dataType = TypeId::kNumberTypeFloat32;
-//   add_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(add_output));
-//
-//   auto mul_weight = std::make_unique();
-//   mul_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   mul_weight->format = schema::Format_NHWC;
-//   mul_weight->dataType = TypeId::kNumberTypeFloat32;
-//   mul_weight->dims = {1, 5, 5, 8};
-//   mul_weight->data.resize(5*5*8*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(mul_weight));
-//
-//   auto mul_output = std::make_unique();
-//   mul_output->refCount = 0;
-//   mul_output->format = schema::Format_NHWC;
-//   mul_output->dataType = TypeId::kNumberTypeFloat32;
-//   mul_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(mul_output));
-//   msGraph->subgraphs.emplace_back(std::move(msSubgraph));
-//
-//   flatbuffers::FlatBufferBuilder builder(1024);
-//   auto offset = schema::GraphDef::Pack(builder, msGraph.get());
-//   builder.Finish(offset);
-//   size_t size = builder.GetSize();
-//   const char *content = (char *)builder.GetBufferPointer();
-//   const std::string strstub = "";
-//
-//   auto func_graph = inference::LoadModel(content, size, strstub);
-//   ASSERT_NE(nullptr, func_graph);
-//   auto session = inference::MSSession::CreateSession(kCPUDevice, 0);
-//   ASSERT_NE(nullptr, session);
-//   auto graphId = session->CompileGraph(func_graph);
-//
-//   auto inTensor =
-//     std::shared_ptr<inference::MSTensor>(inference::MSTensor::CreateTensor(TypeId::kNumberTypeFloat32, {1, 5, 5, 3}));
-//   ASSERT_NE(nullptr, inTensor);
-//   ASSERT_EQ(sizeof(float) * (5 * 5 * 3), inTensor->Size());
-//   (void)inTensor->MutableData();
-//
-//   std::vector<std::shared_ptr<inference::MSTensor>> inputs;
-//   inputs.emplace_back(inTensor);
-//   auto outputs = session->RunGraph(graphId, inputs);
-//   ASSERT_EQ(1, outputs.size());
-//   ASSERT_EQ(1, outputs.front().size());
-//   auto runOutput = outputs.front().front();
-//   ASSERT_NE(nullptr, runOutput);
-//   ASSERT_EQ(5 * 5 * 8, runOutput->ElementsNum());
-//   ASSERT_EQ(TypeId::kNumberTypeFloat32, runOutput->data_type());
-//   MS_LOG(INFO) << "Passed";
-//}
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc
index 202079abc6..e7ca65a28d 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc
@@ -46,7 +46,7 @@ TEST_F(TestTfliteParserConv, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc
index 8badaa1d33..b1b6609724 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc
@@ -47,7 +47,7 @@ TEST_F(TestTfliteParserDeConv, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc
index 243bbcc867..b42ded9ccd 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc
@@ -46,7 +46,7 @@ TEST_F(TestTfliteParserDepthwiseConv1, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
@@ -80,7 +80,7 @@ TEST_F(TestTfliteParserDepthwiseConv2, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc
index 3d9465bc25..337c24f4f8 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc
@@ -73,7 +73,7 @@ TEST_F(TestTfliteParserAvgPooling, AttrValue) {
   ASSERT_EQ(val->windowH, 2);
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 0);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 0);
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc
index ff03a58e72..9d503e82a2 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc
@@ -42,7 +42,7 @@ CNodeTptr BuildConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -62,7 +62,7 @@ CNodeTptr BuildDepthwiseConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc
index 4bbf967a15..9a0dc1dbb1 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc
@@ -42,7 +42,7 @@ CNodeTptr BuildConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -62,7 +62,7 @@ CNodeTptr BuildDepthwiseConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc
index b2d2892436..a06d872575 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc
@@ -42,7 +42,7 @@ CNodeTptr BuildConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -62,7 +62,7 @@ CNodeTptr BuildDepthwiseConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc
index 8ae10d5d95..f458630ffa 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc
@@ -48,7 +48,7 @@ CNodeTptr BuildConv2D(int with_bias_flag) {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -74,7 +74,7 @@ CNodeTptr BuildDepthwiseConv2D(int with_bias_flag) {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc
index 940f7423c3..cb638f718a 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc
@@ -71,6 +71,13 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
     return RET_NULL_PTR;
   }
 
+  // set default params
+  attr->padMode = schema::PadMode_NOTSET;
+  attr->group = 1;
+  attr->strideW = 1;
+  attr->strideH = 1;
+  attr->dilateW = 1;
+  attr->dilateH = 1;
   // set opdef each attr params
   for (const auto &onnx_node_attr : onnx_node.attribute()) {
     if (onnx_node_attr.name() == "group") {
@@ -121,6 +128,9 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
       MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s().c_str();
       return RET_ERROR;
     }
+    } else if (onnx_node_attr.name() == "output_padding") {
+      MS_LOG(ERROR) << "output_padding param hasn't been supported";
+      return RET_NOT_SUPPORT;
     }
   }
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc
index 8baedfb972..24a9ac8dcc 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc
@@ -22,8 +22,10 @@ namespace lite {
 schema::PadMode OnnxNodeParser::GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr) {
   if (onnx_node_attr.s() == "NOTSET") {
     return schema::PadMode_NOTSET;
-  } else if (onnx_node_attr.s() == "SAME_UPPER" || onnx_node_attr.s() == "SAME_LOWER") {
-    return schema::PadMode_SAME;
+  } else if (onnx_node_attr.s() == "SAME_UPPER") {
+    return schema::PadMode_SAME_UPPER;
+  } else if (onnx_node_attr.s() == "SAME_LOWER") {
+    return schema::PadMode_SAME_LOWER;
   } else if (onnx_node_attr.s() == "VALID") {
     return schema::PadMode_VALID;
   } else {
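Note on the onnx_node_parser.cc hunk above: splitting the old PadMode_SAME into SAME_UPPER and SAME_LOWER (see the ops.fbs hunk at the top of this patch) lets the converter keep ONNX's auto_pad distinction instead of collapsing both onto one mode; the two differ only in which side of the axis receives the odd padding pixel. A compact sketch of that distinction (illustrative helper, not converter code):

#include <cstdio>
#include <utility>

// ONNX auto_pad semantics for the total padding along one axis:
//   SAME_UPPER puts the odd extra pixel at the end   (bottom/right);
//   SAME_LOWER puts the odd extra pixel at the start (top/left).
// With an even total the two modes coincide.
std::pair<int, int> SplitSamePad(int pad_all, bool same_lower) {
  int small_half = pad_all / 2;
  int big_half = pad_all - small_half;
  return same_lower ? std::make_pair(big_half, small_half)   // {begin, end}
                    : std::make_pair(small_half, big_half);
}

int main() {
  auto upper = SplitSamePad(3, /*same_lower=*/false);  // {1, 2}
  auto lower = SplitSamePad(3, /*same_lower=*/true);   // {2, 1}
  std::printf("SAME_UPPER: {%d, %d}  SAME_LOWER: {%d, %d}\n",
              upper.first, upper.second, lower.first, lower.second);
  return 0;
}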
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc
index 9d10a00d28..ab631f64cb 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc
@@ -49,6 +49,9 @@ STATUS TfliteCastParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
     return RET_NULL_PTR;
   }
   attr->srcT = GetTfliteDataType(in_tensor->type);
+  if (attr->srcT == TypeId::kNumberTypeBool) {
+    attr->srcT = TypeId::kNumberTypeUInt8;
+  }
   const auto &out_tensor = tflite_tensors[tflite_op->outputs[0]];
   if (out_tensor == nullptr) {
     MS_LOG(ERROR) << "tensor is null";
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc
index 7cc8caccb2..92b56a3fc5 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 namespace mindspore {
 namespace lite {
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
index a32ffaaefb..4d06d00a25 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
@@ -157,7 +157,7 @@ TypeId GetTfliteDataType(const tflite::TensorType &tflite_data_type) {
 
 schema::PadMode GetPadMode(tflite::Padding tflite_padmode) {
   if (tflite_padmode == tflite::Padding_SAME) {
-    return schema::PadMode_SAME;
+    return schema::PadMode_SAME_UPPER;
   } else if (tflite_padmode == tflite::Padding_VALID) {
     return schema::PadMode_VALID;
   } else {
@@ -198,7 +198,7 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::P
   int padDown = 0;
   int padLeft = 0;
   int padRight = 0;
-  if (pad_mode == schema::PadMode_SAME) {
+  if (pad_mode == schema::PadMode_SAME_UPPER) {
     auto shape = tensor->shape;
     int H_input = shape.at(1);
     int W_input = shape.at(2);