diff --git a/mindspore/lite/tools/converter/converter.cc b/mindspore/lite/tools/converter/converter.cc
index 3f3f5a03a3f..87ab8a9910f 100644
--- a/mindspore/lite/tools/converter/converter.cc
+++ b/mindspore/lite/tools/converter/converter.cc
@@ -90,7 +90,7 @@ MetaGraphT *Converter::Convert(const converter::Flags *flag) {
     return nullptr;
   }
 
-  // graph = anfTransform->Transform(graph);
+  graph = anfTransform->Transform(graph);
 
   CreateQuantizer(graph, flag);
   if (mQuantizer != nullptr) {
diff --git a/mindspore/lite/tools/converter/graphdef_transform.cc b/mindspore/lite/tools/converter/graphdef_transform.cc
index ec92083597d..d0240ed9f39 100644
--- a/mindspore/lite/tools/converter/graphdef_transform.cc
+++ b/mindspore/lite/tools/converter/graphdef_transform.cc
@@ -100,20 +100,20 @@ int GraphDefTransform::Transform(const converter::Flags &ctx) {
   // }
 
   // fusion
-  {
-    Optimizer fusionOptimizer;
-    fusionOptimizer.AddPass(new (std::nothrow) ConvBiasAddFusionPass());
-    fusionOptimizer.AddPass(new (std::nothrow) ConvBNFusionPass());
-    fusionOptimizer.AddPass(new (std::nothrow) ConvScaleFusionPass());
-    fusionOptimizer.AddPass(new (std::nothrow) ConvReluFusionPass());
-    fusionOptimizer.AddPass(new (std::nothrow) ConvRelu6FusionPass());
-    fusionOptimizer.AddPass(new (std::nothrow) IsolatedNodeRemovePass());
-    status = fusionOptimizer.Run(graphDefT);
-    if (status != RET_OK && status != RET_NO_CHANGE) {
-      MS_LOG(ERROR) << "Run fusionOptimizer graphPasses Failed";
-      return status;
-    }
-  }
+  // {
+  //   Optimizer fusionOptimizer;
+  //   fusionOptimizer.AddPass(new (std::nothrow) ConvBiasAddFusionPass());
+  //   fusionOptimizer.AddPass(new (std::nothrow) ConvBNFusionPass());
+  //   fusionOptimizer.AddPass(new (std::nothrow) ConvScaleFusionPass());
+  //   fusionOptimizer.AddPass(new (std::nothrow) ConvReluFusionPass());
+  //   fusionOptimizer.AddPass(new (std::nothrow) ConvRelu6FusionPass());
+  //   fusionOptimizer.AddPass(new (std::nothrow) IsolatedNodeRemovePass());
+  //   status = fusionOptimizer.Run(graphDefT);
+  //   if (status != RET_OK && status != RET_NO_CHANGE) {
+  //     MS_LOG(ERROR) << "Run fusionOptimizer graphPasses Failed";
+  //     return status;
+  //   }
+  // }
 
   // weight format trans
   if (ctx.formatTrans) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
index 2d8d6a86786..c7ebce22322 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
@@ -89,10 +89,10 @@ void GenConvNewBias(const FuncGraphPtr &func_graph, const CNodePtr &conv_node, c
   auto add_weight_param = bias_add_weight->cast<ParameterPtr>()->default_param();
   auto add_weight_tensor = std::dynamic_pointer_cast<ParamValueLite>(add_weight_param);
   auto add_weight_data = reinterpret_cast<float *>(add_weight_tensor->tensor_addr());
-
-  if (add_weight_tensor->tensor_shape().empty()) {
-    if (EOK != memset_s(add_bias_data, kernel_nums * sizeof(float), *add_weight_data, kernel_nums * sizeof(float))) {
-      MS_LOG(EXCEPTION) << "memset_s conv_bias_data failed";
+  auto add_weight_shape = add_weight_tensor->tensor_shape();
+  if (add_weight_shape.empty() || (add_weight_shape.size() == 1 && add_weight_shape[0] ==1)) {
+    for (size_t i = 0; i < kernel_nums; i++) {
+      add_bias_data[i] = *add_weight_data;
     }
   } else {
     if (EOK != memcpy_s(add_bias_data, kernel_nums * sizeof(float), add_weight_data, kernel_nums * sizeof(float))) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index ee7e56de6ce..e4f346e0cf7 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -145,8 +145,8 @@ const {
   // conv has bias,bias_flag true
   bool bias_flag = false;
   if (conv_bias_node != nullptr) {
-    auto bias_weight_param = conv_weight_node->cast<ParameterPtr>()->default_param();
-    auto bias_tensor = std::dynamic_pointer_cast<ParamValueLite>(bias_weight_param);
+    auto conv_bias_param = conv_bias_node->cast<ParameterPtr>()->default_param();
+    auto bias_tensor = std::dynamic_pointer_cast<ParamValueLite>(conv_bias_param);
     bias_data = reinterpret_cast<float *>(bias_tensor->tensor_addr());
     bias_flag = true;
   } else {
@@ -187,7 +187,7 @@ const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_nu
   MS_ASSERT(bias_data != nullptr);
   if (bias_flag) {
     auto tmp_bias_data = new(std::nothrow) float[kernel_num];
-    if (EOK != memset_s(bias_data, kernel_num * sizeof(float), 0, kernel_num * sizeof(float))) {
+    if (EOK != memset_s(tmp_bias_data, kernel_num * sizeof(float), 0, kernel_num * sizeof(float))) {
       MS_LOG(EXCEPTION) << "memset bias data failed";
     }
     for (size_t i = 0; i < kernel_num; i++) {