forked from mindspore-Ecosystem/mindspore
!4554 fix anf exporter problem
Merge pull request !4554 from wangchangkai/master
commit de857161ed
@@ -0,0 +1 @@
+ssd.pb
@@ -65,6 +65,26 @@ function Run_x86() {
         fi
     done < ${models_onnx_config}

+    # Run mindspore converted models:
+    while read line; do
+        model_name=${line}
+        if [[ $model_name == \#* ]]; then
+            continue
+        fi
+        echo ${model_name}
+        echo 'cd '${convertor_path}'/MSLite-*-linux_x86_64'
+        cd ${convertor_path}/MSLite-*-linux_x86_64 || return 1
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' || return 1
+        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 --accuracyThreshold=1.5
+        if [ $? = 0 ]; then
+            run_result='Run_x86: '${model_name}' pass'
+            echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='Run_x86: '${model_name}' fail <<===========================this is the failed case'
+            echo ${run_result} >> ${run_benchmark_result_file}
+            return 1
+        fi
+    done < ${models_mindspore_config}
 }

 # Run on arm64 platform:
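(For reference, not part of the diff: with the single ssd.pb entry added in models_mindspore.cfg above, one iteration of this new benchmark loop amounts to roughly the commands below; the ${...} variables are the script's own placeholders, left unresolved here.)

    # sketch of one Run_x86 iteration with model_name=ssd.pb
    cd ${convertor_path}/MSLite-*-linux_x86_64
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib
    ./benchmark/benchmark --modelPath=${ms_models_path}/ssd.pb.ms \
        --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/ssd.pb.ms.bin \
        --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/ssd.pb.ms.out \
        --warmUpLoopCount=1 --loopCount=1 --accuracyThreshold=1.5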
@@ -176,6 +196,7 @@ models_tflite_config=${basepath}/models_tflite.cfg
 models_caffe_config=${basepath}/models_caffe.cfg
 models_tflite_posttraining_config=${basepath}/models_tflite_posttraining.cfg
 models_onnx_config=${basepath}/models_onnx.cfg
+models_mindspore_config=${basepath}/models_mindspore.cfg

 rm -rf ${basepath}/ms_models
 mkdir -p ${basepath}/ms_models
@@ -216,6 +237,17 @@ while read line; do
     ./converter_lite --fmk=ONNX --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} || exit 1
 done < ${models_onnx_config}

+# Convert mindspore models:
+while read line; do
+    model_name=${line}
+    if [[ $model_name == \#* ]]; then
+        continue
+    fi
+    echo ${model_name}
+    pwd
+    echo './converter_lite --fmk=MS --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}''
+    ./converter_lite --fmk=MS --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} || exit 1
+done < ${models_mindspore_config}

 # Convert TFLite PostTraining models:
 while read line; do
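(Likewise a sketch, not part of the diff: for the ssd.pb entry, one iteration of the new conversion loop boils down to the call below; the resulting ${ms_models_path}/ssd.pb.ms is what the Run_x86 loop shown earlier benchmarks.)

    # sketch of one conversion iteration with model_name=ssd.pb
    ./converter_lite --fmk=MS --modelFile=${models_path}/ssd.pb --outputFile=${ms_models_path}/ssd.pb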
@@ -408,6 +408,10 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s
         node_id_map_[name] = meta_graphT->allTensors.size();
       }
       meta_graphT->allTensors.emplace_back(msTensor);
+      if (IsPrimitiveCNode(cnode, schema::PrimitiveType_Conv2D)
+          || IsPrimitiveCNode(cnode, schema::PrimitiveType_DepthwiseConv2D)) {
+        break;
+      }
     }
   } else {
     auto ms_tensor = new schema::TensorT();
@@ -155,8 +155,8 @@ int AnfDepwiseconv2DPopulater::Populate(const PrimitivePtr &prim, PrimitiveTValu
   auto channel_multiplier = GetValue<int>(prim->GetAttr("channel_multiplier"));
   attr->channelMultiplier = channel_multiplier;

-  MS_ASSERT(inputs.size() == kAnfPopulaterThree);
-  auto inputNode = inputs[kAnfPopulaterTwo];
+  MS_ASSERT(inputs.size() == kAnfPopulaterTwo);
+  auto inputNode = inputs[kAnfPopulaterOne];
   MS_ASSERT(inputNode != nullptr);
   if (inputNode->isa<Parameter>()) {
     auto paramNode = inputNode->cast<ParameterPtr>();
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vector>
+#include <memory>
+#include "tools/anf_importer/anf_populater/anf_node_populater_registry.h"
+#include "tools/anf_importer/anf_populater/anf_make_tuple_populater.h"
+#include "ir/func_graph.h"
+#include "ir/primitive.h"
+
+namespace mindspore::lite {
+int AnfMakeTuplePopulater::Populate(const PrimitivePtr &prim, PrimitiveTValue *primitiveTValuePtr,
+                                    const std::vector<AnfNodePtr> &inputs) {
+  auto primitive = std::make_unique<schema::PrimitiveT>();
+  auto attr = std::make_unique<schema::MakeTupleT>();
+  primitive->value.type = schema::PrimitiveType_MakeTuple;
+  primitive->value.value = attr.release();
+  MS_ASSERT(primitiveTValuePtr != nullptr);
+  primitiveTValuePtr->SetPrimitiveT(primitive.release());
+  return 0;
+}
+AnfNodePopulaterRegistrar anfMakeTuplePopulater("make_tuple", new AnfMakeTuplePopulater());
+} // namespace mindspore::lite
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_ANF_MAKE_TUPLE_PARSER_H
+#define MINDSPORE_ANF_MAKE_TUPLE_PARSER_H
+#include "tools/anf_importer/anf_populater/anf_node_populater.h"
+#include <vector>
+namespace mindspore::lite {
+class AnfMakeTuplePopulater : public AnfNodePopulater {
+ public:
+  AnfMakeTuplePopulater() = default;
+  ~AnfMakeTuplePopulater() override = default;
+  int Populate(const PrimitivePtr &prim, PrimitiveTValue *primitiveTValuePtr,
+               const std::vector<AnfNodePtr> &inputs) override;
+};
+} // namespace mindspore::lite
+#endif // MINDSPORE_ANF_MAKE_TUPLE_PARSER_H
@@ -21,10 +21,6 @@
 #include "ir/primitive.h"

 namespace mindspore::lite {
-namespace {
-constexpr int kReduceInputNum = 3;
-constexpr int kReduceInputIndex = 2;
-} // namespace
 int AnfReduceMeanPopulater::Populate(const PrimitivePtr &prim, PrimitiveTValue *primitiveTValuePtr,
                                      const std::vector<AnfNodePtr> &inputs) {
   auto primitive = std::make_unique<schema::PrimitiveT>();
@@ -32,8 +28,8 @@ int AnfReduceMeanPopulater::Populate(const PrimitivePtr &prim, PrimitiveTValue *
   attr->mode = schema::ReduceMode_ReduceMean;

   attr->keepDims = GetValue<bool>(prim->GetAttr("keep_dims"));
-  if (inputs.size() == kReduceInputNum) {
-    auto inputNode = inputs[kReduceInputIndex];
+  if (inputs.size() == kAnfPopulaterTwo) {
+    auto inputNode = inputs[kAnfPopulaterOne];
     MS_ASSERT(inputNode != nullptr);
     if (inputNode->isa<ValueNode>()) {
       auto valueNode = inputNode->cast<ValueNodePtr>();
@@ -26,8 +26,8 @@ int AnfTransposePopulater::Populate(const PrimitivePtr &prim, PrimitiveTValue *p
                                     const std::vector<AnfNodePtr> &inputs) {
   auto primitive = std::make_unique<schema::PrimitiveT>();
   auto attr = std::make_unique<schema::TransposeT>();
-  MS_ASSERT(inputs.size() == kAnfPopulaterThree);
-  auto inputNode = inputs[kAnfPopulaterTwo];
+  MS_ASSERT(inputs.size() == kAnfPopulaterTwo);
+  auto inputNode = inputs[kAnfPopulaterOne];
   if (inputNode->isa<ValueNode>()) {
     auto valNode = inputNode->cast<ValueNodePtr>();
     MS_ASSERT(valNode != nullptr);
@@ -13,8 +13,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#ifndef MINDSPORE_ANF_BATCHNORM_PARSER_H
-#define MINDSPORE_ANF_BATCHNORM_PARSER_H
+#ifndef MINDSPORE_TUPLE_GETITEM_PARSER_H
+#define MINDSPORE_TUPLE_GETITEM_PARSER_H
 #include "tools/anf_importer/anf_populater/anf_node_populater.h"
 #include <vector>
 namespace mindspore::lite {
@@ -718,6 +718,8 @@ bool AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &nod
     MS_EXCEPTION_IF_NULL(param_value);
     param_value->set_tensor_addr(tensor_data_buf);
     param_value->set_tensor_size(tensor_info->Size());
+    param_value->set_tensor_type(tensor_info->data_type());
+    param_value->set_tensor_shape(tensor_info->shape());
     node->set_default_param(param_value);
   }
   anfnode_build_map_[value_proto.name()] = node;
@@ -1088,7 +1090,12 @@ bool AnfImporterFromProtobuf::BuildReturnForFuncGraph(const FuncGraphPtr &output
   std::vector<AnfNodePtr> inputs;
   if (importProto.output_size() > 1) {
     inputs.clear();
-    inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
+    auto primitiveT = std::make_unique<schema::PrimitiveT>();
+    MS_ASSERT(primitiveT != nullptr);
+    primitiveT->value.type = schema::PrimitiveType_MakeTuple;
+    std::shared_ptr<PrimitiveTValue> primitiveTValuePtr = std::make_shared<PrimitiveTValue>(primitiveT.release());
+    MS_ASSERT(primitiveTValuePtr != nullptr);
+    inputs.push_back(NewValueNode(primitiveTValuePtr));
     AbstractBasePtrList elem;
     for (int out_size = 0; out_size < importProto.output_size(); ++out_size) {
       const onnx::ValueInfoProto &output_node = importProto.output(out_size);
@@ -1099,7 +1106,12 @@ bool AnfImporterFromProtobuf::BuildReturnForFuncGraph(const FuncGraphPtr &output
     auto maketuple_ptr = outputFuncGraph->NewCNode(inputs);
     maketuple_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem));
     inputs.clear();
-    inputs.push_back(NewValueNode(prim::kPrimReturn));
+    auto primReturn = std::make_unique<schema::PrimitiveT>();
+    MS_ASSERT(primReturn != nullptr);
+    primReturn->value.type = schema::PrimitiveType_Return;
+    std::shared_ptr<PrimitiveTValue> primitiveTReturnValuePtr = std::make_shared<PrimitiveTValue>(primReturn.release());
+    MS_ASSERT(primitiveTReturnValuePtr != nullptr);
+    inputs.push_back(NewValueNode(primitiveTReturnValuePtr));
     inputs.push_back(maketuple_ptr);
     auto return_node = outputFuncGraph->NewCNode(inputs);
     MS_EXCEPTION_IF_NULL(return_node);
@@ -69,6 +69,8 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   if (IsMultiOutputTensors(func_graph, conv_node)) {
     return transform_node;
   }
+
+  auto abstr = transform_node->abstract();
   int kernel_nums = Get_Kenrnel_nums(conv_node);
   if (kernel_nums <= 0) {
     MS_LOG(ERROR) << "Unsupported conv node, " << conv_node->DebugString();
@@ -90,6 +92,7 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   } else {
     MS_LOG(EXCEPTION) << "Unsupported opType, " << type;
   }
+  pre_node->set_abstract(abstr);
   return pre_node;
 }
