From dfbb232468da597d47f895bf7a68bc18d2d3db7d Mon Sep 17 00:00:00 2001 From: hangangqiang Date: Sat, 20 Jun 2020 09:40:31 +0800 Subject: [PATCH] mindspore server inference --- include/inference.h | 42 ++ include/ms_tensor.h | 69 +++ mindspore/ccsrc/CMakeLists.txt | 26 + mindspore/ccsrc/ir/CMakeLists.txt | 4 + mindspore/ccsrc/ir/dtype/type.h | 58 +- mindspore/ccsrc/ir/dtype/type_id.h | 81 +++ .../lite/param_value_lite.h} | 14 +- mindspore/ccsrc/ir/lite/tensor.cc | 152 ++++++ mindspore/ccsrc/ir/lite/tensor.h | 97 ++++ mindspore/ccsrc/ir/tensor.cc | 65 +++ mindspore/ccsrc/ir/tensor.h | 43 +- mindspore/ccsrc/kernel/oplib/opinfo.h | 24 + mindspore/ccsrc/kernel/oplib/oplib.h | 5 + mindspore/ccsrc/kernel/oplib/oploader.h | 43 ++ mindspore/ccsrc/minnie/tensor_minnie.h | 76 --- mindspore/ccsrc/pipeline/init.cc | 6 + mindspore/ccsrc/session/session.cc | 148 +++++ mindspore/ccsrc/session/session.h | 50 ++ mindspore/ccsrc/utils/CMakeLists.txt | 6 + mindspore/ccsrc/utils/base_ref_utils.cc | 58 ++ .../base_ref_utils.h} | 24 +- .../ccsrc/utils/load_onnx/anf_converter.cc | 143 +++++ .../ccsrc/utils/load_onnx/anf_converter.h | 40 ++ .../ccsrc/utils/load_onnx/anf_model_parser.cc | 515 ++++++++++++++++++ .../ccsrc/utils/load_onnx/anf_model_parser.h | 79 +++ tests/ut/cpp/CMakeLists.txt | 3 + 26 files changed, 1716 insertions(+), 155 deletions(-) create mode 100644 include/inference.h create mode 100644 include/ms_tensor.h create mode 100644 mindspore/ccsrc/ir/dtype/type_id.h rename mindspore/ccsrc/{minnie/param_value_minnie.h => ir/lite/param_value_lite.h} (72%) create mode 100644 mindspore/ccsrc/ir/lite/tensor.cc create mode 100644 mindspore/ccsrc/ir/lite/tensor.h create mode 100644 mindspore/ccsrc/kernel/oplib/oploader.h delete mode 100644 mindspore/ccsrc/minnie/tensor_minnie.h create mode 100644 mindspore/ccsrc/session/session.cc create mode 100644 mindspore/ccsrc/session/session.h create mode 100644 mindspore/ccsrc/utils/base_ref_utils.cc rename mindspore/ccsrc/{minnie/tensor_minnie.cc => utils/base_ref_utils.h} (58%) create mode 100644 mindspore/ccsrc/utils/load_onnx/anf_converter.cc create mode 100644 mindspore/ccsrc/utils/load_onnx/anf_converter.h create mode 100644 mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc create mode 100644 mindspore/ccsrc/utils/load_onnx/anf_model_parser.h diff --git a/include/inference.h b/include/inference.h new file mode 100644 index 00000000000..bb4cff7ec4d --- /dev/null +++ b/include/inference.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_INCLUDE_MS_SESSION_H
+#define MINDSPORE_INCLUDE_MS_SESSION_H
+
+#include <memory>
+#include <string>
+#include <vector>
+#include "include/ms_tensor.h"
+
+namespace mindspore {
+class FuncGraph;
+namespace inference {
+class MS_API MSSession {
+ public:
+  MSSession() = default;
+
+  static std::shared_ptr<MSSession> CreateSession(const std::string &device, uint32_t device_id);
+
+  virtual uint32_t CompileGraph(std::shared_ptr<FuncGraph> funcGraphPtr) = 0;
+
+  virtual MultiTensor RunGraph(uint32_t graph_id, const std::vector<std::shared_ptr<MSTensor>> &inputs) = 0;
+};
+
+std::shared_ptr<FuncGraph> MS_API LoadModel(const char *model_buf, size_t size, const std::string &device);
+}  // namespace inference
+}  // namespace mindspore
+#endif  // MINDSPORE_INCLUDE_MS_SESSION_H
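A minimal sketch of how this new public API is meant to be driven end to end, using only the signatures declared above. The device string "Ascend" and the error handling are illustrative assumptions, not part of this patch:

#include <memory>
#include <vector>
#include "include/inference.h"

// Hypothetical caller: load a serialized model buffer, compile it, run it once.
int InferOnce(const char *model_buf, size_t size,
              const std::vector<std::shared_ptr<mindspore::inference::MSTensor>> &inputs) {
  auto func_graph = mindspore::inference::LoadModel(model_buf, size, "Ascend");
  auto session = mindspore::inference::MSSession::CreateSession("Ascend", 0);
  if (func_graph == nullptr || session == nullptr) {
    return -1;  // load or session creation failed
  }
  uint32_t graph_id = session->CompileGraph(func_graph);
  auto outputs = session->RunGraph(graph_id, inputs);  // MultiTensor: one tensor group per output
  return outputs.empty() ? -1 : 0;
}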
diff --git a/include/ms_tensor.h b/include/ms_tensor.h
new file mode 100644
index 00000000000..2e715aa7733
--- /dev/null
+++ b/include/ms_tensor.h
@@ -0,0 +1,69 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_INCLUDE_MS_TENSOR_H_
+#define MINDSPORE_INCLUDE_MS_TENSOR_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+#include "ir/dtype/type_id.h"
+
+namespace mindspore {
+#define MS_API __attribute__((visibility("default")))
+namespace inference {
+class MS_API MSTensor {
+ public:
+  MSTensor() = default;
+  // brief Create a MSTensor pointer.
+  //
+  // param data_type DataTypeId of tensor to be created.
+  // param shape Shape of tensor to be created.
+  // return MSTensor pointer.
+  static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape);
+
+  ~MSTensor() = default;
+
+  virtual TypeId data_type() const = 0;
+
+  virtual TypeId set_data_type(const TypeId data_type) = 0;
+
+  virtual std::vector<int> shape() const = 0;
+
+  virtual size_t set_shape(const std::vector<int> &shape) = 0;
+
+  virtual int DimensionSize(size_t index) const = 0;
+  // brief Get number of element in MSTensor.
+  //
+  // return Number of element in MSTensor.
+  virtual int ElementsNum() const = 0;
+
+  virtual std::size_t hash() const = 0;
+  // brief Get byte size of data in MSTensor.
+  //
+  // return Byte size of data in MSTensor.
+  virtual size_t Size() const = 0;
+  // brief Get pointer of data in MSTensor.
+  //
+  // The data pointer can be used to both write and read data in MSTensor.
+  //
+  // return A pointer points to data in MSTensor.
+  virtual void *MutableData() const = 0;
+};
+using MultiTensor = std::vector<std::vector<std::shared_ptr<MSTensor>>>;
+}  // namespace inference
+}  // namespace mindspore
+#endif  // MINDSPORE_INCLUDE_MS_TENSOR_H_
diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt
index 96da594f60f..44f78d62164 100644
--- a/mindspore/ccsrc/CMakeLists.txt
+++ b/mindspore/ccsrc/CMakeLists.txt
@@ -237,3 +237,29 @@ if (ENABLE_MINDDATA)
     add_subdirectory(mindrecord)
     add_subdirectory(dataset)
 endif ()
+
+# build inference
+set(LOAD_ONNX_SRC
+        ${CMAKE_CURRENT_SOURCE_DIR}/utils/load_onnx/anf_converter.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/utils/load_onnx/anf_model_parser.cc
+        )
+add_library(inference SHARED
+        ${CMAKE_CURRENT_SOURCE_DIR}/session/session.cc
+        ${LOAD_ONNX_SRC}
+        )
+target_link_libraries(inference PRIVATE ${PYTHON_LIB} ${SECUREC_LIBRARY}
+        -Wl,--whole-archive mindspore -Wl,--no-whole-archive mindspore_gvar mindspore::protobuf)
+
+if (ENABLE_CPU)
+    target_link_libraries(inference PRIVATE mindspore::dnnl mindspore::mkldnn)
+endif ()
+
+if (USE_GLOG)
+    target_link_libraries(inference PRIVATE mindspore::glog)
+else()
+    if (CMAKE_SYSTEM_NAME MATCHES "Linux")
+        target_link_options(inference PRIVATE -Wl,-init,mindspore_log_init)
+    elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin")
+        set_target_properties(inference PROPERTIES MACOSX_RPATH ON)
+    endif ()
+endif()
diff --git a/mindspore/ccsrc/ir/CMakeLists.txt b/mindspore/ccsrc/ir/CMakeLists.txt
index 77bc1b7661a..2a0b81ae047 100644
--- a/mindspore/ccsrc/ir/CMakeLists.txt
+++ b/mindspore/ccsrc/ir/CMakeLists.txt
@@ -1,3 +1,7 @@
 file(GLOB_RECURSE _IR_SRC_LIST ./*.cc dtype/*.cc)
+file(GLOB_RECURSE _IR_LITE_SRC_FILES
+        ./lite/tensor.cc
+        )
+list(REMOVE_ITEM _IR_SRC_LIST ${_IR_LITE_SRC_FILES})
 set_property(SOURCE ${_IR_SRC_LIST} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_IR)
 add_library(_mindspore_ir_obj OBJECT ${_IR_SRC_LIST})
diff --git a/mindspore/ccsrc/ir/dtype/type.h b/mindspore/ccsrc/ir/dtype/type.h
index a4035abf50a..bfe39af43c0 100644
--- a/mindspore/ccsrc/ir/dtype/type.h
+++ b/mindspore/ccsrc/ir/dtype/type.h
@@ -34,65 +34,9 @@
 
 #include "ir/base.h"
 #include "ir/named.h"
+#include "ir/dtype/type_id.h"
 
 namespace mindspore {
-//
-// Supported meta type
-//
-enum TypeId : int {
-  kTypeUnknown = 0,
-  kMetaTypeBegin = kTypeUnknown,
-  kMetaTypeType,  // Type
-  kMetaTypeAnything,
-  kMetaTypeObject,
-  kMetaTypeTypeType,  // TypeType
-  kMetaTypeProblem,
-  kMetaTypeExternal,
-  kMetaTypeNone,
-  kMetaTypeNull,
-  kMetaTypeEllipsis,
-  kMetaTypeEnd,
-  //
-  // Object types
-  //
-  kObjectTypeBegin = kMetaTypeEnd,
-  kObjectTypeNumber,
-  kObjectTypeString,
-  kObjectTypeList,
-  kObjectTypeTuple,
-  kObjectTypeSlice,
-  kObjectTypeKeyword,
-  kObjectTypeTensorType,
-  kObjectTypeClass,
-  kObjectTypeDictionary,
-  kObjectTypeFunction,
-  kObjectTypeJTagged,
-  kObjectTypeSymbolicKeyType,
-  kObjectTypeEnvType,
-  kObjectTypeRefKey,
-  kObjectTypeRef,
-  kObjectTypeEnd,
-  //
-  // Number Types
-  //
-  kNumberTypeBegin = kObjectTypeEnd,
-  kNumberTypeBool,
-  kNumberTypeInt,
-  kNumberTypeInt8,
-  kNumberTypeInt16,
-  kNumberTypeInt32,
-  kNumberTypeInt64,
-  kNumberTypeUInt,
-  kNumberTypeUInt8,
-  kNumberTypeUInt16,
-  kNumberTypeUInt32,
-  kNumberTypeUInt64,
-  kNumberTypeFloat,
-  kNumberTypeFloat16,
-  kNumberTypeFloat32,
-  kNumberTypeFloat64,
-  kNumberTypeEnd
-};
 
 TypeId IntBitsToTypeId(const int nbits);
 TypeId UIntBitsToTypeId(const int nbits);
diff --git a/mindspore/ccsrc/ir/dtype/type_id.h b/mindspore/ccsrc/ir/dtype/type_id.h
new file mode 100644
index 00000000000..61a87de964a
--- /dev/null
+++ b/mindspore/ccsrc/ir/dtype/type_id.h
@@ -0,0 +1,81 @@
+/**
+ * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
+ *
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_IR_DTYPE_TYPE_ID_H_
+#define MINDSPORE_CCSRC_IR_DTYPE_TYPE_ID_H_
+
+namespace mindspore {
+//
+// Supported meta type
+//
+enum TypeId : int {
+  kTypeUnknown = 0,
+  kMetaTypeBegin = kTypeUnknown,
+  kMetaTypeType,  // Type
+  kMetaTypeAnything,
+  kMetaTypeObject,
+  kMetaTypeTypeType,  // TypeType
+  kMetaTypeProblem,
+  kMetaTypeExternal,
+  kMetaTypeNone,
+  kMetaTypeNull,
+  kMetaTypeEllipsis,
+  kMetaTypeEnd,
+  //
+  // Object types
+  //
+  kObjectTypeBegin = kMetaTypeEnd,
+  kObjectTypeNumber,
+  kObjectTypeString,
+  kObjectTypeList,
+  kObjectTypeTuple,
+  kObjectTypeSlice,
+  kObjectTypeKeyword,
+  kObjectTypeTensorType,
+  kObjectTypeClass,
+  kObjectTypeDictionary,
+  kObjectTypeFunction,
+  kObjectTypeJTagged,
+  kObjectTypeSymbolicKeyType,
+  kObjectTypeEnvType,
+  kObjectTypeRefKey,
+  kObjectTypeRef,
+  kObjectTypeEnd,
+  //
+  // Number Types
+  //
+  kNumberTypeBegin = kObjectTypeEnd,
+  kNumberTypeBool,
+  kNumberTypeInt,
+  kNumberTypeInt8,
+  kNumberTypeInt16,
+  kNumberTypeInt32,
+  kNumberTypeInt64,
+  kNumberTypeUInt,
+  kNumberTypeUInt8,
+  kNumberTypeUInt16,
+  kNumberTypeUInt32,
+  kNumberTypeUInt64,
+  kNumberTypeFloat,
+  kNumberTypeFloat16,
+  kNumberTypeFloat32,
+  kNumberTypeFloat64,
+  kNumberTypeEnd
+};
+}  // namespace mindspore
+#endif  // MINDSPORE_CCSRC_IR_DTYPE_TYPE_ID_H_
diff --git a/mindspore/ccsrc/minnie/param_value_minnie.h b/mindspore/ccsrc/ir/lite/param_value_lite.h
similarity index 72%
rename from mindspore/ccsrc/minnie/param_value_minnie.h
rename to mindspore/ccsrc/ir/lite/param_value_lite.h
index da2b157503c..2b249cfa4f4 100644
--- a/mindspore/ccsrc/minnie/param_value_minnie.h
+++ b/mindspore/ccsrc/ir/lite/param_value_lite.h
@@ -14,18 +14,18 @@
  * limitations under the License.
  */
 
-#ifndef MINDSPORE_CCSRC_MINNIE_PARAM_VALUE_MINNIE_H_
-#define MINDSPORE_CCSRC_MINNIE_PARAM_VALUE_MINNIE_H_
+#ifndef MINDSPORE_CCSRC_IR_LITE_PARAM_VALUE_LITE_H_
+#define MINDSPORE_CCSRC_IR_LITE_PARAM_VALUE_LITE_H_
 
 #include <memory>
 
 #include "ir/anf.h"
 
 namespace mindspore {
-class ParamValueMinnie : public ParamValue {
+class ParamValueLite : public ParamValue {
  public:
-  ParamValueMinnie() : tensor_addr_(nullptr), tensor_size_(0) {}
-  virtual ~ParamValueMinnie() = default;
+  ParamValueLite() : tensor_addr_(nullptr), tensor_size_(0) {}
+  virtual ~ParamValueLite() = default;
 
   size_t tensor_size() const { return tensor_size_; }
   void set_tensor_size(size_t size) { tensor_size_ = size; }
@@ -38,6 +38,6 @@ class ParamValueMinnie : public ParamValue {
   size_t tensor_size_;
 };
 
-using ParamValueMinniePtr = std::shared_ptr<ParamValueMinnie>;
+using ParamValueLitePtr = std::shared_ptr<ParamValueLite>;
 }  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_MINNIE_PARAM_VALUE_MINNIE_H_
+#endif  // MINDSPORE_CCSRC_IR_LITE_PARAM_VALUE_LITE_H_
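A short sketch of how a raw weight blob might be attached to a graph parameter through this class. The `set_tensor_addr` accessor is an assumption: it is not visible in the hunk above (it sits in unchanged context lines) but is expected to mirror `set_tensor_size`:

#include <memory>
#include "ir/lite/param_value_lite.h"

// Hypothetical helper: wrap an existing weight buffer so a Parameter can carry it.
mindspore::ParamValueLitePtr WrapWeight(void *weight_buf, size_t nbytes) {
  auto value = std::make_shared<mindspore::ParamValueLite>();
  value->set_tensor_addr(weight_buf);  // assumed accessor, mirroring set_tensor_size
  value->set_tensor_size(nbytes);
  return value;
}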
diff --git a/mindspore/ccsrc/ir/lite/tensor.cc b/mindspore/ccsrc/ir/lite/tensor.cc
new file mode 100644
index 00000000000..2957495aa48
--- /dev/null
+++ b/mindspore/ccsrc/ir/lite/tensor.cc
@@ -0,0 +1,152 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utility>
+#include <vector>
+#include "ir/lite/tensor.h"
+#include "securec/include/securec.h"
+
+namespace mindspore {
+namespace tensor {
+#define kMaxMallocSize 1024 * 1024 * 100
+Tensor::Tensor(const TypeId data_type, const std::vector<int> &shape) : MetaTensor(data_type, shape) {}
+
+Tensor::Tensor(const TypePtr &type_ptr, const std::vector<int> &shape) : MetaTensor(type_ptr, shape) {}
+
+Tensor::Tensor(const Tensor &tensor) : MetaTensor(tensor) {
+  this->data_type_ = tensor.data_type_;
+  this->shape_ = tensor.shape_;
+  auto ret = CopyTensorData(tensor);
+  if (0 != ret) {
+    MS_LOG(EXCEPTION) << "CopyTensorData error";
+  }
+}
+
+int Tensor::CopyTensorData(const Tensor &srcTensor) {
+  if (srcTensor.data_ == nullptr) {
+    MS_LOG(ERROR) << "data of srcTensor is nullptr";
+    return -1;
+  }
+  size_t data_size = this->Size();
+  MS_ASSERT(data_size == srcTensor.Size());
+  if (this->data_ == nullptr) {
+    if (data_size > kMaxMallocSize) {
+      MS_LOG(ERROR) << "Malloc size is too big while copying data, " << data_size << " bytes";
+      return -1;
+    }
+    this->data_ = malloc(data_size);
+  }
+  memcpy_s(this->data_, data_size, srcTensor.data_, srcTensor.Size());
+  return 0;
+}
+
+Tensor::~Tensor() {
+  if (nullptr != this->data_) {
+    free(this->data_);
+  }
+}
+
+Tensor &Tensor::operator=(const Tensor &tensor) {
+  if (&tensor == this) {
+    return *this;
+  }
+  this->shape_ = tensor.shape_;
+  this->data_type_ = tensor.data_type_;
+  auto ret = CopyTensorData(tensor);
+  if (0 != ret) {
+    MS_LOG(EXCEPTION) << "CopyTensorData error";
+  }
+  return *this;
+}
+
+bool Tensor::operator==(const Tensor &tensor) {
+  return data_ == tensor.data_ && shape_ == tensor.shape_ && data_type_ == tensor.data_type_;
+}
+
+bool Tensor::operator==(const Value &other) const {
+  if (other.isa<Tensor>()) {
+    auto other_ = static_cast<const Tensor &>(other);
+    return *this == other_;
+  } else {
+    return false;
+  }
+}
+}  // namespace tensor
+
+namespace inference {
+MSTensor *MSTensor::CreateTensor(TypeId data_type, const std::vector<int> &shape) {
+  return new Tensor(data_type, shape);
+}
+
+Tensor::Tensor() { this->tensor_impl_ = std::make_shared<tensor::Tensor>(); }
+
+Tensor::Tensor(TypeId data_type, const std::vector<int> &shape) {
+  this->tensor_impl_ = std::make_shared<tensor::Tensor>(data_type, shape);
+}
+
+Tensor::Tensor(std::shared_ptr<tensor::Tensor> tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); }
+
+TypeId Tensor::data_type() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->data_type();
+}
+
+TypeId Tensor::set_data_type(TypeId data_type) {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->set_data_type(data_type);
+}
+
+std::vector<int> Tensor::shape() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->shape();
+}
+
+size_t Tensor::set_shape(const std::vector<int> &shape) {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->set_shape(shape);
+}
+
+int Tensor::DimensionSize(size_t index) const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->DimensionSize(index);
+}
+
+int Tensor::ElementsNum() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->ElementsNum();
+}
+
+std::size_t Tensor::hash() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->hash();
+}
+
+std::shared_ptr<tensor::Tensor> Tensor::tensor() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_;
+}
+
+size_t Tensor::Size() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->Size();
+}
+
+void *Tensor::MutableData() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->Data();
+}
+}  // namespace inference
+}  // namespace mindspore
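A sketch of the intended write path through the public tensor factory, using only the accessors declared in ms_tensor.h. Note the hedge: on the server build (ir/tensor.cc below) MutableData() materializes storage via data_c(true); on this lite build the underlying buffer must already exist. The fill value is illustrative:

#include <memory>
#include <vector>
#include "include/ms_tensor.h"

// Create a 1x3 float32 tensor and fill it through MutableData().
std::shared_ptr<mindspore::inference::MSTensor> MakeFilled() {
  std::vector<int> shape = {1, 3};
  std::shared_ptr<mindspore::inference::MSTensor> t(
    mindspore::inference::MSTensor::CreateTensor(mindspore::kNumberTypeFloat32, shape));
  auto *buf = static_cast<float *>(t->MutableData());
  for (int i = 0; i < t->ElementsNum(); ++i) {
    buf[i] = 1.0f;  // illustrative fill value
  }
  return t;
}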
diff --git a/mindspore/ccsrc/ir/lite/tensor.h b/mindspore/ccsrc/ir/lite/tensor.h
new file mode 100644
index 00000000000..0dcf5cc0eee
--- /dev/null
+++ b/mindspore/ccsrc/ir/lite/tensor.h
@@ -0,0 +1,97 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_IR_LITE_TENSOR_H_
+#define MINDSPORE_CCSRC_IR_LITE_TENSOR_H_
+
+#include <memory>
+#include <vector>
+#include "ir/meta_tensor.h"
+#include "ir/dtype/type.h"
+#include "include/ms_tensor.h"
+
+namespace mindspore {
+namespace tensor {
+class Tensor : public MetaTensor {
+ public:
+  Tensor() : MetaTensor() {}
+
+  Tensor(const TypeId data_type, const std::vector<int> &shape);
+
+  Tensor(const TypePtr &type_ptr, const std::vector<int> &shape);
+
+  Tensor(const Tensor &tensor);
+
+  ~Tensor();
+
+  int CopyTensorData(const Tensor &srcTensor);
+
+  MS_DECLARE_PARENT(Tensor, MetaTensor)
+
+  virtual Tensor &operator=(const Tensor &tensor);
+
+  virtual bool operator==(const Tensor &tensor);
+
+  bool operator==(const Value &other) const override;
+
+  size_t Size() const { return MetaTensor::ElementsNum() * GetTypeByte(TypeIdToType(this->data_type_)); }
+
+  void *Data() const { return data_; }
+
+ protected:
+  void *data_ = nullptr;
+};
+
+using TensorPtr = std::shared_ptr<Tensor>;
+}  // namespace tensor
+
+namespace inference {
+class Tensor : public MSTensor {
+ public:
+  Tensor();
+
+  Tensor(TypeId data_type, const std::vector<int> &shape);
+
+  explicit Tensor(std::shared_ptr<tensor::Tensor> tensor_ptr);
+
+  ~Tensor() = default;
+
+  TypeId data_type() const override;
+
+  TypeId set_data_type(const TypeId data_type) override;
+
+  std::vector<int> shape() const override;
+
+  size_t set_shape(const std::vector<int> &shape) override;
+
+  int DimensionSize(size_t index) const override;
+
+  int ElementsNum() const override;
+
+  std::size_t hash() const override;
+
+  std::shared_ptr<tensor::Tensor> tensor() const;
+
+  size_t Size() const override;
+
+  void *MutableData() const override;
+
+ protected:
+  std::shared_ptr<tensor::Tensor> tensor_impl_;
+};
+}  // namespace inference
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CCSRC_IR_LITE_TENSOR_H_
diff --git a/mindspore/ccsrc/ir/tensor.cc b/mindspore/ccsrc/ir/tensor.cc
index 4e2e996bacf..81107ebbddf 100644
--- a/mindspore/ccsrc/ir/tensor.cc
+++ b/mindspore/ccsrc/ir/tensor.cc
@@ -18,6 +18,7 @@
 
 #include <functional>
 #include <numeric>
+#include <utility>
 #include <vector>
 #include <sstream>
 #include <string>
@@ -505,4 +506,68 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
                              .def_property_readonly("shape", &MetaTensor::shape, "Get the MetaTensor's shape.");
                          }));
 }  // namespace tensor
+
+namespace inference {
+MSTensor *MSTensor::CreateTensor(TypeId data_type, const std::vector<int> &shape) {
+  return new Tensor(data_type, shape);
+}
+
+Tensor::Tensor() { this->tensor_impl_ = std::make_shared<tensor::Tensor>(); }
+
+Tensor::Tensor(TypeId data_type, const std::vector<int> &shape) {
+  this->tensor_impl_ = std::make_shared<tensor::Tensor>(data_type, shape);
+}
+
+Tensor::Tensor(std::shared_ptr<tensor::Tensor> tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); }
+
+TypeId Tensor::data_type() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->data_type();
+}
+
+TypeId Tensor::set_data_type(TypeId data_type) {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->set_data_type(data_type);
+}
+
+std::vector<int> Tensor::shape() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->shape();
+}
+
+size_t Tensor::set_shape(const std::vector<int> &shape) {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->set_shape(shape);
+}
+
+int Tensor::DimensionSize(size_t index) const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->DimensionSize(index);
+}
+
+int Tensor::ElementsNum() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->ElementsNum();
+}
+
+std::size_t Tensor::hash() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->hash();
+}
+
+std::shared_ptr<tensor::Tensor> Tensor::tensor() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_;
+}
+
+size_t Tensor::Size() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->data().nbytes();
+}
+
+void *Tensor::MutableData() const {
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  return this->tensor_impl_->data_c(true);
+}
+}  // namespace inference
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/ir/tensor.h b/mindspore/ccsrc/ir/tensor.h
index 700dcd49102..585e9628490 100644
--- a/mindspore/ccsrc/ir/tensor.h
+++ b/mindspore/ccsrc/ir/tensor.h
@@ -27,6 +27,7 @@
 #include "Eigen/Core"
 #include "device/device_address.h"
 #include "ir/meta_tensor.h"
+#include "include/ms_tensor.h"
 #include "utils/log_adapter.h"
 
 namespace py = pybind11;
@@ -215,6 +216,11 @@ class Tensor : public MetaTensor {
   // return The pointer to the object
   void *data_c(bool writable = false);
 
+  // brief Get Tensor data byte-size for c++ type
+  //
+  // return byte size of Tensor data
+  size_t Size() const { return this->data().nbytes(); }
+
   // brief Get data type from tensor data.
   //
   // param buf The buffer info of the py::array data.
@@ -266,10 +272,45 @@ class Tensor : public MetaTensor {
   std::string id_{""};
   DeviceAddressPtr device_address_{nullptr};
 };
-
 using TensorPtr = std::shared_ptr<Tensor>;
 using TensorPtrList = std::vector<std::shared_ptr<Tensor>>;
 }  // namespace tensor
+
+namespace inference {
+class Tensor : public MSTensor {
+ public:
+  Tensor();
+
+  Tensor(TypeId data_type, const std::vector<int> &shape);
+
+  explicit Tensor(std::shared_ptr<tensor::Tensor> tensor_ptr);
+
+  ~Tensor() = default;
+
+  TypeId data_type() const override;
+
+  TypeId set_data_type(const TypeId data_type) override;
+
+  std::vector<int> shape() const override;
+
+  size_t set_shape(const std::vector<int> &shape) override;
+
+  int DimensionSize(size_t index) const override;
+
+  int ElementsNum() const override;
+
+  std::size_t hash() const override;
+
+  std::shared_ptr<tensor::Tensor> tensor() const;
+
+  size_t Size() const override;
+
+  void *MutableData() const override;
+
+ protected:
+  std::shared_ptr<tensor::Tensor> tensor_impl_;
+};
+}  // namespace inference
 }  // namespace mindspore
 
 #endif  // MINDSPORE_CCSRC_IR_TENSOR_H_
diff --git a/mindspore/ccsrc/kernel/oplib/opinfo.h b/mindspore/ccsrc/kernel/oplib/opinfo.h
index bb8defe74dd..f224a97efc9 100644
--- a/mindspore/ccsrc/kernel/oplib/opinfo.h
+++ b/mindspore/ccsrc/kernel/oplib/opinfo.h
@@ -90,6 +90,30 @@ class OpIOInfo {
 class OpInfo {
  public:
   OpInfo() = default;
+  OpInfo(const OpInfo &opinfo) {
+    op_name_ = opinfo.op_name();
+    imply_type_ = opinfo.imply_type();
+
+    impl_path_ = opinfo.impl_path();
+    fusion_type_ = opinfo.fusion_type();
+    async_flag_ = opinfo.async_flag_;
+    binfile_name_ = opinfo.binfile_name_;
+    compute_cost_ = opinfo.compute_cost_;
+    kernel_name_ = opinfo.kernel_name();
+    partial_flag_ = opinfo.partial_flag_;
+    dynamic_format_ = opinfo.dynamic_format_;
+    op_pattern_ = opinfo.op_pattern();
+    for (const auto &attr : opinfo.attrs_ptr()) {
+      attrs_ptr_.push_back(std::make_shared<OpAttr>(*attr));
+    }
+    for (const auto &input : opinfo.inputs_ptr()) {
+      inputs_ptr_.push_back(std::make_shared<OpIOInfo>(*input));
+    }
+    for (const auto &output : opinfo.outputs_ptr()) {
+      outputs_ptr_.push_back(std::make_shared<OpIOInfo>(*output));
+    }
+    ref_infos_ = opinfo.ref_infos();
+  }
   ~OpInfo() = default;
   std::string op_name() const { return op_name_; }
   OpImplyType imply_type() const { return imply_type_; }
diff --git a/mindspore/ccsrc/kernel/oplib/oplib.h b/mindspore/ccsrc/kernel/oplib/oplib.h
index 3d4dcad908e..47183455a24 100644
--- a/mindspore/ccsrc/kernel/oplib/oplib.h
+++ b/mindspore/ccsrc/kernel/oplib/oplib.h
@@ -29,7 +29,12 @@ class OpLib {
   OpLib() = default;
   virtual ~OpLib() = default;
   bool RegOp(const std::string &json_string, const std::string &impl_path);
+  static void RegOpInfo(std::shared_ptr<OpInfo> opinfo) {
+    op_info_.emplace_back(opinfo);
+    return;
+  }
   static std::shared_ptr<OpInfo> FindOp(const std::string &op_name, OpImplyType imply_type);
+  static const std::vector<std::shared_ptr<OpInfo>> &GetAllOpsInfo() { return op_info_; }
 
  protected:
   static std::vector<std::shared_ptr<OpInfo>> op_info_;
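A minimal sketch of the new registration path added to OpLib: RegOpInfo takes shared ownership of one OpInfo, and GetAllOpsInfo exposes the whole registry. The empty OpInfo here is illustrative; in practice the objects come from the Python-side op registry:

#include <memory>
#include "kernel/oplib/oplib.h"

// Register one OpInfo and confirm it is visible through the new accessor.
size_t RegisterExample() {
  auto info = std::make_shared<mindspore::kernel::OpInfo>();
  mindspore::kernel::OpLib::RegOpInfo(info);
  return mindspore::kernel::OpLib::GetAllOpsInfo().size();
}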
diff --git a/mindspore/ccsrc/kernel/oplib/oploader.h b/mindspore/ccsrc/kernel/oplib/oploader.h
new file mode 100644
index 00000000000..dd4c37e80b8
--- /dev/null
+++ b/mindspore/ccsrc/kernel/oplib/oploader.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_OPLOADER_H
+#define MINDSPORE_OPLOADER_H
+
+#include <vector>
+#include "kernel/oplib/oplib.h"
+
+namespace mindspore {
+namespace kernel {
+class OpInfoLoaderPy {
+ public:
+  OpInfoLoaderPy() = default;
+
+  ~OpInfoLoaderPy() = default;
+
+  size_t GetAllOpsInfo() {
+    auto ops = OpLib::GetAllOpsInfo();
+    auto op_infos = new std::vector<OpInfo *>();
+    for (auto op_info : ops) {
+      auto new_op_info = new OpInfo(*op_info);
+      op_infos->emplace_back(new_op_info);
+    }
+    // The vector address is returned as an integer so it can cross the pybind boundary.
+    return (size_t)op_infos;
+  }
+};
+}  // namespace kernel
+}  // namespace mindspore
+#endif  // MINDSPORE_OPLOADER_H
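The loader deliberately smuggles a heap-allocated vector's address across the Python binding as a plain size_t; the consumer casts it back, re-registers each deep-copied OpInfo, and frees the vector. A condensed C++ sketch of that round trip, mirroring what Session::RegAllOp later in this patch does through the Python C API:

#include <memory>
#include <vector>
#include "kernel/oplib/oplib.h"
#include "kernel/oplib/oploader.h"

void RoundTripExample() {
  mindspore::kernel::OpInfoLoaderPy loader;
  size_t addr = loader.GetAllOpsInfo();  // vector address encoded as an integer
  auto *infos = reinterpret_cast<std::vector<mindspore::kernel::OpInfo *> *>(addr);
  for (auto *info : *infos) {
    // shared_ptr adopts each raw OpInfo; the registry now owns them
    mindspore::kernel::OpLib::RegOpInfo(std::shared_ptr<mindspore::kernel::OpInfo>(info));
  }
  infos->clear();
  delete infos;  // the loader heap-allocates; the consumer owns the cleanup
}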
diff --git a/mindspore/ccsrc/minnie/tensor_minnie.h b/mindspore/ccsrc/minnie/tensor_minnie.h
deleted file mode 100644
index 25d94b7076b..00000000000
--- a/mindspore/ccsrc/minnie/tensor_minnie.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_CCSRC_MINNIE_TENSOR_MINNIE_H_
-#define MINDSPORE_CCSRC_MINNIE_TENSOR_MINNIE_H_
-
-#include <memory>
-
-#include "ir/meta_tensor.h"
-
-namespace mindspore {
-namespace tensor {
-// definition of Tensor Minnie
-class TensorMinnie : public MetaTensor {
- public:
-  TensorMinnie() : MetaTensor() {}
-  ~TensorMinnie() override = default;
-  MS_DECLARE_PARENT(TensorMinnie, MetaTensor)
-
-  // brief Overloads operator = for TensorMinnie.
-  //
-  // The constructed TensorMinnie object has the same type and shape with tensor_base.
-  //
-  // param meta_tensor An existing TensorMinnie object.
-  virtual TensorMinnie &operator=(const TensorMinnie &tensor);
-
-  // brief Compares two TensorMinnie objects.
-  //
-  // The constructed TensorMinnie object has the same type and shape with tensor_base.
-  //
-  // param meta_tensor The TensorMinnie object to be compared.
-  // return true: If having same type and shape, return true, or return false.
-  virtual bool operator==(const TensorMinnie &tensor);
-
-  // brief Get the tensor's size for C++
-  //
-  // return size_t
-  size_t tensor_size() const { return tensor_size_; }
-
-  // brief Set Tensor data size for c++ type
-  void set_tensor_size(size_t size) { tensor_size_ = size; }
-
-  // brief Get Tensor data pointer for c++ type
-  //
-  // return The pointer to the object
-  void *tensor_addr() const { return tensor_addr_; }
-
-  // brief Set Tensor data pointer for c++ type
-  void set_tensor_addr(void *addr) { tensor_addr_ = addr; }
-
- protected:
-  // brief Data addr of the tensor.
-  void *tensor_addr_;
-
-  // brief Data size of the tensor.
-  size_t tensor_size_;
-};
-
-using TensorMinniePtr = std::shared_ptr<TensorMinnie>;
-}  // namespace tensor
-}  // namespace mindspore
-
-#endif  // MINDSPORE_CCSRC_MINNIE_TENSOR_MINNIE_H_
diff --git a/mindspore/ccsrc/pipeline/init.cc b/mindspore/ccsrc/pipeline/init.cc
index 5c9a5736342..b8472de4094 100644
--- a/mindspore/ccsrc/pipeline/init.cc
+++ b/mindspore/ccsrc/pipeline/init.cc
@@ -17,6 +17,7 @@
 #include <pybind11/operators.h>
 #include <pybind11/stl.h>
 #include "kernel/oplib/oplib.h"
+#include "kernel/oplib/oploader.h"
 #include "pipeline/pipeline.h"
 #include "operator/composite/composite.h"
 #include "ir/signature.h"
@@ -45,6 +46,7 @@
 using PrimitivePy = mindspore::PrimitivePy;
 using MetaFuncGraph = mindspore::MetaFuncGraph;
 using EventWriter = mindspore::summary::EventWriter;
 using OpLib = mindspore::kernel::OpLib;
+using OpInfoLoaderPy = mindspore::kernel::OpInfoLoaderPy;
 using ParallelContext = mindspore::parallel::ParallelContext;
 using CostModelContext = mindspore::parallel::CostModelContext;
@@ -325,4 +327,8 @@ PYBIND11_MODULE(_c_expression, m) {
                            "Finalize gpu collective communication mode.");
 #endif
+
+  (void)py::class_<OpInfoLoaderPy, std::shared_ptr<OpInfoLoaderPy>>(m, "OpInfoLoaderPy")
+    .def(py::init())
+    .def("get_all_ops_info", &OpInfoLoaderPy::GetAllOpsInfo, "get all ops info.");
 }
diff --git a/mindspore/ccsrc/session/session.cc b/mindspore/ccsrc/session/session.cc
new file mode 100644
index 00000000000..f70ff316da0
--- /dev/null
+++ b/mindspore/ccsrc/session/session.cc
@@ -0,0 +1,148 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <memory>
+#include <mutex>
+#include "include/inference.h"
+#include "session/session.h"
+#include "utils/load_onnx/anf_converter.h"
+#include "session/session_basic.h"
+#include "session/session_factory.h"
+#include "utils/base_ref_utils.h"
+#include "kernel/oplib/oplib.h"
+#ifdef ENABLE_D
+#include "utils/context/ms_context.h"
+#include "session/ascend_session.h"
+#else
+#include "session/cpu_session.h"
+#endif
+
+namespace py = pybind11;
+namespace mindspore::inference {
+std::shared_ptr<FuncGraph> LoadModel(const char *model_buf, size_t size, const std::string &device) {
+  inference::Session::RegAllOp();
+  auto anf_graph = lite::AnfConverter::RunAnfConverter(model_buf, size);
+  return anf_graph;
+}
+
+std::shared_ptr<MSSession> MSSession::CreateSession(const std::string &device, uint32_t device_id) {
+  auto session = std::make_shared<Session>();
+  auto ret = session->Init(device, device_id);
+  if (ret != 0) {
+    return nullptr;
+  }
+  return session;
+}
+
+void Session::RegAllOp() {
+  static std::mutex init_mutex;
+  static bool Initialized = false;
+
+  std::lock_guard<std::mutex> lock(init_mutex);
+  if (Initialized) {
+    return;
+  }
+  Initialized = true;
+  MsContext::GetInstance()->set_execution_mode(kGraphMode);
+  Py_Initialize();
+  auto c_expression = PyImport_ImportModule("mindspore._c_expression");
+  if (c_expression == nullptr) {
+    MS_LOG(EXCEPTION) << "Failed to import mindspore._c_expression module.";
+    return;
+  }
+  PyObject *c_expression_dict = PyModule_GetDict(c_expression);
+
+  PyObject *op_info_loader_class = PyDict_GetItemString(c_expression_dict, "OpInfoLoaderPy");
+  if (op_info_loader_class == nullptr) {
+    MS_LOG(EXCEPTION) << "Failed to get op_info_loader_class from mindspore._c_expression.";
+    return;
+  }
+  PyObject *op_info_loader = PyInstanceMethod_New(op_info_loader_class);
+  if (op_info_loader == nullptr) {
+    MS_LOG(EXCEPTION) << "Failed to create op_info_loader instance.";
+    return;
+  }
+  PyObject *op_info_loader_ins = PyObject_CallObject(op_info_loader, nullptr);
+  if (op_info_loader_ins == nullptr) {
+    MS_LOG(EXCEPTION) << "Failed to call op_info_loader instance.";
+    return;
+  }
+  auto all_ops_info_vector_addr_ul = PyObject_CallMethod(op_info_loader_ins, "get_all_ops_info", nullptr);
+  if (all_ops_info_vector_addr_ul == nullptr) {
+    MS_LOG(EXCEPTION) << "Failed to call get_all_ops_info.";
+    return;
+  }
+  auto all_ops_info_vector_addr = PyLong_AsVoidPtr(all_ops_info_vector_addr_ul);
+  auto all_ops_info = static_cast<std::vector<kernel::OpInfo *> *>(all_ops_info_vector_addr);
+  for (auto op_info : *all_ops_info) {
+    kernel::OpLib::RegOpInfo(std::shared_ptr<kernel::OpInfo>(op_info));
+  }
+  all_ops_info->clear();
+  delete all_ops_info;
+  Py_DECREF(op_info_loader);
+  Py_DECREF(op_info_loader_class);
+  Py_DECREF(c_expression_dict);
+  Py_DECREF(c_expression);
+  return;
+}
+
+uint32_t Session::CompileGraph(std::shared_ptr<FuncGraph> funcGraphPtr) {
+  MS_ASSERT(session_impl_ != nullptr);
+  return session_impl_->CompileGraph(NOT_NULL(funcGraphPtr));
+}
+
+MultiTensor Session::RunGraph(uint32_t graph_id, const std::vector<std::shared_ptr<inference::MSTensor>> &inputs) {
+  std::vector<tensor::TensorPtr> inTensors;
+  inTensors.resize(inputs.size());  // std::transform writes through begin(); the slots must already exist
+  bool has_error = false;
+  std::transform(inputs.begin(), inputs.end(), inTensors.begin(),
+                 [&has_error](const std::shared_ptr<inference::MSTensor> &tensor_ptr) -> tensor::TensorPtr {
+                   if (tensor_ptr == nullptr) {
+                     MS_LOG(WARNING) << "input MSTensor is nullptr, return nullptr";
+                     has_error = true;
+                     return nullptr;
+                   }
+                   auto tensor = static_cast<inference::Tensor *>(tensor_ptr.get());
+                   if (tensor == nullptr) {
+                     MS_LOG(ERROR) << "Can not cast input MSTensor to tensor";
+                     has_error = true;
+                     return nullptr;
+                   }
+                   return tensor->tensor();
+                 });
+  if (has_error) {
+    MS_LOG(ERROR) << "Init Tensor failed, returning empty result";
+    std::vector<std::vector<std::shared_ptr<inference::MSTensor>>> multiTensor;
+    return multiTensor;
+  }
+  VectorRef outputs;
+  session_impl_->RunGraph(graph_id, inTensors, &outputs);
+
+  return TransformVectorRefToMultiTensor(outputs);
+}
+
+int Session::Init(const std::string &device, uint32_t device_id) {
+  RegAllOp();
+  session_impl_ = session::SessionFactory::Get().Create(device);
+  if (session_impl_ == nullptr) {
+    MS_LOG(ERROR) << "Session create failed! Please make sure target device:" << device << " is available.";
+    return -1;
+  }
+  session_impl_->Init(device_id);
+  return 0;
+}
+
+Session::Session() = default;
+}  // namespace mindspore::inference
diff --git a/mindspore/ccsrc/session/session.h b/mindspore/ccsrc/session/session.h
new file mode 100644
index 00000000000..b608163067d
--- /dev/null
+++ b/mindspore/ccsrc/session/session.h
@@ -0,0 +1,50 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_CCSRC_SESSION_SESSION_H
+#define MINDSPORE_CCSRC_SESSION_SESSION_H
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+#include <mutex>
+
+#include "session/session_basic.h"
+#include "ir/anf.h"
+#include "include/inference.h"
+
+namespace mindspore {
+namespace inference {
+class Session : public MSSession {
+ public:
+  Session();
+
+  uint32_t CompileGraph(std::shared_ptr<FuncGraph> funcGraphPtr) override;
+
+  MultiTensor RunGraph(uint32_t graph_id, const std::vector<std::shared_ptr<inference::MSTensor>> &inputs) override;
+
+  int Init(const std::string &device, uint32_t device_id);
+
+  static void RegAllOp();
+
+ private:
+  std::shared_ptr<session::SessionBasic> session_impl_ = nullptr;
+  std::vector<uint32_t> graph_id_;
+};
+}  // namespace inference
+}  // namespace mindspore
+#endif  // MINDSPORE_CCSRC_SESSION_SESSION_H
diff --git a/mindspore/ccsrc/utils/CMakeLists.txt b/mindspore/ccsrc/utils/CMakeLists.txt
index 71d68729b95..72f698a97e5 100644
--- a/mindspore/ccsrc/utils/CMakeLists.txt
+++ b/mindspore/ccsrc/utils/CMakeLists.txt
@@ -5,5 +5,11 @@ if (NOT ENABLE_GE)
     list(REMOVE_ITEM _UTILS_SRC_LIST ${_UTILS_GE_SRC_FILES})
 endif ()
 
+file(GLOB_RECURSE _UTILS_LITE_SRC_FILES
+        ./load_onnx/anf_converter.cc
+        ./load_onnx/anf_model_parser.cc
+        )
+list(REMOVE_ITEM _UTILS_SRC_LIST ${_UTILS_LITE_SRC_FILES})
+
 set_property(SOURCE ${_UTILS_SRC_LIST} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_UTILS)
 add_library(_mindspore_utils_obj OBJECT ${_UTILS_SRC_LIST})
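A sketch of feeding Session::RunGraph and unpacking the MultiTensor result, using only the interfaces added above. The shape and dtype are illustrative; the caller is expected to fill the input buffer through MutableData() before running:

#include <memory>
#include <vector>
#include "include/inference.h"

// Feed one input tensor and count the output tensors across all output groups.
size_t RunExample(const std::shared_ptr<mindspore::inference::MSSession> &session, uint32_t graph_id) {
  std::vector<std::shared_ptr<mindspore::inference::MSTensor>> inputs;
  std::shared_ptr<mindspore::inference::MSTensor> in(
    mindspore::inference::MSTensor::CreateTensor(mindspore::kNumberTypeFloat32, {1, 224, 224, 3}));
  inputs.push_back(in);
  auto outputs = session->RunGraph(graph_id, inputs);
  size_t n = 0;
  for (const auto &group : outputs) {  // one group of tensors per graph output
    n += group.size();
  }
  return n;
}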
diff --git a/mindspore/ccsrc/utils/base_ref_utils.cc b/mindspore/ccsrc/utils/base_ref_utils.cc
new file mode 100644
index 00000000000..617057b866b
--- /dev/null
+++ b/mindspore/ccsrc/utils/base_ref_utils.cc
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <vector>
+#include "utils/base_ref_utils.h"
+#include "include/ms_tensor.h"
+#include "ir/tensor.h"
+
+namespace mindspore {
+std::vector<std::shared_ptr<inference::MSTensor>> TransformBaseRefToMSTensor(const BaseRef &base_ref) {
+  std::vector<std::shared_ptr<inference::MSTensor>> msTensors;
+  if (utils::isa<VectorRef>(base_ref)) {
+    auto ref_list = utils::cast<VectorRef>(base_ref);
+    for (size_t i = 0; i < ref_list.size(); ++i) {
+      if (utils::isa<tensor::Tensor>(ref_list[i])) {
+        auto tensor_ptr = utils::cast<std::shared_ptr<tensor::Tensor>>(ref_list[i]);
+        MS_EXCEPTION_IF_NULL(tensor_ptr);
+        auto tensor = new inference::Tensor(tensor_ptr);
+        msTensors.emplace_back(std::shared_ptr<inference::MSTensor>(tensor));
+      } else {
+        MS_LOG(EXCEPTION) << "The output is not a tensor!";
+      }
+    }
+  } else if (utils::isa<tensor::Tensor>(base_ref)) {
+    auto tensor_ptr = utils::cast<std::shared_ptr<tensor::Tensor>>(base_ref);
+    MS_EXCEPTION_IF_NULL(tensor_ptr);
+    auto tensor = new inference::Tensor(tensor_ptr);
+    msTensors.emplace_back(std::shared_ptr<inference::MSTensor>(tensor));
+  } else {
+    MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor!";
+  }
+  return msTensors;
+}
+
+std::vector<std::vector<std::shared_ptr<inference::MSTensor>>> TransformVectorRefToMultiTensor(
+  const VectorRef &vector_ref) {
+  std::vector<std::vector<std::shared_ptr<inference::MSTensor>>> multiTensor;
+  for (size_t i = 0; i < vector_ref.size(); ++i) {
+    auto tensors = TransformBaseRefToMSTensor(vector_ref[i]);
+    multiTensor.emplace_back(tensors);
+  }
+  return multiTensor;
+}
+}  // namespace mindspore
diff --git a/mindspore/ccsrc/minnie/tensor_minnie.cc b/mindspore/ccsrc/utils/base_ref_utils.h
similarity index 58%
rename from mindspore/ccsrc/minnie/tensor_minnie.cc
rename to mindspore/ccsrc/utils/base_ref_utils.h
index 329bf228e69..787918c7240 100644
--- a/mindspore/ccsrc/minnie/tensor_minnie.cc
+++ b/mindspore/ccsrc/utils/base_ref_utils.h
@@ -14,21 +14,17 @@
  * limitations under the License.
  */
 
-#include "minnie/tensor_minnie.h"
+#include <memory>
+#include <vector>
+#include "utils/base_ref.h"
+#include "include/ms_tensor.h"
 
+#ifndef MINDSPORE_CCSRC_UTILS_BASE_REF_UTILS_H
+#define MINDSPORE_CCSRC_UTILS_BASE_REF_UTILS_H
 namespace mindspore {
-namespace tensor {
-TensorMinnie &TensorMinnie::operator=(const TensorMinnie &tensor) {
-  if (&tensor == this) {
-    return *this;
-  }
-  this->tensor_addr_ = tensor.tensor_addr();
-  this->tensor_size_ = tensor.tensor_size();
-  return *this;
-}
+std::vector<std::shared_ptr<inference::MSTensor>> TransformBaseRefToMSTensor(const BaseRef &base_ref);
 
-bool TensorMinnie::operator==(const TensorMinnie &tensor) {
-  return tensor_addr_ == tensor.tensor_addr() && tensor_size_ == tensor.tensor_size();
-}
-}  // namespace tensor
+std::vector<std::vector<std::shared_ptr<inference::MSTensor>>> TransformVectorRefToMultiTensor(
+  const VectorRef &vector_ref);
 }  // namespace mindspore
+#endif  // MINDSPORE_CCSRC_UTILS_BASE_REF_UTILS_H
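These helpers wrap each internal tensor::Tensor in an inference::Tensor, sharing the underlying buffer rather than copying it. A minimal sketch of the intended consumption point, mirroring Session::RunGraph above:

#include "utils/base_ref_utils.h"

// SessionBasic::RunGraph fills a VectorRef; this converts it for the public API.
mindspore::inference::MultiTensor Collect(const mindspore::VectorRef &outputs) {
  return mindspore::TransformVectorRefToMultiTensor(outputs);
}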
diff --git a/mindspore/ccsrc/utils/load_onnx/anf_converter.cc b/mindspore/ccsrc/utils/load_onnx/anf_converter.cc
new file mode 100644
index 00000000000..f46da657cce
--- /dev/null
+++ b/mindspore/ccsrc/utils/load_onnx/anf_converter.cc
@@ -0,0 +1,143 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <Python.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <climits>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+#include "utils/load_onnx/anf_model_parser.h"
+#include "utils/load_onnx/anf_converter.h"
+#include "google/protobuf/io/zero_copy_stream_impl.h"
+#include "proto/onnx.pb.h"
+#include "utils/log_adapter.h"
+
+namespace mindspore {
+namespace lite {
+
+const char WHITESPACE[] = "\t\n\v\f\r ";
+const int FLAG_PREFIX_LEN = 2;
+
+void AnfConverter::Trim(std::string *input) {
+  if (input == nullptr) {
+    return;
+  }
+  if (input->empty()) {
+    return;
+  }
+  input->erase(0, input->find_first_not_of(WHITESPACE));
+  input->erase(input->find_last_not_of(WHITESPACE) + 1);
+}
+
+int AnfConverter::ValidateFileStr(const std::string &modelFile, std::string fileType) {
+  if (modelFile.size() > fileType.size()) {
+    if (modelFile.substr(modelFile.size() - fileType.size()) == fileType) {
+      return 0;
+    } else {
+      return 1;
+    }
+  } else {
+    return 1;
+  }
+}
+
+bool AnfConverter::ReadOnnxFromBinary(const std::string &modelFile, google::protobuf::Message *onnx_model) {
+  std::unique_ptr<char[]> onnx_file(new (std::nothrow) char[PATH_MAX]{0});
+  if (realpath(modelFile.c_str(), onnx_file.get()) == nullptr) {
+    MS_LOG(ERROR) << "get realpath of " << modelFile << " failed";
+    return false;
+  }
+  int fd = open(onnx_file.get(), O_RDONLY);
+  if (fd < 0) {
+    MS_LOG(ERROR) << "open onnx file failed";
+    return false;
+  }
+  google::protobuf::io::FileInputStream input(fd);
+  google::protobuf::io::CodedInputStream code_input(&input);
+  code_input.SetTotalBytesLimit(INT_MAX, 536870912);
+  bool ret = onnx_model->ParseFromCodedStream(&code_input);
+  if (!ret) {
+    MS_LOG(ERROR) << "load onnx file failed";
+    (void)close(fd);
+    return false;
+  }
+  (void)close(fd);
+  MS_LOG(INFO) << "enter ReadProtoFromBinary success!";
+  return true;
+}
+
+std::shared_ptr<FuncGraph> AnfConverter::RunAnfConverter(const std::string &file_path) {
+  std::string modelFile;
+
+  std::string tmp = file_path;
+  Trim(&tmp);
+  const std::string flagItem(tmp);
+
+  size_t pos = flagItem.find_first_of("=");
+  if (pos == std::string::npos) {
+    MS_LOG(ERROR) << "Unsupported input format: expected a flag of the form name=path!";
+  } else {
+    modelFile = flagItem.substr(pos + 1);
+    std::cout << "input protobuf file path is: " << flagItem.substr(pos + 1) << std::endl;
+  }
+
+  if (ValidateFileStr(modelFile, ".pb") != 0) {
+    MS_LOG(EXCEPTION) << "INPUT ILLEGAL: modelFile must be *.pb";
+  }
+
+  onnx::ModelProto model_;
+  ReadOnnxFromBinary(modelFile, &model_);
+  MSANFModelParser model_parser;
+  FuncGraphPtr dstgraph_ptr = model_parser.Parse(model_);
+  MS_EXCEPTION_IF_NULL(dstgraph_ptr);
+  TestFuncGraphBuild(dstgraph_ptr);
+  return dstgraph_ptr;
+}
+
+std::shared_ptr<FuncGraph> AnfConverter::RunAnfConverter(const char *buf, const size_t buf_size) {
+  Py_Initialize();
+  MS_EXCEPTION_IF_NULL(buf);
+  std::string str((const char *)buf, buf_size);
+  onnx::ModelProto model_;
+  if (!model_.ParseFromString(str)) {
+    MS_LOG(EXCEPTION) << "Parse model from buffer fail!";
+  }
+  MSANFModelParser model_parser;
+  FuncGraphPtr dstgraph_ptr = model_parser.Parse(model_);
+  MS_EXCEPTION_IF_NULL(dstgraph_ptr);
+  TestFuncGraphBuild(dstgraph_ptr);
+  return dstgraph_ptr;
+}
+
+int AnfConverter::TestFuncGraphBuild(const FuncGraphPtr &graph) {
+  MS_EXCEPTION_IF_NULL(graph);
+  auto node_return = graph->get_return();
+  std::vector<AnfNodePtr> node_list = TopoSort(node_return);
+  MS_LOG(INFO) << "node_list size is : " << node_list.size();
+  for (auto &node : node_list) {
+    if (node->isa<CNode>()) {
+      auto node_CN = node->cast<CNodePtr>();
+      MS_LOG(INFO) << "CN node: " << node_CN->input(0)->ToString() << ", input size :" << node_CN->size();
+    } else if (node->isa<Parameter>()) {
+      auto node_Para = node->cast<ParameterPtr>();
+      if (node_Para->has_default()) {
+        MS_LOG(INFO) << "Parameter node: " << node_Para->name() << " has default value!";
+      } else {
+        MS_LOG(INFO) << "Parameter node: " << node_Para->name();
+      }
+    } else if (node->isa<ValueNode>()) {
+      auto node_Value = node->cast<ValueNodePtr>();
+      MS_LOG(INFO) << "Value node: " << node_Value->ToString();
+    }
+  }
+  return 0;
+}
+}  // namespace lite
+}  // namespace mindspore
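Note that the string overload of RunAnfConverter parses a flag-style argument: only the text after '=' is used as the .pb path. A sketch of both entry points; the flag name and file path are illustrative:

#include "utils/load_onnx/anf_converter.h"

void LoadExamples(const char *buf, size_t buf_size) {
  // File-path variant: everything after '=' is taken as the .pb path.
  auto graph_a = mindspore::lite::AnfConverter::RunAnfConverter("modelFile=/tmp/lenet.pb");
  // In-memory variant: parses a serialized onnx::ModelProto buffer directly.
  auto graph_b = mindspore::lite::AnfConverter::RunAnfConverter(buf, buf_size);
}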
+ */ + +#ifndef MINDSPORE_CCSRC_UTILS_LOAD_ONNX_ANF_CONVERTER_H +#define MINDSPORE_CCSRC_UTILS_LOAD_ONNX_ANF_CONVERTER_H +#include +#include +#include "google/protobuf/io/zero_copy_stream_impl.h" +#include "proto/onnx.pb.h" +#include "ir/func_graph.h" + +namespace mindspore { +namespace lite { +class AnfConverter { + public: + static int TestFuncGraphBuild(const FuncGraphPtr &graph); + static std::shared_ptr RunAnfConverter(const std::string &file_path); + static std::shared_ptr RunAnfConverter(const char *buf, const size_t buf_size); + + private: + static void Trim(std::string *input); + static int ValidateFileStr(const std::string &modelFile, std::string fileType); + static bool ReadOnnxFromBinary(const std::string &modelFile, google::protobuf::Message *onnx_model); +}; +} // namespace lite +} // namespace mindspore +#endif diff --git a/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc new file mode 100644 index 00000000000..d624bc51c88 --- /dev/null +++ b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc @@ -0,0 +1,515 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "utils/load_onnx/anf_model_parser.h" +#include "google/protobuf/io/zero_copy_stream_impl.h" +#include "ir/tensor.h" +#include "ir/param_value_py.h" +#include "operator/ops.h" +#include "proto/onnx.pb.h" +#include "utils/log_adapter.h" + +using std::string; + +namespace mindspore { +namespace lite { +static constexpr char kConstantValueNode[] = "Constant"; +static constexpr char kCNodeShapeAttr[] = "shape"; +enum ParseForm : int { + FORM_PARSE_TYPE = 0, + FORM_PARSE_SCALAR = 1, + FORM_PARSE_TENSOR = 2, +}; + +static std::map kParseTypeSwitchMap{ + {"type", FORM_PARSE_TYPE}, {"scalar", FORM_PARSE_SCALAR}, {"tensor", FORM_PARSE_TENSOR}}; + +static std::unordered_map kDefaultValueSwitchMap{ + {onnx::TensorProto_DataType_BOOL, kNumberTypeBool}, {onnx::TensorProto_DataType_INT8, kNumberTypeInt8}, + {onnx::TensorProto_DataType_INT16, kNumberTypeInt16}, {onnx::TensorProto_DataType_INT32, kNumberTypeInt32}, + {onnx::TensorProto_DataType_INT64, kNumberTypeInt64}, {onnx::TensorProto_DataType_UINT8, kNumberTypeUInt8}, + {onnx::TensorProto_DataType_UINT16, kNumberTypeUInt16}, {onnx::TensorProto_DataType_UINT32, kNumberTypeUInt32}, + {onnx::TensorProto_DataType_UINT64, kNumberTypeUInt64}, {onnx::TensorProto_DataType_FLOAT16, kNumberTypeFloat16}, + {onnx::TensorProto_DataType_FLOAT, kNumberTypeFloat32}, {onnx::TensorProto_DataType_DOUBLE, kNumberTypeFloat64}, + {onnx::TensorProto_DataType_STRING, kObjectTypeString}, +}; + +#define PARSE_ONNXATTR_IN_SCALAR_FORM(type, valuetype) \ + void ParseAttrInScalar_##type##_##valuetype(const PrimitivePtr &prim, const std::string &attr_name, \ + const onnx::TensorProto &attr_tensor) { \ + MS_EXCEPTION_IF_NULL(prim); \ + std::vector attr_value_vec; \ + for (int i = 0; i < attr_tensor.type##_data_size(); ++i) { \ + 
diff --git a/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc
new file mode 100644
index 00000000000..d624bc51c88
--- /dev/null
+++ b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc
@@ -0,0 +1,515 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include "utils/load_onnx/anf_model_parser.h"
+#include "google/protobuf/io/zero_copy_stream_impl.h"
+#include "ir/tensor.h"
+#include "ir/param_value_py.h"
+#include "operator/ops.h"
+#include "proto/onnx.pb.h"
+#include "utils/log_adapter.h"
+
+using std::string;
+
+namespace mindspore {
+namespace lite {
+static constexpr char kConstantValueNode[] = "Constant";
+static constexpr char kCNodeShapeAttr[] = "shape";
+enum ParseForm : int {
+  FORM_PARSE_TYPE = 0,
+  FORM_PARSE_SCALAR = 1,
+  FORM_PARSE_TENSOR = 2,
+};
+
+static std::map<std::string, ParseForm> kParseTypeSwitchMap{
+  {"type", FORM_PARSE_TYPE}, {"scalar", FORM_PARSE_SCALAR}, {"tensor", FORM_PARSE_TENSOR}};
+
+static std::unordered_map<int, TypeId> kDefaultValueSwitchMap{
+  {onnx::TensorProto_DataType_BOOL, kNumberTypeBool},     {onnx::TensorProto_DataType_INT8, kNumberTypeInt8},
+  {onnx::TensorProto_DataType_INT16, kNumberTypeInt16},   {onnx::TensorProto_DataType_INT32, kNumberTypeInt32},
+  {onnx::TensorProto_DataType_INT64, kNumberTypeInt64},   {onnx::TensorProto_DataType_UINT8, kNumberTypeUInt8},
+  {onnx::TensorProto_DataType_UINT16, kNumberTypeUInt16}, {onnx::TensorProto_DataType_UINT32, kNumberTypeUInt32},
+  {onnx::TensorProto_DataType_UINT64, kNumberTypeUInt64}, {onnx::TensorProto_DataType_FLOAT16, kNumberTypeFloat16},
+  {onnx::TensorProto_DataType_FLOAT, kNumberTypeFloat32}, {onnx::TensorProto_DataType_DOUBLE, kNumberTypeFloat64},
+  {onnx::TensorProto_DataType_STRING, kObjectTypeString},
+};
+
+#define PARSE_ONNXATTR_IN_SCALAR_FORM(type, valuetype)                                                 \
+  void ParseAttrInScalar_##type##_##valuetype(const PrimitivePtr &prim, const std::string &attr_name, \
+                                              const onnx::TensorProto &attr_tensor) {                 \
+    MS_EXCEPTION_IF_NULL(prim);                                                                        \
+    std::vector<valuetype> attr_value_vec;                                                             \
+    for (int i = 0; i < attr_tensor.type##_data_size(); ++i) {                                         \
+      attr_value_vec.push_back(static_cast<valuetype>(attr_tensor.type##_data(i)));                    \
+    }                                                                                                  \
+    if (attr_value_vec.size() == 1) {                                                                  \
+      prim->AddAttr(attr_name, MakeValue(attr_value_vec[0]));                                          \
+    } else {                                                                                           \
+      prim->AddAttr(attr_name, MakeValue<std::vector<valuetype>>(attr_value_vec));                     \
+    }                                                                                                  \
+  }
+
+PARSE_ONNXATTR_IN_SCALAR_FORM(double, double)
+PARSE_ONNXATTR_IN_SCALAR_FORM(float, float)
+PARSE_ONNXATTR_IN_SCALAR_FORM(string, string)
+PARSE_ONNXATTR_IN_SCALAR_FORM(int32, int32)
+PARSE_ONNXATTR_IN_SCALAR_FORM(int32, bool)
+PARSE_ONNXATTR_IN_SCALAR_FORM(int64, int64)
+PARSE_ONNXATTR_IN_SCALAR_FORM(uint64, uint64)
+
+bool MSANFModelParser::BuildParameterForFuncGraph(const ParameterPtr &node, const onnx::ValueInfoProto &value_proto) {
+  MS_EXCEPTION_IF_NULL(node);
+  if (!value_proto.has_type() || !value_proto.has_name()) {
+    MS_LOG(ERROR) << "onnx ValueInfoProto has no type or name! ";
+    return false;
+  }
+  node->set_name(value_proto.name());
+  const auto &type_proto = value_proto.type();
+  if (!type_proto.has_tensor_type()) {
+    MS_LOG(ERROR) << "onnx TypeProto has no tensor_type! ";
+    return false;
+  }
+  const onnx::TypeProto_Tensor &tensor_typeproto = type_proto.tensor_type();
+  if (!tensor_typeproto.has_elem_type() || !tensor_typeproto.has_shape()) {
+    MS_LOG(ERROR) << "onnx TypeProto_Tensor has no elem_type or shape! ";
+    return false;
+  }
+  const onnx::TensorShapeProto &tensor_shape = tensor_typeproto.shape();
+  std::vector<int> shape;
+  for (int i = 0; i < tensor_shape.dim_size(); ++i) {
+    shape.push_back(tensor_shape.dim(i).dim_value());
+  }
+
+  if (kDefaultValueSwitchMap.find(tensor_typeproto.elem_type()) == kDefaultValueSwitchMap.end()) {
+    MS_LOG(ERROR) << "onnx TypeProto_Tensor elem_type is not support yet!";
+    return false;
+  }
+
+  tensor::TensorPtr tensor_info =
+    std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
+  MS_EXCEPTION_IF_NULL(tensor_info);
+  auto tensor_abstract = tensor_info->ToAbstract();
+  MS_EXCEPTION_IF_NULL(tensor_abstract);
+  node->set_abstract(tensor_abstract);
+
+  if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) {
+    const onnx::TensorProto initialize_proto = default_para_map_[value_proto.name()];
+    std::string initial_data = initialize_proto.raw_data();
+    auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c(true));
+    MS_EXCEPTION_IF_NULL(tensor_data_buf);
+    memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), initial_data.data(), initial_data.size());
+
+    py::array array_data = tensor_info->data();
+    ParamValuePyPtr para_value_ptr = std::make_shared<ParamValuePy>();
+    MS_EXCEPTION_IF_NULL(para_value_ptr);
+    para_value_ptr->set_value(array_data);
+    node->set_default_param(para_value_ptr);
+  }
+  anfnode_build_map_[value_proto.name()] = node;
+  return true;
+}
+
+bool MSANFModelParser::ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph,
+                                                const onnx::GraphProto &importProto) {
+  MS_EXCEPTION_IF_NULL(outputFuncGraph);
+  MS_LOG(INFO) << "Parameters with default values, size: " << importProto.initializer_size();
+
+  for (int i = 0; i < importProto.initializer_size(); ++i) {
+    const onnx::TensorProto &initializer_proto = importProto.initializer(i);
+    if (!initializer_proto.has_name()) {
+      MS_LOG(ERROR) << "initializer vector of onnx GraphProto has no name at index: " << i;
+      return false;
+    }
+    default_para_map_[initializer_proto.name()] = initializer_proto;
+  }
+
+  MS_LOG(INFO) << "all parameters size: " << importProto.input_size();
+  for (int i = 0; i < importProto.input_size(); ++i) {
+    const onnx::ValueInfoProto &input_proto = importProto.input(i);
+ if (!BuildParameterForFuncGraph(outputFuncGraph->add_parameter(), input_proto)) { + MS_LOG(ERROR) << "Build parameter for funcgraph fail at index: " << i; + return false; + } + } + return true; +} + +bool MSANFModelParser::ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name, + const onnx::TensorProto &attr_tensor) { + MS_EXCEPTION_IF_NULL(prim); + const int attr_tensor_type = attr_tensor.data_type(); + if (kDefaultValueSwitchMap.find(attr_tensor_type) == kDefaultValueSwitchMap.end()) { + MS_LOG(ERROR) << "Obtain attr in type-form has not support input type:" << attr_tensor_type; + return false; + } + prim->AddAttr(attr_name, TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type])); + return true; +} + +bool MSANFModelParser::ObtainCNodeAttrInScalarForm(const PrimitivePtr &prim, const std::string &attr_name, + const onnx::TensorProto &attr_tensor) { + MS_EXCEPTION_IF_NULL(prim); + const int attr_tensor_type = attr_tensor.data_type(); + switch (attr_tensor_type) { + case onnx::TensorProto_DataType_STRING: { + ParseAttrInScalar_string_string(prim, attr_name, attr_tensor); + break; + } + case onnx::TensorProto_DataType_INT32: { + ParseAttrInScalar_int32_int32(prim, attr_name, attr_tensor); + break; + } + case onnx::TensorProto_DataType_INT64: { + ParseAttrInScalar_int64_int64(prim, attr_name, attr_tensor); + break; + } + case onnx::TensorProto_DataType_UINT64: { + ParseAttrInScalar_uint64_uint64(prim, attr_name, attr_tensor); + break; + } + case onnx::TensorProto_DataType_FLOAT: { + ParseAttrInScalar_float_float(prim, attr_name, attr_tensor); + break; + } + case onnx::TensorProto_DataType_DOUBLE: { + ParseAttrInScalar_double_double(prim, attr_name, attr_tensor); + break; + } + case onnx::TensorProto_DataType_BOOL: { + ParseAttrInScalar_int32_bool(prim, attr_name, attr_tensor); + auto value = prim->GetAttr(attr_name); + break; + } + default: + MS_LOG(ERROR) << "Obtain attr in scalar-form has not support input type: " << attr_tensor_type; + return false; + } + return true; +} + +bool MSANFModelParser::ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name, + const onnx::TensorProto &attr_tensor) { + MS_EXCEPTION_IF_NULL(prim); + MS_LOG(ERROR) << "parse attr type don't support attr type is tensor"; + return false; +} + +bool MSANFModelParser::GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto) { + MS_EXCEPTION_IF_NULL(prim); + const std::string &attr_name = attr_proto.name(); + if (!attr_proto.has_ref_attr_name()) { + MS_LOG(ERROR) << "CNode parse attr type has no ref_attr_name"; + return false; + } + const std::string &ref_attr_name = attr_proto.ref_attr_name(); + const onnx::TensorProto &attr_tensor = attr_proto.t(); + switch (kParseTypeSwitchMap[ref_attr_name]) { + case FORM_PARSE_TYPE: { + return ObtainCNodeAttrInTypeForm(prim, attr_name, attr_tensor); + } + case FORM_PARSE_SCALAR: { + return ObtainCNodeAttrInScalarForm(prim, attr_name, attr_tensor); + } + case FORM_PARSE_TENSOR: { + return ObtainCNodeAttrInTensorForm(prim, attr_name, attr_tensor); + } + default: + MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; + return false; + } +} +bool MSANFModelParser::ObtainValueNodeInTensorForm(const std::string &value_node_name, + const onnx::TensorProto &attr_tensor) { + const int attr_tensor_type = attr_tensor.data_type(); + std::vector shape; + for (int i = 0; i < attr_tensor.dims_size(); ++i) { + shape.push_back(attr_tensor.dims(i)); + } + tensor::TensorPtr tensor_info = 
+  const std::string &tensor_buf = attr_tensor.raw_data();
+  auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c(true));
+  if (memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), tensor_buf.data(), tensor_buf.size()) != 0) {
+    MS_LOG(ERROR) << "memcpy_s of tensor data for ValueNode " << value_node_name << " failed!";
+    return false;
+  }
+  if (attr_tensor_type == onnx::TensorProto_DataType_FLOAT) {
+    auto *data_value_node = reinterpret_cast<float *>(tensor_info->data_c());
+    MS_EXCEPTION_IF_NULL(data_value_node);
+    auto new_value_node = std::make_shared<ValueNode>(MakeValue(*data_value_node));
+    anfnode_build_map_[value_node_name] = new_value_node;
+  } else {
+    auto *data_value_node = reinterpret_cast<int *>(tensor_info->data_c());
+    MS_EXCEPTION_IF_NULL(data_value_node);
+    auto new_value_node = std::make_shared<ValueNode>(MakeValue(*data_value_node));
+    anfnode_build_map_[value_node_name] = new_value_node;
+  }
+  return true;
+}
+
+bool MSANFModelParser::ObtainValueNodeInScalarForm(const std::string &value_node_name,
+                                                   const onnx::TensorProto &attr_tensor) {
+  const int attr_tensor_type = attr_tensor.data_type();
+  ValuePtr value_ptr = nullptr;
+  switch (attr_tensor_type) {
+    case onnx::TensorProto_DataType_INT32: {
+      std::vector<int32> add_data;
+      for (int i = 0; i < attr_tensor.int32_data_size(); ++i) {
+        add_data.push_back(attr_tensor.int32_data(i));
+      }
+      if (add_data.size() == 1) {
+        value_ptr = MakeValue(add_data[0]);
+      } else if (!add_data.empty()) {
+        value_ptr = MakeValue<std::vector<int32>>(add_data);
+      }
+      break;
+    }
+    case onnx::TensorProto_DataType_FLOAT: {
+      std::vector<float> add_data;
+      for (int i = 0; i < attr_tensor.float_data_size(); ++i) {
+        add_data.push_back(attr_tensor.float_data(i));
+      }
+
+      if (add_data.size() == 1) {
+        value_ptr = MakeValue(add_data[0]);
+      } else if (!add_data.empty()) {
+        value_ptr = MakeValue<std::vector<float>>(add_data);
+      }
+      break;
+    }
+    case onnx::TensorProto_DataType_UNDEFINED: {
+      std::vector<ValuePtr> elems;
+      value_ptr = std::make_shared<ValueTuple>(elems);
+      break;
+    }
+    default:
+      MS_LOG(ERROR) << "Obtain ValueNode in scalar-form: unsupported input type: " << attr_tensor_type;
+      return false;
+  }
+  MS_EXCEPTION_IF_NULL(value_ptr);
+  auto new_value_node = NewValueNode(value_ptr);
+  MS_EXCEPTION_IF_NULL(new_value_node);
+  new_value_node->set_abstract(value_ptr->ToAbstract());
+  anfnode_build_map_[value_node_name] = new_value_node;
+
+  return true;
+}
+
+bool MSANFModelParser::ObtainValueNodeInTypeForm(const std::string &value_node_name,
+                                                 const onnx::TensorProto &attr_tensor) {
+  const int attr_tensor_type = attr_tensor.data_type();
+  if (kDefaultValueSwitchMap.find(attr_tensor_type) == kDefaultValueSwitchMap.end()) {
+    MS_LOG(ERROR) << "Obtain ValueNode in type-form: unsupported input type: " << attr_tensor_type;
+    return false;
+  }
+  auto new_value_node = std::make_shared<ValueNode>(TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type]));
+  anfnode_build_map_[value_node_name] = new_value_node;
+  return true;
+}
+
+bool MSANFModelParser::GetAttrValueForValueNode(const std::string &ref_attr_name, const std::string &value_node_name,
+                                                const onnx::TensorProto &attr_tensor) {
+  switch (kParseTypeSwitchMap[ref_attr_name]) {
+    case FORM_PARSE_SCALAR: {
+      return ObtainValueNodeInScalarForm(value_node_name, attr_tensor);
+    }
+    case FORM_PARSE_TENSOR: {
+      return ObtainValueNodeInTensorForm(value_node_name, attr_tensor);
+    }
+    case FORM_PARSE_TYPE: {
+      return ObtainValueNodeInTypeForm(value_node_name, attr_tensor);
+    }
+    default:
+      MS_LOG(ERROR) << "Unsupported ref_attr_name when parsing ValueNode value: " << ref_attr_name;
+      return false;
+  }
+}
+
+bool MSANFModelParser::BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto) {
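+  // A Constant node carries its payload in attribute(0); output(0) names the resulting ValueNode.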
+  const std::string &value_node_name = node_proto.output(0);
+  const onnx::AttributeProto &attr_proto = node_proto.attribute(0);
+  if (!attr_proto.has_ref_attr_name()) {
+    MS_LOG(ERROR) << "parse ValueNode: attr proto has no ref_attr_name";
+    return false;
+  }
+  const std::string &ref_attr_name = attr_proto.ref_attr_name();
+  const onnx::TensorProto &attr_tensor = attr_proto.t();
+
+  return GetAttrValueForValueNode(ref_attr_name, value_node_name, attr_tensor);
+}
+
+AbstractBasePtr MSANFModelParser::GetAbstractForCNode(const onnx::AttributeProto &attr_proto) {
+  std::vector<int> shape_vec;
+  const onnx::TensorProto &attr_tensor = attr_proto.t();
+  for (int i = 0; i < attr_tensor.dims_size(); ++i) {
+    shape_vec.push_back(attr_tensor.dims(i));
+  }
+  tensor::TensorPtr tensor_info =
+    std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[attr_tensor.data_type()], shape_vec);
+  MS_EXCEPTION_IF_NULL(tensor_info);
+  return tensor_info->ToAbstract();
+}
+
+bool MSANFModelParser::BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::NodeProto &node_proto,
+                                              const onnx::GraphProto &importProto, const bool ret_flag) {
+  MS_EXCEPTION_IF_NULL(outputFuncGraph);
+  if (!node_proto.has_op_type()) {
+    MS_LOG(ERROR) << "Get CNode op_type failed!";
+    return false;
+  }
+  const std::string &node_name = node_proto.output(0);
+  const std::string &node_type = node_proto.op_type();
+  PrimitivePtr prim = std::make_shared<Primitive>(node_type);
+  MS_EXCEPTION_IF_NULL(prim);
+
+  AbstractBasePtr abstract = nullptr;
+  for (int i = 0; i < node_proto.attribute_size(); ++i) {
+    const onnx::AttributeProto &attr_proto = node_proto.attribute(i);
+    if (attr_proto.name() == kCNodeShapeAttr) {
+      abstract = GetAbstractForCNode(attr_proto);
+      continue;
+    }
+    if (!GetAttrValueForCNode(prim, attr_proto)) {
+      MS_LOG(ERROR) << "Get CNode attr failed!";
+      return false;
+    }
+  }
+
+  std::vector<AnfNodePtr> inputs;
+  inputs.push_back(NewValueNode(prim));
+  for (int i = 0; i < node_proto.input_size(); ++i) {
+    const std::string &input_name = node_proto.input(i);
+    if (anfnode_build_map_.find(input_name) == anfnode_build_map_.end()) {
+      MS_LOG(ERROR) << node_name << " input " << i << " " << input_name << " cannot be found among the parsed nodes";
+      return false;
+    }
+    inputs.push_back(anfnode_build_map_[input_name]);
+  }
+  CNodePtr cnode_ptr = outputFuncGraph->NewCNode(inputs);
+  MS_EXCEPTION_IF_NULL(cnode_ptr);
+  cnode_ptr->set_abstract(abstract);
+  if (ret_flag) {
+    // The last exported node is the graph output: wrap it in a Return node whose
+    // abstract is derived from the graph's declared output type and shape.
+    const onnx::ValueInfoProto &output_node = importProto.output(0);
+    const ::onnx::TypeProto &output_typeproto = output_node.type();
+    int output_type = output_typeproto.tensor_type().elem_type();
+    std::vector<int> output_shape;
+    for (int i = 0; i < output_typeproto.tensor_type().shape().dim_size(); ++i) {
+      output_shape.push_back(output_typeproto.tensor_type().shape().dim(i).dim_value());
+    }
+    tensor::TensorPtr tensor_return =
+      std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[output_type], output_shape);
+    inputs.clear();
+    inputs.push_back(NewValueNode(prim::kPrimReturn));
+    inputs.push_back(cnode_ptr);
+    auto return_node = outputFuncGraph->NewCNode(inputs);
+    return_node->set_abstract(tensor_return->ToAbstract());
+    outputFuncGraph->set_return(return_node);
+    MS_LOG(INFO) << "Construct funcgraph finished, all success!";
+  }
+  anfnode_build_map_[node_name] = cnode_ptr;
+  return true;
+}
+
+bool MSANFModelParser::ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto) {
+  MS_EXCEPTION_IF_NULL(outputFuncGraph);
+  bool return_flag = false;
+  MS_LOG(INFO) << "The number of CNodes: " << importProto.node_size();
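+  // Flag the last node so BuildCNodeForFuncGraph also constructs the Return node for the graph output.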
+  for (int i = 0; i < importProto.node_size(); ++i) {
+    return_flag = (i == importProto.node_size() - 1);
+    const onnx::NodeProto &node_proto = importProto.node(i);
+    const std::string &node_type = node_proto.op_type();
+    if (node_type == kConstantValueNode) {
+      if (!BuildValueNodeForFuncGraph(node_proto)) {
+        MS_LOG(ERROR) << "Build ValueNode for funcgraph failed at index: " << i;
+        return false;
+      }
+      continue;
+    }
+    if (!BuildCNodeForFuncGraph(outputFuncGraph, node_proto, importProto, return_flag)) {
+      MS_LOG(ERROR) << "Build CNode for funcgraph failed at index: " << i;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool MSANFModelParser::BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto) {
+  MS_EXCEPTION_IF_NULL(outputFuncGraph);
+  GraphDebugInfoPtr debug_info_ptr = outputFuncGraph->debug_info();
+  MS_EXCEPTION_IF_NULL(debug_info_ptr);
+  if (importProto.has_name()) {
+    debug_info_ptr->set_name(importProto.name());
+  } else {
+    MS_LOG(ERROR) << "FuncGraph under conversion has no name!";
+  }
+
+  if (!ImportParametersForGraph(outputFuncGraph, importProto)) {
+    return false;
+  }
+  return ImportNodesForGraph(outputFuncGraph, importProto);
+}
+
+bool MSANFModelParser::MSANFParseModelConfigureInfo(const onnx::ModelProto &model_proto) {
+  if (!model_proto.has_producer_name()) {
+    MS_LOG(ERROR) << "Parse model producer name from pb file failed!";
+    return false;
+  }
+  producer_name_ = model_proto.producer_name();
+  MS_LOG(INFO) << "producer_name: " << producer_name_;
+
+  if (!model_proto.has_producer_version()) {
+    MS_LOG(ERROR) << "Parse model producer version from pb file failed!";
+    return false;
+  }
+  producer_version_ = model_proto.producer_version();
+  MS_LOG(INFO) << "producer_version: " << producer_version_;
+
+  if (!model_proto.has_ir_version()) {
+    MS_LOG(ERROR) << "Parse model version from pb file failed!";
+    return false;
+  }
+  ir_version_ = model_proto.ir_version();
+  MS_LOG(INFO) << "ir_version: " << ir_version_;
+
+  if (model_proto.opset_import_size() == 0) {
+    MS_LOG(ERROR) << "onnx ModelProto has no opset_import!";
+    return false;
+  }
+  const onnx::OperatorSetIdProto &opset_proto = model_proto.opset_import(0);
+  if (!opset_proto.has_version()) {
+    MS_LOG(ERROR) << "Parse opset version from pb file failed!";
+    return false;
+  }
+  opset_version_ = opset_proto.version();
+  MS_LOG(INFO) << "opset_version: " << opset_version_;
+  return true;
+}
+
+FuncGraphPtr MSANFModelParser::Parse(const onnx::ModelProto &model_proto) {
+  FuncGraphPtr dstGraph = std::make_shared<FuncGraph>();
+  MS_EXCEPTION_IF_NULL(dstGraph);
+  if (!MSANFParseModelConfigureInfo(model_proto)) {
+    MS_LOG(ERROR) << "Parse configuration info for pb file failed!";
+    return nullptr;
+  }
+  const onnx::GraphProto &graphBuild = model_proto.graph();
+  if (!BuildFuncGraph(dstGraph, graphBuild)) {
+    MS_LOG(ERROR) << "Build funcgraph failed!";
+    return nullptr;
+  }
+  MS_LOG(INFO) << "Parse pb to build FuncGraph success!";
+  return dstGraph;
+}
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/ccsrc/utils/load_onnx/anf_model_parser.h b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.h
new file mode 100644
index 00000000000..20787cbef49
--- /dev/null
+++ b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.h
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_UTILS_LOAD_ONNX_ANF_MODEL_PARSER_H
+#define MINDSPORE_CCSRC_UTILS_LOAD_ONNX_ANF_MODEL_PARSER_H
+
+#include <string>
+#include <map>
+#include <unordered_map>
+#include "google/protobuf/io/zero_copy_stream_impl.h"
+#include "ir/func_graph.h"
+#include "proto/onnx.pb.h"
+
+namespace mindspore {
+namespace lite {
+using int32 = int32_t;
+using int64 = int64_t;
+using uint64 = uint64_t;
+class MSANFModelParser {
+ public:
+  MSANFModelParser() = default;
+  ~MSANFModelParser() = default;
+
+  FuncGraphPtr Parse(const onnx::ModelProto &model_proto);
+  bool MSANFParseModelConfigureInfo(const onnx::ModelProto &model_proto);
+
+  std::string GetProducerName() { return producer_name_; }
+  std::string GetProducerVersion() { return producer_version_; }
+  int GetIrVersion() { return ir_version_; }
+  int GetOpsetVersion() { return opset_version_; }
+
+ private:
+  bool BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto);
+  bool ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto);
+  bool ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto);
+  bool BuildParameterForFuncGraph(const ParameterPtr &node, const onnx::ValueInfoProto &value_proto);
+  bool BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::NodeProto &node_proto,
+                              const onnx::GraphProto &importProto, const bool ret_flag);
+  bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto);
+  bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name,
+                                 const onnx::TensorProto &attr_tensor);
+  bool ObtainCNodeAttrInScalarForm(const PrimitivePtr &prim, const std::string &attr_name,
+                                   const onnx::TensorProto &attr_tensor);
+  bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
+                                   const onnx::TensorProto &attr_tensor);
+  bool BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto);
+  bool ObtainValueNodeInTensorForm(const std::string &value_node_name, const onnx::TensorProto &attr_tensor);
+  bool ObtainValueNodeInScalarForm(const std::string &value_node_name, const onnx::TensorProto &attr_tensor);
+  bool GetAttrValueForValueNode(const std::string &ref_attr_name, const std::string &value_node_name,
+                                const onnx::TensorProto &attr_tensor);
+  bool ObtainValueNodeInTypeForm(const std::string &value_node_name, const onnx::TensorProto &attr_tensor);
+  AbstractBasePtr GetAbstractForCNode(const onnx::AttributeProto &attr_proto);
+
+  std::string producer_name_;
+  std::string producer_version_;
+  int ir_version_{};
+  int opset_version_{};
+  std::unordered_map<std::string, AnfNodePtr> anfnode_build_map_;
+  std::map<std::string, onnx::TensorProto> default_para_map_;
+};
+} // namespace lite
+} // namespace mindspore
+
+#endif // MINDSPORE_CCSRC_UTILS_LOAD_ONNX_ANF_MODEL_PARSER_H
diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt
index 5e30b074a31..840a66ad20c 100644
--- a/tests/ut/cpp/CMakeLists.txt
+++ b/tests/ut/cpp/CMakeLists.txt
@@ -106,9 +106,12 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
 )
 list(REMOVE_ITEM MINDSPORE_SRC_LIST
     "../../../mindspore/ccsrc/debug/dump_proto.cc")
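+# Exclude the sources added in this patch (lite tensor, onnx loader) from the ut build.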
"../../../mindspore/ccsrc/debug/dump_proto.cc") +list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/ir/lite/tensor.cc") list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc") list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/utils/anf_ir.pb.cc") list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/utils/node_strategy.pb.cc") +list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc") +list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/utils/load_onnx/anf_converter.cc") file(GLOB_RECURSE UT_SUTB_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "stub/aicpu/*.cc"