diff --git a/cmake/package_lite.cmake b/cmake/package_lite.cmake index feabd48552..129c06f28a 100644 --- a/cmake/package_lite.cmake +++ b/cmake/package_lite.cmake @@ -148,7 +148,7 @@ if(PLATFORM_ARM64) install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api - COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend* ops*" EXCLUDE) + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) install(DIRECTORY ${TOP_DIR}/mindspore/lite/build/operator_library DESTINATION ${CODEGEN_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME}) if(ENABLE_TOOLS) @@ -173,7 +173,7 @@ elseif(PLATFORM_ARM32) install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api - COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE) + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) install(DIRECTORY ${TOP_DIR}/mindspore/lite/build/operator_library DESTINATION ${CODEGEN_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME}) if(ENABLE_TOOLS) @@ -213,7 +213,7 @@ elseif(WIN32) install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api - COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE) + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) install(FILES ${TOP_DIR}/build/mindspore/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME}) install(FILES ${TOP_DIR}/build/mindspore/src/libmindspore-lite.dll.a DESTINATION ${RUNTIME_LIB_DIR} @@ -231,7 +231,7 @@ else() install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api - COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE) + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME}) install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR} diff --git a/include/api/cell.h b/include/api/cell.h index 3039fa816b..d6b815fd74 100644 --- a/include/api/cell.h +++ b/include/api/cell.h @@ -103,8 +103,9 @@ class MS_API GraphCell final : public Cell { std::vector GetOutputs(); private: + friend class Model; friend class ModelImpl; - Status Load(); + Status Load(uint32_t device_id); std::shared_ptr graph_; std::shared_ptr executor_; diff --git a/include/api/context.h b/include/api/context.h index 3f52d7ae9d..635faa250f 100644 --- a/include/api/context.h +++ b/include/api/context.h @@ -24,162 +24,201 @@ #include "include/api/dual_abi_helper.h" namespace mindspore { -constexpr auto kDeviceTypeAscend310 = "Ascend310"; -constexpr auto kDeviceTypeAscend910 = "Ascend910"; -constexpr auto kDeviceTypeGPU = "GPU"; +enum DeviceType { + kCPU = 0, + kMaliGPU, + kNvidiaGPU, + kKirinNPU, + kAscend910, + kAscend310, + // 
add new type here + kInvalidDeviceType = 100, +}; -struct MS_API Context { +class Allocator; +class DeviceInfoContext; + +class MS_API Context { public: Context(); - virtual ~Context() = default; + ~Context() = default; + + void SetThreadNum(int32_t thread_num); + int32_t GetThreadNum() const; + + void SetAllocator(const std::shared_ptr &allocator); + std::shared_ptr GetAllocator() const; + + std::vector> &MutableDeviceInfo(); + + private: struct Data; - std::shared_ptr data; + std::shared_ptr data_; }; -struct MS_API GlobalContext : public Context { +class MS_API DeviceInfoContext : public std::enable_shared_from_this { public: - static std::shared_ptr GetGlobalContext(); + struct Data; - static inline void SetGlobalDeviceTarget(const std::string &device_target); - static inline std::string GetGlobalDeviceTarget(); + DeviceInfoContext(); + virtual ~DeviceInfoContext() = default; + virtual enum DeviceType GetDeviceType() const = 0; - static void SetGlobalDeviceID(const uint32_t &device_id); - static uint32_t GetGlobalDeviceID(); + template + std::shared_ptr Cast() { + static_assert(std::is_base_of::value, "Wrong cast type."); + if (GetDeviceType() != T().GetDeviceType()) { + return nullptr; + } - static inline void SetGlobalDumpConfigPath(const std::string &cfg_path); - static inline std::string GetGlobalDumpConfigPath(); + return std::static_pointer_cast(shared_from_this()); + } + + protected: + std::shared_ptr data_; +}; + +class MS_API CPUDeviceInfo : public DeviceInfoContext { + public: + enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; }; + + /// \brief Set the thread affinity of CPU cores. + /// + /// \param mode: 0: no affinities, 1: big cores first, 2: little cores first + void SetThreadAffinity(int mode); + int GetThreadAffinity() const; + void SetEnableFP16(bool is_fp16); + bool GetEnableFP16() const; +}; + +class MS_API MaliGPUDeviceInfo : public DeviceInfoContext { + public: + enum DeviceType GetDeviceType() const override { return DeviceType::kMaliGPU; }; + + void SetEnableFP16(bool is_fp16); + bool GetEnableFP16() const; +}; + +class MS_API KirinNPUDeviceInfo : public DeviceInfoContext { + public: + enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; }; + + void SetFrequency(int frequency); + int GetFrequency() const; +}; + +class MS_API NvidiaGPUDeviceInfo : public DeviceInfoContext { + public: + enum DeviceType GetDeviceType() const override { return DeviceType::kNvidiaGPU; }; + + void SetDeviceID(uint32_t device_id); + uint32_t GetDeviceID() const; + + void SetGpuTrtInferMode(bool gpu_trt_infer_mode); + bool GetGpuTrtInferMode() const; +}; + +class MS_API Ascend910DeviceInfo : public DeviceInfoContext { + public: + enum DeviceType GetDeviceType() const override { return DeviceType::kAscend910; }; + + void SetDeviceID(uint32_t device_id); + uint32_t GetDeviceID() const; +}; + +class MS_API Ascend310DeviceInfo : public DeviceInfoContext { + public: + enum DeviceType GetDeviceType() const override { return DeviceType::kAscend310; }; + + void SetDeviceID(uint32_t device_id); + uint32_t GetDeviceID() const; + + inline void SetDumpConfigPath(const std::string &cfg_path); + inline std::string GetDumpConfigPath() const; + + inline void SetInsertOpConfigPath(const std::string &cfg_path); + inline std::string GetInsertOpConfigPath() const; + + inline void SetInputFormat(const std::string &format); + inline std::string GetInputFormat() const; + + inline void SetInputShape(const std::string &shape); + inline std::string 
GetInputShape() const; + + void SetInputShapeMap(const std::map> &shape); + std::map> GetInputShapeMap() const; + + void SetDynamicBatchSize(const std::vector &dynamic_batch_size); + inline std::string GetDynamicBatchSize() const; + + void SetOutputType(enum DataType output_type); + enum DataType GetOutputType() const; + + inline void SetPrecisionMode(const std::string &precision_mode); + inline std::string GetPrecisionMode() const; + + inline void SetOpSelectImplMode(const std::string &op_select_impl_mode); + inline std::string GetOpSelectImplMode() const; + + inline void SetFusionSwitchConfigPath(const std::string &cfg_path); + inline std::string GetFusionSwitchConfigPath() const; private: - // api without std::string - static void SetGlobalDeviceTarget(const std::vector &device_target); - static std::vector GetGlobalDeviceTargetChar(); + void SetDumpConfigPath(const std::vector &cfg_path); + std::vector GetDumpConfigPathChar() const; - static void SetGlobalDumpConfigPath(const std::vector &cfg_path); - static std::vector GetGlobalDumpConfigPathChar(); + void SetInsertOpConfigPath(const std::vector &cfg_path); + std::vector GetInsertOpConfigPathChar() const; + + void SetInputFormat(const std::vector &format); + std::vector GetInputFormatChar() const; + + void SetInputShape(const std::vector &shape); + std::vector GetInputShapeChar() const; + + std::vector GetDynamicBatchSizeChar() const; + + void SetPrecisionMode(const std::vector &precision_mode); + std::vector GetPrecisionModeChar() const; + + void SetOpSelectImplMode(const std::vector &op_select_impl_mode); + std::vector GetOpSelectImplModeChar() const; + + void SetFusionSwitchConfigPath(const std::vector &cfg_path); + std::vector GetFusionSwitchConfigPathChar() const; }; -struct MS_API ModelContext : public Context { - public: - static inline void SetInsertOpConfigPath(const std::shared_ptr &context, const std::string &cfg_path); - static inline std::string GetInsertOpConfigPath(const std::shared_ptr &context); +void Ascend310DeviceInfo::SetDumpConfigPath(const std::string &cfg_path) { SetDumpConfigPath(StringToChar(cfg_path)); } +std::string Ascend310DeviceInfo::GetDumpConfigPath() const { return CharToString(GetDumpConfigPathChar()); } - static inline void SetInputFormat(const std::shared_ptr &context, const std::string &format); - static inline std::string GetInputFormat(const std::shared_ptr &context); - - static inline void SetInputShape(const std::shared_ptr &context, const std::string &shape); - static inline std::string GetInputShape(const std::shared_ptr &context); - - static void SetInputShapeMap(const std::shared_ptr &context, const std::map> &shape); - static std::map> GetInputShapeMap(const std::shared_ptr &context); - - static void SetDynamicBatchSize(const std::shared_ptr &context, - const std::vector &dynamic_batch_size); - static inline std::string GetDynamicBatchSize(const std::shared_ptr &context); - - static void SetOutputType(const std::shared_ptr &context, enum DataType output_type); - static enum DataType GetOutputType(const std::shared_ptr &context); - - static inline void SetPrecisionMode(const std::shared_ptr &context, const std::string &precision_mode); - static inline std::string GetPrecisionMode(const std::shared_ptr &context); - - static inline void SetOpSelectImplMode(const std::shared_ptr &context, - const std::string &op_select_impl_mode); - static inline std::string GetOpSelectImplMode(const std::shared_ptr &context); - - static inline void SetFusionSwitchConfigPath(const std::shared_ptr &context, 
const std::string &cfg_path); - static inline std::string GetFusionSwitchConfigPath(const std::shared_ptr &context); - - static inline void SetGpuTrtInferMode(const std::shared_ptr &context, const std::string &gpu_trt_infer_mode); - static inline std::string GetGpuTrtInferMode(const std::shared_ptr &context); - - private: - // api without std::string - static void SetInsertOpConfigPath(const std::shared_ptr &context, const std::vector &cfg_path); - static std::vector GetInsertOpConfigPathChar(const std::shared_ptr &context); - - static void SetInputFormat(const std::shared_ptr &context, const std::vector &format); - static std::vector GetInputFormatChar(const std::shared_ptr &context); - - static void SetInputShape(const std::shared_ptr &context, const std::vector &shape); - static std::vector GetInputShapeChar(const std::shared_ptr &context); - - static void SetPrecisionMode(const std::shared_ptr &context, const std::vector &precision_mode); - static std::vector GetPrecisionModeChar(const std::shared_ptr &context); - - static void SetOpSelectImplMode(const std::shared_ptr &context, - const std::vector &op_select_impl_mode); - static std::vector GetOpSelectImplModeChar(const std::shared_ptr &context); - - static void SetFusionSwitchConfigPath(const std::shared_ptr &context, const std::vector &cfg_path); - static std::vector GetFusionSwitchConfigPathChar(const std::shared_ptr &context); - - static void SetGpuTrtInferMode(const std::shared_ptr &context, const std::vector &gpu_trt_infer_mode); - static std::vector GetGpuTrtInferModeChar(const std::shared_ptr &context); - static std::vector GetDynamicBatchSizeChar(const std::shared_ptr &context); -}; - -void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) { - SetGlobalDeviceTarget(StringToChar(device_target)); +void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) { + SetInsertOpConfigPath(StringToChar(cfg_path)); } -std::string GlobalContext::GetGlobalDeviceTarget() { return CharToString(GetGlobalDeviceTargetChar()); } +std::string Ascend310DeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); } -void GlobalContext::SetGlobalDumpConfigPath(const std::string &cfg_path) { - SetGlobalDumpConfigPath(StringToChar(cfg_path)); -} -std::string GlobalContext::GetGlobalDumpConfigPath() { return CharToString(GetGlobalDumpConfigPathChar()); } +void Ascend310DeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); } +std::string Ascend310DeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); } -void ModelContext::SetInsertOpConfigPath(const std::shared_ptr &context, const std::string &cfg_path) { - SetInsertOpConfigPath(context, StringToChar(cfg_path)); -} -std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr &context) { - return CharToString(GetInsertOpConfigPathChar(context)); -} +void Ascend310DeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); } +std::string Ascend310DeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); } -void ModelContext::SetInputFormat(const std::shared_ptr &context, const std::string &format) { - SetInputFormat(context, StringToChar(format)); -} -std::string ModelContext::GetInputFormat(const std::shared_ptr &context) { - return CharToString(GetInputFormatChar(context)); -} +std::string Ascend310DeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); } -void 
ModelContext::SetInputShape(const std::shared_ptr &context, const std::string &shape) { - SetInputShape(context, StringToChar(shape)); -} -std::string ModelContext::GetInputShape(const std::shared_ptr &context) { - return CharToString(GetInputShapeChar(context)); +void Ascend310DeviceInfo::SetPrecisionMode(const std::string &precision_mode) { + SetPrecisionMode(StringToChar(precision_mode)); } +std::string Ascend310DeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); } -void ModelContext::SetPrecisionMode(const std::shared_ptr &context, const std::string &precision_mode) { - SetPrecisionMode(context, StringToChar(precision_mode)); -} -std::string ModelContext::GetPrecisionMode(const std::shared_ptr &context) { - return CharToString(GetPrecisionModeChar(context)); +void Ascend310DeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) { + SetOpSelectImplMode(StringToChar(op_select_impl_mode)); } +std::string Ascend310DeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); } -void ModelContext::SetOpSelectImplMode(const std::shared_ptr &context, - const std::string &op_select_impl_mode) { - SetOpSelectImplMode(context, StringToChar(op_select_impl_mode)); +void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) { + SetFusionSwitchConfigPath(StringToChar(cfg_path)); } -std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr &context) { - return CharToString(GetOpSelectImplModeChar(context)); -} - -void ModelContext::SetFusionSwitchConfigPath(const std::shared_ptr &context, const std::string &cfg_path) { - SetFusionSwitchConfigPath(context, StringToChar(cfg_path)); -} -std::string ModelContext::GetFusionSwitchConfigPath(const std::shared_ptr &context) { - return CharToString(GetFusionSwitchConfigPathChar(context)); -} - -std::string ModelContext::GetDynamicBatchSize(const std::shared_ptr &context) { - return CharToString(GetDynamicBatchSizeChar(context)); -} - -void ModelContext::SetGpuTrtInferMode(const std::shared_ptr &context, const std::string &gpu_trt_infer_mode) { - SetGpuTrtInferMode(context, StringToChar(gpu_trt_infer_mode)); -} -std::string ModelContext::GetGpuTrtInferMode(const std::shared_ptr &context) { - return CharToString(GetGpuTrtInferModeChar(context)); +std::string Ascend310DeviceInfo::GetFusionSwitchConfigPath() const { + return CharToString(GetFusionSwitchConfigPathChar()); } } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_CONTEXT_H diff --git a/include/api/graph.h b/include/api/graph.h index 892f60495a..f25a6217f3 100644 --- a/include/api/graph.h +++ b/include/api/graph.h @@ -27,6 +27,7 @@ namespace mindspore { class MS_API Graph { public: class GraphData; + Graph(); explicit Graph(const std::shared_ptr &graph_data); explicit Graph(std::shared_ptr &&graph_data); explicit Graph(std::nullptr_t); @@ -34,6 +35,7 @@ class MS_API Graph { enum ModelType ModelType() const; bool operator==(std::nullptr_t) const; + bool operator!=(std::nullptr_t) const; private: friend class GraphCell; diff --git a/include/api/lite_context.h b/include/api/lite_context.h deleted file mode 100644 index bb06cff782..0000000000 --- a/include/api/lite_context.h +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_INCLUDE_API_LITE_CONTEXT_H -#define MINDSPORE_INCLUDE_API_LITE_CONTEXT_H - -#include -#include -#include -#include -#include "include/api/types.h" -#include "include/lite_types.h" - -namespace mindspore { -namespace lite { -class Allocator; -} // namespace lite - -struct MS_API Context { - public: - static void Clear(const std::shared_ptr &context); - - static void SetAsDefault(const std::shared_ptr &context); - - static void SetVendorName(const std::shared_ptr &context, const std::string &name); - static std::string GetVendorName(const std::shared_ptr &context); - - static void SetThreadNum(const std::shared_ptr &context, int num); - static int GetThreadNum(const std::shared_ptr &context); - - static void SetAllocator(const std::shared_ptr &context, std::shared_ptr alloc); - static std::shared_ptr GetAllocator(const std::shared_ptr &context); - - static void ConfigCPU(const std::shared_ptr &context, bool config); - static bool IfCPUEnabled(const std::shared_ptr &context); - - static void ConfigCPUFp16(const std::shared_ptr &context, bool config); - static bool IfCPUFp16Enabled(const std::shared_ptr &context); - - static void SetCPUBindMode(const std::shared_ptr &context, lite::CpuBindMode mode); - static lite::CpuBindMode GetCPUBindMode(const std::shared_ptr &context); - - static void ConfigGPU(const std::shared_ptr &context, bool config); - static bool IfGPUEnabled(const std::shared_ptr &context); - - static void ConfigGPUFp16(const std::shared_ptr &context, bool config); - static bool IfGPUFp16Enabled(const std::shared_ptr &context); - - static void ConfigNPU(const std::shared_ptr &context, bool config); - static bool IfNPUEnabled(const std::shared_ptr &context); - - static void SetNPUFrequency(const std::shared_ptr &context, int freq); - static int GetNPUFrequency(const std::shared_ptr &context); - - private: - std::map context_; -}; -} // namespace mindspore -#endif // MINDSPORE_INCLUDE_API_LITE_CONTEXT_H diff --git a/include/api/model.h b/include/api/model.h index 78f202fae2..9837cc3e1b 100644 --- a/include/api/model.h +++ b/include/api/model.h @@ -24,39 +24,52 @@ #include "include/api/status.h" #include "include/api/types.h" #include "include/api/graph.h" +#include "include/api/context.h" #include "include/api/cell.h" #include "include/api/dual_abi_helper.h" namespace mindspore { class ModelImpl; -struct Context; class MS_API Model { public: - explicit Model(const std::vector &network, const std::shared_ptr &model_context = nullptr); - explicit Model(const GraphCell &graph, const std::shared_ptr &model_context = nullptr); + Model(); ~Model(); Model(const Model &) = delete; void operator=(const Model &) = delete; - Status Build(); + Status Build(GraphCell graph, const std::shared_ptr &model_context = nullptr); Status Resize(const std::vector &inputs, const std::vector> &dims); Status Predict(const std::vector &inputs, std::vector *outputs); std::vector GetInputs(); - std::vector GetOutputs(); + inline MSTensor GetInputByTensorName(const std::string &tensor_name); - static inline bool CheckModelSupport(const std::string &device_type, ModelType 
model_type); + std::vector GetOutputs(); + inline std::vector GetOutputTensorNames(); + inline MSTensor GetOutputByTensorName(const std::string &tensor_name); + + static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type); private: // api without std::string - static bool CheckModelSupport(const std::vector &device_type, ModelType model_type); + MSTensor GetInputByTensorName(const std::vector &tensor_name); + std::vector> GetOutputTensorNamesChar(); + MSTensor GetOutputByTensorName(const std::vector &tensor_name); + std::vector GetOutputsByNodeName(const std::vector &node_name); + std::shared_ptr impl_; }; -bool Model::CheckModelSupport(const std::string &device_type, ModelType model_type) { - return CheckModelSupport(StringToChar(device_type), model_type); +MSTensor Model::GetInputByTensorName(const std::string &tensor_name) { + return GetInputByTensorName(StringToChar(tensor_name)); +} + +std::vector Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); } + +MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) { + return GetOutputByTensorName(StringToChar(tensor_name)); } } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_MODEL_H diff --git a/include/api/serialization.h b/include/api/serialization.h index c5fb61eb07..5b7f1d9b37 100644 --- a/include/api/serialization.h +++ b/include/api/serialization.h @@ -29,19 +29,19 @@ namespace mindspore { class MS_API Serialization { public: - static Graph LoadModel(const void *model_data, size_t data_size, ModelType model_type); - inline static Graph LoadModel(const std::string &file, ModelType model_type); + static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph); + inline static Status Load(const std::string &file, ModelType model_type, Graph *graph); static Status LoadCheckPoint(const std::string &ckpt_file, std::map *parameters); static Status SetParameters(const std::map ¶meters, Model *model); static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data); static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file); private: - static Graph LoadModel(const std::vector &file, ModelType model_type); + static Status Load(const std::vector &file, ModelType model_type, Graph *graph); }; -Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { - return LoadModel(StringToChar(file), model_type); +Status Serialization::Load(const std::string &file, ModelType model_type, Graph *graph) { + return Load(StringToChar(file), model_type, graph); } } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H diff --git a/include/api/types.h b/include/api/types.h index 482159d8c0..f2d919f802 100644 --- a/include/api/types.h +++ b/include/api/types.h @@ -43,15 +43,19 @@ class MS_API MSTensor { public: class Impl; - static inline MSTensor CreateTensor(const std::string &name, DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept; - static inline MSTensor CreateRefTensor(const std::string &name, DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept; + static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static inline MSTensor 
*StringsToTensor(const std::string &name, const std::vector &str); + static inline std::vector TensorToStrings(const MSTensor &tensor); + static void DestroyTensorPtr(MSTensor *tensor) noexcept; MSTensor(); explicit MSTensor(const std::shared_ptr &impl); inline MSTensor(const std::string &name, DataType type, const std::vector &shape, const void *data, size_t data_len); + explicit MSTensor(std::nullptr_t); ~MSTensor(); inline std::string Name() const; @@ -65,21 +69,24 @@ class MS_API MSTensor { bool IsDevice() const; - MSTensor Clone() const; + MSTensor *Clone() const; bool operator==(std::nullptr_t) const; + bool operator!=(std::nullptr_t) const; private: // api without std::string - static MSTensor CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept; - static MSTensor CreateRefTensor(const std::vector &name, enum DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept; + static MSTensor *CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static MSTensor *CreateRefTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static MSTensor *CharStringsToTensor(const std::vector &name, const std::vector> &str); + static std::vector> TensorToStringChars(const MSTensor &tensor); + MSTensor(const std::vector &name, enum DataType type, const std::vector &shape, const void *data, size_t data_len); std::vector CharName() const; friend class ModelImpl; - explicit MSTensor(std::nullptr_t); std::shared_ptr impl_; }; @@ -103,16 +110,24 @@ class MS_API Buffer { std::shared_ptr impl_; }; -MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept { +MSTensor *MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { return CreateTensor(StringToChar(name), type, shape, data, data_len); } -MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept { +MSTensor *MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { return CreateRefTensor(StringToChar(name), type, shape, data, data_len); } +MSTensor *MSTensor::StringsToTensor(const std::string &name, const std::vector &str) { + return CharStringsToTensor(StringToChar(name), VectorStringToChar(str)); +} + +std::vector MSTensor::TensorToStrings(const MSTensor &tensor) { + return VectorCharToString(TensorToStringChars(tensor)); +} + MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector &shape, const void *data, size_t data_len) : MSTensor(StringToChar(name), type, shape, data, data_len) {} diff --git a/include/infer_log.h b/include/infer_log.h deleted file mode 100644 index 4a5bf5e1fe..0000000000 --- a/include/infer_log.h +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_INFERENCE_LOG_H_ -#define MINDSPORE_INFERENCE_LOG_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef ENABLE_ACL -#include "mindspore/core/utils/log_adapter.h" -#else // ENABLE_ACL -#include "acl/acl.h" -#endif - -namespace mindspore::inference { - -class LogStream { - public: - LogStream() { sstream_ = std::make_shared(); } - ~LogStream() = default; - - template - LogStream &operator<<(const T &val) noexcept { - (*sstream_) << val; - return *this; - } - - template - LogStream &operator<<(const std::vector &val) noexcept { - (*sstream_) << "["; - for (size_t i = 0; i < val.size(); i++) { - (*this) << val[i]; - if (i + 1 < val.size()) { - (*sstream_) << ", "; - } - } - (*sstream_) << "]"; - return *this; - } - - LogStream &operator<<(std::ostream &func(std::ostream &os)) noexcept { - (*sstream_) << func; - return *this; - } - - friend class LogWriter; - friend class Status; - - private: - std::shared_ptr sstream_; -}; - -#ifndef ENABLE_ACL -#define MSI_LOG(level) MS_LOG(level) - -#define MSI_LOG_DEBUG MSI_LOG(DEBUG) -#define MSI_LOG_INFO MSI_LOG(INFO) -#define MSI_LOG_WARNING MSI_LOG(WARNING) -#define MSI_LOG_ERROR MSI_LOG(ERROR) - -#define MSI_ASSERT(item) MS_ASSERT(item) - -#else // ENABLE_ACL - -class LogWriter { - public: - LogWriter(const char *file, int line, const char *func, aclLogLevel log_level) - : file_(file), line_(line), func_(func), log_level_(log_level) {} - ~LogWriter() = default; - - void operator<(const LogStream &stream) const noexcept __attribute__((visibility("default"))) { - std::ostringstream msg; - msg << stream.sstream_->rdbuf(); - OutputLog(msg); - } - - private: - void OutputLog(const std::ostringstream &msg) const { aclAppLog(log_level_, func_, file_, line_, msg.str().c_str()); } - - const char *file_; - int line_; - const char *func_; - aclLogLevel log_level_; -}; - -#define MSILOG_IF(level) inference::LogWriter(__FILE__, __LINE__, __FUNCTION__, ACL_##level) < inference::LogStream() - -#define MSI_LOG(level) MSI_LOG_##level - -#define MSI_LOG_DEBUG MSILOG_IF(DEBUG) -#define MSI_LOG_INFO MSILOG_IF(INFO) -#define MSI_LOG_WARNING MSILOG_IF(WARNING) -#define MSI_LOG_ERROR MSILOG_IF(ERROR) - -#define MSI_ASSERT(item) - -#endif // ENABLE_ACL - -#define MSI_TIME_STAMP_START(name) auto time_start_##name = std::chrono::steady_clock::now(); -#define MSI_TIME_STAMP_END(name) \ - { \ - auto time_end_##name = std::chrono::steady_clock::now(); \ - auto time_cost = std::chrono::duration(time_end_##name - time_start_##name).count(); \ - MSI_LOG_INFO << #name " Time Cost # " << time_cost << " ms ---------------------"; \ - } - -#define INFER_STATUS(code) inference::Status(code) < inference::LogStream() -#define ERROR_INFER_STATUS(status, type, msg) \ - MSI_LOG_ERROR << msg; \ - status = inference::Status(type, msg) - -} // namespace mindspore::inference - -#endif // MINDSPORE_INFERENCE_LOG_H_ diff --git a/include/infer_tensor.h b/include/infer_tensor.h deleted file mode 100644 index 3a8b203b3e..0000000000 --- a/include/infer_tensor.h +++ /dev/null @@ -1,217 +0,0 @@ -/** - * Copyright 2020 
Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_INCLUDE_INFER_TENSOR_H_ -#define MINDSPORE_INCLUDE_INFER_TENSOR_H_ - -#include -#include -#include -#include -#include -#include - -#include "securec/include/securec.h" -#include "include/infer_log.h" - -namespace mindspore { -#define MS_API __attribute__((visibility("default"))) -namespace inference { -enum DataType { - kMSI_Unknown = 0, - kMSI_Bool = 1, - kMSI_Int8 = 2, - kMSI_Int16 = 3, - kMSI_Int32 = 4, - kMSI_Int64 = 5, - kMSI_Uint8 = 6, - kMSI_Uint16 = 7, - kMSI_Uint32 = 8, - kMSI_Uint64 = 9, - kMSI_Float16 = 10, - kMSI_Float32 = 11, - kMSI_Float64 = 12, -}; - -class InferTensorBase { - public: - InferTensorBase() = default; - virtual ~InferTensorBase() = default; - - virtual DataType data_type() const = 0; - virtual void set_data_type(DataType type) = 0; - virtual std::vector shape() const = 0; - virtual void set_shape(const std::vector &shape) = 0; - virtual const void *data() const = 0; - virtual size_t data_size() const = 0; - virtual bool resize_data(size_t data_len) = 0; - virtual void *mutable_data() = 0; - - bool set_data(const void *data, size_t data_len) { - resize_data(data_len); - if (mutable_data() == nullptr) { - MSI_LOG_ERROR << "set data failed, data len " << data_len; - return false; - } - if (data_size() != data_len) { - MSI_LOG_ERROR << "set data failed, tensor current data size " << data_size() << " not match data len " - << data_len; - return false; - } - if (data_len == 0) { - return true; - } - auto ret = memcpy_s(mutable_data(), data_size(), data, data_len); - if (ret != 0) { - MSI_LOG_ERROR << "Set data memcpy_s failed"; - return false; - } - return true; - } - - int64_t ElementNum() const { - std::vector shapex = shape(); - return std::accumulate(shapex.begin(), shapex.end(), 1LL, std::multiplies()); - } - - int GetTypeSize(DataType type) const { - const std::map type_size_map{ - {kMSI_Bool, sizeof(bool)}, {kMSI_Float64, sizeof(double)}, {kMSI_Int8, sizeof(int8_t)}, - {kMSI_Uint8, sizeof(uint8_t)}, {kMSI_Int16, sizeof(int16_t)}, {kMSI_Uint16, sizeof(uint16_t)}, - {kMSI_Int32, sizeof(int32_t)}, {kMSI_Uint32, sizeof(uint32_t)}, {kMSI_Int64, sizeof(int64_t)}, - {kMSI_Uint64, sizeof(uint64_t)}, {kMSI_Float16, sizeof(uint16_t)}, {kMSI_Float32, sizeof(float)}, - }; - auto it = type_size_map.find(type); - if (it != type_size_map.end()) { - return it->second; - } - return 0; - } -}; - -class InferTensor : public InferTensorBase { - public: - DataType type_; - std::vector shape_; - std::vector data_; - - public: - InferTensor() = default; - ~InferTensor() = default; - InferTensor(DataType type, std::vector shape, const void *data, size_t data_len) { - set_data_type(type); - set_shape(shape); - set_data(data, data_len); - } - - void set_data_type(DataType type) override { type_ = type; } - DataType data_type() const override { return type_; } - - void set_shape(const std::vector &shape) override { shape_ = shape; } - std::vector shape() const override { 
return shape_; } - - const void *data() const override { return data_.data(); } - size_t data_size() const override { return data_.size(); } - - bool resize_data(size_t data_len) override { - data_.resize(data_len); - return true; - } - void *mutable_data() override { return data_.data(); } -}; - -class InferImagesBase { - public: - InferImagesBase() = default; - virtual ~InferImagesBase() = default; - virtual size_t batch_size() const = 0; - virtual bool get(size_t index, const void *&pic_buffer, uint32_t &pic_size) const = 0; - virtual size_t input_index() const = 0; // the index of images as input in model -}; - -class RequestBase { - public: - RequestBase() = default; - virtual ~RequestBase() = default; - virtual size_t size() const = 0; - virtual const InferTensorBase *operator[](size_t index) const = 0; -}; - -class ImagesRequestBase { - public: - ImagesRequestBase() = default; - virtual ~ImagesRequestBase() = default; - virtual size_t size() const = 0; - virtual const InferImagesBase *operator[](size_t index) const = 0; -}; - -class ReplyBase { - public: - ReplyBase() = default; - virtual ~ReplyBase() = default; - virtual size_t size() const = 0; - virtual InferTensorBase *operator[](size_t index) = 0; - virtual const InferTensorBase *operator[](size_t index) const = 0; - virtual InferTensorBase *add() = 0; - virtual void clear() = 0; -}; - -class VectorInferTensorWrapReply : public ReplyBase { - public: - explicit VectorInferTensorWrapReply(std::vector &tensor_list) : tensor_list_(tensor_list) {} - ~VectorInferTensorWrapReply() = default; - - size_t size() const { return tensor_list_.size(); } - InferTensorBase *operator[](size_t index) { - if (index >= tensor_list_.size()) { - MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size(); - return nullptr; - } - return &(tensor_list_[index]); - } - const InferTensorBase *operator[](size_t index) const { - if (index >= tensor_list_.size()) { - MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size(); - return nullptr; - } - return &(tensor_list_[index]); - } - InferTensorBase *add() { - tensor_list_.push_back(InferTensor()); - return &(tensor_list_.back()); - } - void clear() { tensor_list_.clear(); } - std::vector &tensor_list_; -}; - -class VectorInferTensorWrapRequest : public RequestBase { - public: - explicit VectorInferTensorWrapRequest(const std::vector &tensor_list) : tensor_list_(tensor_list) {} - ~VectorInferTensorWrapRequest() = default; - - size_t size() const { return tensor_list_.size(); } - const InferTensorBase *operator[](size_t index) const { - if (index >= tensor_list_.size()) { - MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size(); - return nullptr; - } - return &(tensor_list_[index]); - } - const std::vector &tensor_list_; -}; -} // namespace inference -} // namespace mindspore -#endif // MINDSPORE_INCLUDE_INFER_TENSOR_H_ diff --git a/include/inference.h b/include/inference.h deleted file mode 100644 index ec45109b74..0000000000 --- a/include/inference.h +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_INCLUDE_MS_SESSION_H -#define MINDSPORE_INCLUDE_MS_SESSION_H - -#include -#include -#include -#include "include/infer_tensor.h" -#include "include/infer_log.h" - -namespace mindspore { -namespace inference { -enum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS }; - -class Status { - public: - Status() : status_code_(FAILED) {} - Status(enum StatusCode status_code, const std::string &status_msg = "") - : status_code_(status_code), status_msg_(status_msg) {} - ~Status() = default; - - bool IsSuccess() const { return status_code_ == SUCCESS; } - enum StatusCode StatusCode() const { return status_code_; } - std::string StatusMessage() const { return status_msg_; } - bool operator==(const Status &other) const { return status_code_ == other.status_code_; } - bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; } - bool operator!=(const Status &other) const { return status_code_ != other.status_code_; } - bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; } - operator bool() const = delete; - Status &operator<(const LogStream &stream) noexcept __attribute__((visibility("default"))) { - status_msg_ = stream.sstream_->str(); - return *this; - } - - private: - enum StatusCode status_code_; - std::string status_msg_; -}; - -class MS_API InferSession { - public: - InferSession() = default; - virtual ~InferSession() = default; - virtual Status InitEnv(const std::string &device_type, uint32_t device_id) = 0; - virtual Status FinalizeEnv() = 0; - virtual Status LoadModelFromFile(const std::string &file_name, uint32_t &model_id) = 0; - virtual Status UnloadModel(uint32_t model_id) = 0; - // override this method to avoid request/reply data copy - virtual Status ExecuteModel(uint32_t model_id, const RequestBase &request, ReplyBase &reply) = 0; - - virtual Status ExecuteModel(uint32_t model_id, const std::vector &inputs, - std::vector &outputs) { - VectorInferTensorWrapRequest request(inputs); - VectorInferTensorWrapReply reply(outputs); - return ExecuteModel(model_id, request, reply); - } - // default not support input data preprocess(decode, resize, crop, crop&paste, etc.) 
- virtual Status ExecuteModel(uint32_t /*model_id*/, - const ImagesRequestBase & /*images_inputs*/, // images for preprocess - const RequestBase & /*request*/, ReplyBase & /*reply*/) { - return FAILED; - } - virtual Status GetModelInputsInfo(uint32_t graph_id, std::vector *tensor_list) const { - Status status(SUCCESS); - return status; - } - static std::shared_ptr CreateSession(const std::string &device, uint32_t device_id); -}; -} // namespace inference -} // namespace mindspore -#endif // MINDSPORE_INCLUDE_MS_SESSION_H diff --git a/mindspore/ccsrc/cxx_api/cell.cc b/mindspore/ccsrc/cxx_api/cell.cc index ebf3a4706e..87ee6b7978 100644 --- a/mindspore/ccsrc/cxx_api/cell.cc +++ b/mindspore/ccsrc/cxx_api/cell.cc @@ -21,12 +21,19 @@ namespace mindspore { std::vector CellBase::operator()(const std::vector &inputs) const { return Clone()->Construct(inputs); } -ParameterCell::ParameterCell(const ParameterCell &cell) : tensor_(cell.tensor_.Clone()) {} +ParameterCell::ParameterCell(const ParameterCell &cell) { + auto tmp_ptr = cell.tensor_.Clone(); + tensor_ = *tmp_ptr; + MSTensor::DestroyTensorPtr(tmp_ptr); +} + ParameterCell &ParameterCell::operator=(const ParameterCell &cell) { if (&cell == this) { return *this; } - tensor_ = cell.tensor_.Clone(); + auto tmp_ptr = cell.tensor_.Clone(); + tensor_ = *tmp_ptr; + MSTensor::DestroyTensorPtr(tmp_ptr); return *this; } @@ -40,10 +47,16 @@ ParameterCell &ParameterCell::operator=(ParameterCell &&cell) { return *this; } -ParameterCell::ParameterCell(const MSTensor &tensor) : tensor_(tensor.Clone()) {} +ParameterCell::ParameterCell(const MSTensor &tensor) { + auto tmp_ptr = tensor.Clone(); + tensor_ = *tmp_ptr; + MSTensor::DestroyTensorPtr(tmp_ptr); +} ParameterCell &ParameterCell::operator=(const MSTensor &tensor) { - tensor_ = tensor.Clone(); + auto tmp_ptr = tensor.Clone(); + tensor_ = *tmp_ptr; + MSTensor::DestroyTensorPtr(tmp_ptr); return *this; } @@ -54,54 +67,67 @@ ParameterCell &ParameterCell::operator=(MSTensor &&tensor) { return *this; } -GraphCell::GraphCell(const Graph &graph) - : graph_(std::make_shared(graph)), - executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { - MS_EXCEPTION_IF_NULL(graph_); - MS_EXCEPTION_IF_NULL(executor_); - executor_->SetGraph(graph_); -} +GraphCell::GraphCell(const Graph &graph) : graph_(std::make_shared(graph)) { MS_EXCEPTION_IF_NULL(graph_); } -GraphCell::GraphCell(const std::shared_ptr &graph) - : graph_(graph), - executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { - MS_EXCEPTION_IF_NULL(graph_); - MS_EXCEPTION_IF_NULL(executor_); - executor_->SetGraph(graph_); -} +GraphCell::GraphCell(const std::shared_ptr &graph) : graph_(graph) { MS_EXCEPTION_IF_NULL(graph_); } -GraphCell::GraphCell(Graph &&graph) - : graph_(std::make_shared(graph)), - executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { - MS_EXCEPTION_IF_NULL(graph_); - MS_EXCEPTION_IF_NULL(executor_); - executor_->SetGraph(graph_); -} +GraphCell::GraphCell(Graph &&graph) : graph_(std::make_shared(graph)) { MS_EXCEPTION_IF_NULL(graph_); } Status GraphCell::Run(const std::vector &inputs, std::vector *outputs) { - MS_EXCEPTION_IF_NULL(executor_); + if (executor_ == nullptr) { + executor_ = Factory::Instance().Create(g_device_target); + if (executor_ == nullptr) { + MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed."; + return kMEFailed; + } + executor_->SetGraph(graph_); + } return executor_->Run(inputs, outputs); } -Status GraphCell::Load() { - 
MS_EXCEPTION_IF_NULL(executor_); - return executor_->Load(); +Status GraphCell::Load(uint32_t device_id) { + if (executor_ == nullptr) { + executor_ = Factory::Instance().Create(g_device_target); + if (executor_ == nullptr) { + MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed."; + return kMEFailed; + } + executor_->SetGraph(graph_); + } + return executor_->Load(device_id); } std::vector GraphCell::GetInputs() { - MS_EXCEPTION_IF_NULL(executor_); + if (executor_ == nullptr) { + executor_ = Factory::Instance().Create(g_device_target); + if (executor_ == nullptr) { + MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed."; + return {}; + } + executor_->SetGraph(graph_); + } return executor_->GetInputs(); } std::vector GraphCell::GetOutputs() { - MS_EXCEPTION_IF_NULL(executor_); + if (executor_ == nullptr) { + executor_ = Factory::Instance().Create(g_device_target); + if (executor_ == nullptr) { + MS_LOG(ERROR) << "Create graph impl for device target " << g_device_target << " failed."; + return {}; + } + executor_->SetGraph(graph_); + } return executor_->GetOutputs(); } InputAndOutput::InputAndOutput() : cell_(nullptr), prev_(), index_(-1) {} -InputAndOutput::InputAndOutput(const MSTensor &tensor) - : cell_(std::make_shared(tensor.Clone())), prev_(), index_(-1) {} +InputAndOutput::InputAndOutput(const MSTensor &tensor) : prev_(), index_(-1) { + auto tmp_ptr = tensor.Clone(); + cell_ = std::make_shared(*tmp_ptr); + MSTensor::DestroyTensorPtr(tmp_ptr); +} InputAndOutput::InputAndOutput(MSTensor &&tensor) : cell_(std::make_shared(tensor)), prev_(), index_(-1) {} diff --git a/mindspore/ccsrc/cxx_api/context.cc b/mindspore/ccsrc/cxx_api/context.cc index fb3ca74cb6..60f8547b2a 100644 --- a/mindspore/ccsrc/cxx_api/context.cc +++ b/mindspore/ccsrc/cxx_api/context.cc @@ -17,41 +17,57 @@ #include #include #include +#include "cxx_api/factory.h" #include "utils/log_adapter.h" -constexpr auto kGlobalContextDeviceTarget = "mindspore.ascend.globalcontext.device_target"; -constexpr auto kGlobalContextDeviceID = "mindspore.ascend.globalcontext.device_id"; -constexpr auto kGlobalContextDumpCfgPath = "mindspore.ascend.globalcontext.dump_config_file_path"; -constexpr auto kModelOptionInsertOpCfgPath = "mindspore.option.insert_op_config_file_path"; // aipp config file -constexpr auto kModelOptionInputFormat = "mindspore.option.input_format"; // nchw or nhwc -constexpr auto kModelOptionInputShapeMap = "mindspore.option.input_shape_map"; -constexpr auto kModelOptionInputShape = "mindspore.option.input_shape"; +constexpr auto kModelOptionCpuEnableFP16 = "mindspore.option.cpu.enable_fp16"; +constexpr auto kModelOptionCpuThreadAffinity = "mindspore.option.cpu.thread_affinity"; +constexpr auto kModelOptionMaliGpuEnableFP16 = "mindspore.option.mali_gpu.enable_fp16"; +constexpr auto kModelOptionKirinNpuFrequency = "mindspore.option.kirin_npu.frequency"; +constexpr auto kModelOptionDeviceID = "mindspore.option.device_id"; +constexpr auto kModelOptionNvidiaGpuDeviceID = kModelOptionDeviceID; +constexpr auto kModelOptionNvidiaGpuTrtInferMode = "mindspore.option.nvidia_gpu.trt_infer_mode"; +constexpr auto kModelOptionAscend910DeviceID = kModelOptionDeviceID; +constexpr auto kModelOptionAscend310DeviceID = kModelOptionDeviceID; +constexpr auto kModelOptionAscend310DumpCfgPath = "mindspore.option.ascend310.dump_config_file_path"; +constexpr auto kModelOptionAscend310InsertOpCfgPath = + "mindspore.option.ascend310.insert_op_config_file_path"; // aipp config 
file +constexpr auto kModelOptionAscend310InputFormat = "mindspore.option.ascend310.input_format"; // nchw or nhwc +constexpr auto kModelOptionAscend310InputShapeMap = "mindspore.option.ascend310.input_shape_map"; +constexpr auto kModelOptionAscend310InputShape = "mindspore.option.ascend310.input_shape"; // Mandatory while dynamic batch: e.g. "input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1" -constexpr auto kModelOptionOutputType = "mindspore.option.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32" -constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode"; +constexpr auto kModelOptionAscend310OutputType = + "mindspore.option.ascend310.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32" +constexpr auto kModelOptionAscend310PrecisionMode = "mindspore.option.ascend310.precision_mode"; // "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16" -constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode"; -constexpr auto KModelOptionFusionSwitchCfgPath = "mindspore.option.fusion_switch_config_file_path"; +constexpr auto kModelOptionAscend310OpSelectImplMode = "mindspore.option.ascend310.op_select_impl_mode"; +constexpr auto KModelOptionAscend310FusionSwitchCfgPath = "mindspore.option.ascend310.fusion_switch_config_file_path"; // "False": Inference with native backend, "True": Inference with Tensor-RT engine, default as "False" -constexpr auto kModelOptionGpuTrtInferMode = "mindspore.option.gpu_trt_infer_mode"; -constexpr auto kModelOptionDynamicBatchSize = "mindspore.option.dynamic_batch_size"; -constexpr auto kModelOptionDynamicImageSize = "mindspore.option.dynamic_image_size"; +constexpr auto kModelOptionAscend310DynamicBatchSize = "mindspore.option.ascend310.dynamic_batch_size"; namespace mindspore { +class Allocator {}; + struct Context::Data { + std::vector> device_info_list; + int32_t thread_num; + std::shared_ptr allocator; +}; + +struct DeviceInfoContext::Data { std::map params; }; -Context::Context() : data(std::make_shared()) {} +Context::Context() : data_(std::make_shared()) {} template >> -static const U &GetValue(const std::shared_ptr &context, const std::string &key) { +static const U &GetValue(const std::shared_ptr &data, const std::string &key) { static U empty_result; - if (context == nullptr || context->data == nullptr) { + if (data == nullptr) { return empty_result; } - auto iter = context->data->params.find(key); - if (iter == context->data->params.end()) { + auto iter = data->params.find(key); + if (iter == data->params.end()) { return empty_result; } const std::any &value = iter->second; @@ -62,210 +78,205 @@ static const U &GetValue(const std::shared_ptr &context, const std::str return std::any_cast(value); } -std::shared_ptr GlobalContext::GetGlobalContext() { - static std::shared_ptr g_context = std::make_shared(); - return g_context; +void Context::SetThreadNum(int32_t thread_num) { + MS_EXCEPTION_IF_NULL(data_); + data_->thread_num = thread_num; +} +int32_t Context::GetThreadNum() const { + MS_EXCEPTION_IF_NULL(data_); + return data_->thread_num; } -void GlobalContext::SetGlobalDeviceTarget(const std::vector &device_target) { - auto global_context = GetGlobalContext(); - MS_EXCEPTION_IF_NULL(global_context); - if (global_context->data == nullptr) { - global_context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(global_context->data); - } - global_context->data->params[kGlobalContextDeviceTarget] = CharToString(device_target); +void 
Context::SetAllocator(const std::shared_ptr &allocator) { + MS_EXCEPTION_IF_NULL(data_); + data_->allocator = allocator; +} +std::shared_ptr Context::GetAllocator() const { + MS_EXCEPTION_IF_NULL(data_); + return data_->allocator; } -std::vector GlobalContext::GetGlobalDeviceTargetChar() { - auto global_context = GetGlobalContext(); - MS_EXCEPTION_IF_NULL(global_context); - const std::string &ref = GetValue(global_context, kGlobalContextDeviceTarget); +std::vector> &Context::MutableDeviceInfo() { + MS_EXCEPTION_IF_NULL(data_); + return data_->device_info_list; +} + +DeviceInfoContext::DeviceInfoContext() : data_(std::make_shared()) {} + +void CPUDeviceInfo::SetEnableFP16(bool is_fp16) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionCpuEnableFP16] = is_fp16; +} +bool CPUDeviceInfo::GetEnableFP16() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionCpuEnableFP16); +} + +void CPUDeviceInfo::SetThreadAffinity(int affinity) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionCpuThreadAffinity] = affinity; +} +int CPUDeviceInfo::GetThreadAffinity() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionCpuThreadAffinity); +} + +void MaliGPUDeviceInfo::SetEnableFP16(bool is_fp16) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionMaliGpuEnableFP16] = is_fp16; +} +bool MaliGPUDeviceInfo::GetEnableFP16() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionMaliGpuEnableFP16); +} + +void KirinNPUDeviceInfo::SetFrequency(int frequency) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionKirinNpuFrequency] = frequency; +} +int KirinNPUDeviceInfo::GetFrequency() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionKirinNpuFrequency); +} + +void NvidiaGPUDeviceInfo::SetDeviceID(uint32_t device_id) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionNvidiaGpuDeviceID] = device_id; +} +uint32_t NvidiaGPUDeviceInfo::GetDeviceID() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionNvidiaGpuDeviceID); +} + +void NvidiaGPUDeviceInfo::SetGpuTrtInferMode(bool gpu_trt_infer_mode) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionNvidiaGpuTrtInferMode] = gpu_trt_infer_mode; +} +bool NvidiaGPUDeviceInfo::GetGpuTrtInferMode() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionNvidiaGpuTrtInferMode); +} + +void Ascend910DeviceInfo::SetDeviceID(uint32_t device_id) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend910DeviceID] = device_id; +} +uint32_t Ascend910DeviceInfo::GetDeviceID() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionAscend910DeviceID); +} + +void Ascend310DeviceInfo::SetDeviceID(uint32_t device_id) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310DeviceID] = device_id; +} +uint32_t Ascend310DeviceInfo::GetDeviceID() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionAscend310DeviceID); +} + +void Ascend310DeviceInfo::SetDumpConfigPath(const std::vector &cfg_path) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310DumpCfgPath] = CharToString(cfg_path); +} +std::vector Ascend310DeviceInfo::GetDumpConfigPathChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, kModelOptionAscend310DeviceID); return StringToChar(ref); } -void GlobalContext::SetGlobalDeviceID(const uint32_t &device_id) { - auto global_context = GetGlobalContext(); - 
MS_EXCEPTION_IF_NULL(global_context); - if (global_context->data == nullptr) { - global_context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(global_context->data); - } - global_context->data->params[kGlobalContextDeviceID] = device_id; +void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::vector &cfg_path) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310InsertOpCfgPath] = CharToString(cfg_path); } - -uint32_t GlobalContext::GetGlobalDeviceID() { - auto global_context = GetGlobalContext(); - MS_EXCEPTION_IF_NULL(global_context); - return GetValue(global_context, kGlobalContextDeviceID); -} - -void GlobalContext::SetGlobalDumpConfigPath(const std::vector &cfg_path) { - auto global_context = GetGlobalContext(); - MS_EXCEPTION_IF_NULL(global_context); - if (global_context->data == nullptr) { - global_context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(global_context->data); - } - global_context->data->params[kGlobalContextDumpCfgPath] = CharToString(cfg_path); -} - -std::vector GlobalContext::GetGlobalDumpConfigPathChar() { - auto global_context = GetGlobalContext(); - MS_EXCEPTION_IF_NULL(global_context); - const std::string &ref = GetValue(global_context, kGlobalContextDumpCfgPath); +std::vector Ascend310DeviceInfo::GetInsertOpConfigPathChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, kModelOptionAscend310InsertOpCfgPath); return StringToChar(ref); } -void ModelContext::SetInsertOpConfigPath(const std::shared_ptr &context, const std::vector &cfg_path) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[kModelOptionInsertOpCfgPath] = CharToString(cfg_path); +void Ascend310DeviceInfo::SetInputFormat(const std::vector &format) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310InputFormat] = CharToString(format); } - -std::vector ModelContext::GetInsertOpConfigPathChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, kModelOptionInsertOpCfgPath); +std::vector Ascend310DeviceInfo::GetInputFormatChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, kModelOptionAscend310InputFormat); return StringToChar(ref); } -void ModelContext::SetInputFormat(const std::shared_ptr &context, const std::vector &format) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[kModelOptionInputFormat] = CharToString(format); +void Ascend310DeviceInfo::SetInputShape(const std::vector &shape) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310InputShape] = CharToString(shape); } - -std::vector ModelContext::GetInputFormatChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, kModelOptionInputFormat); +std::vector Ascend310DeviceInfo::GetInputShapeChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, kModelOptionAscend310InputShape); return StringToChar(ref); } -void ModelContext::SetInputShape(const std::shared_ptr &context, const std::vector &shape) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[kModelOptionInputShape] = CharToString(shape); -} - 
-std::vector ModelContext::GetInputShapeChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, kModelOptionInputShape); - return StringToChar(ref); -} - -void ModelContext::SetInputShapeMap(const std::shared_ptr &context, - const std::map> &shape) { - MS_EXCEPTION_IF_NULL(context); - context->data->params[kModelOptionInputShapeMap] = shape; -} - -std::map> ModelContext::GetInputShapeMap(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - return GetValue>>(context, kModelOptionInputShapeMap); -} - -void ModelContext::SetOutputType(const std::shared_ptr &context, enum DataType output_type) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[kModelOptionOutputType] = output_type; -} - -enum DataType ModelContext::GetOutputType(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - return GetValue(context, kModelOptionOutputType); -} - -void ModelContext::SetPrecisionMode(const std::shared_ptr &context, const std::vector &precision_mode) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[kModelOptionPrecisionMode] = CharToString(precision_mode); -} - -std::vector ModelContext::GetPrecisionModeChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, kModelOptionPrecisionMode); - return StringToChar(ref); -} - -void ModelContext::SetOpSelectImplMode(const std::shared_ptr &context, - const std::vector &op_select_impl_mode) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[kModelOptionOpSelectImplMode] = CharToString(op_select_impl_mode); -} - -std::vector ModelContext::GetOpSelectImplModeChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, kModelOptionOpSelectImplMode); - return StringToChar(ref); -} - -void ModelContext::SetFusionSwitchConfigPath(const std::shared_ptr &context, - const std::vector &cfg_path) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[KModelOptionFusionSwitchCfgPath] = CharToString(cfg_path); -} - -std::vector ModelContext::GetFusionSwitchConfigPathChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, KModelOptionFusionSwitchCfgPath); - return StringToChar(ref); -} - -void ModelContext::SetGpuTrtInferMode(const std::shared_ptr &context, - const std::vector &gpu_trt_infer_mode) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } - context->data->params[kModelOptionGpuTrtInferMode] = CharToString(gpu_trt_infer_mode); -} - -std::vector ModelContext::GetGpuTrtInferModeChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, kModelOptionGpuTrtInferMode); - return StringToChar(ref); -} - -void ModelContext::SetDynamicBatchSize(const std::shared_ptr &context, const std::vector &batch_size) { - MS_EXCEPTION_IF_NULL(context); - if (context->data == nullptr) { - 
context->data = std::make_shared(); - MS_EXCEPTION_IF_NULL(context->data); - } +void Ascend310DeviceInfo::SetDynamicBatchSize(const std::vector &dynamic_batch_size) { + MS_EXCEPTION_IF_NULL(data_); std::string batchs = ""; - for (auto bs : batch_size) { - batchs += std::to_string(bs) + ","; + for (size_t i = 0; i < dynamic_batch_size.size(); ++i) { + if (i != 0) { + batchs.push_back(','); + } + batchs += std::to_string(dynamic_batch_size[i]); } - context->data->params[kModelOptionDynamicBatchSize] = batchs; + data_->params[kModelOptionAscend310DynamicBatchSize] = batchs; +} +std::vector Ascend310DeviceInfo::GetDynamicBatchSizeChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, kModelOptionAscend310DynamicBatchSize); + return StringToChar(ref); } -std::vector ModelContext::GetDynamicBatchSizeChar(const std::shared_ptr &context) { - MS_EXCEPTION_IF_NULL(context); - const std::string &ref = GetValue(context, kModelOptionDynamicBatchSize); +void Ascend310DeviceInfo::SetPrecisionMode(const std::vector &precision_mode) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310PrecisionMode] = CharToString(precision_mode); +} +std::vector Ascend310DeviceInfo::GetPrecisionModeChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, kModelOptionAscend310PrecisionMode); return StringToChar(ref); } + +void Ascend310DeviceInfo::SetOpSelectImplMode(const std::vector &op_select_impl_mode) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310OpSelectImplMode] = CharToString(op_select_impl_mode); +} +std::vector Ascend310DeviceInfo::GetOpSelectImplModeChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, kModelOptionAscend310OpSelectImplMode); + return StringToChar(ref); +} + +void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::vector &cfg_path) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[KModelOptionAscend310FusionSwitchCfgPath] = CharToString(cfg_path); +} +std::vector Ascend310DeviceInfo::GetFusionSwitchConfigPathChar() const { + MS_EXCEPTION_IF_NULL(data_); + const std::string &ref = GetValue(data_, KModelOptionAscend310FusionSwitchCfgPath); + return StringToChar(ref); +} + +void Ascend310DeviceInfo::SetInputShapeMap(const std::map> &shape) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310InputShapeMap] = shape; +} +std::map> Ascend310DeviceInfo::GetInputShapeMap() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue>>(data_, kModelOptionAscend310InputShapeMap); +} + +void Ascend310DeviceInfo::SetOutputType(enum DataType output_type) { + MS_EXCEPTION_IF_NULL(data_); + data_->params[kModelOptionAscend310OutputType] = output_type; +} +enum DataType Ascend310DeviceInfo::GetOutputType() const { + MS_EXCEPTION_IF_NULL(data_); + return GetValue(data_, kModelOptionAscend310OutputType); +} } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/factory.h b/mindspore/ccsrc/cxx_api/factory.h index e2bdb96cea..7cc175b9d8 100644 --- a/mindspore/ccsrc/cxx_api/factory.h +++ b/mindspore/ccsrc/cxx_api/factory.h @@ -24,6 +24,8 @@ #include "utils/utils.h" namespace mindspore { +inline std::string g_device_target = "Default"; + template class Factory { using U = std::function()>; diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc index 1dd030f8ff..c02db25747 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc 
@@ -45,6 +45,9 @@ std::shared_ptr AclEnvGuard::GetAclEnv(std::string_view cfg_file) { acl_env = global_acl_env_; if (acl_env != nullptr) { MS_LOG(INFO) << "Acl has been initialized, skip."; + if (!cfg_file.empty()) { + MS_LOG(WARNING) << "Dump config file option " << cfg_file << " is ignored."; + } } else { acl_env = std::make_shared(cfg_file); aclError ret = acl_env->GetErrno(); diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc index 6867366ac1..d7ba761091 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc @@ -25,7 +25,7 @@ AclGraphImpl::AclGraphImpl() : init_flag_(false), load_flag_(false), device_type_("AscendCL"), - device_id_(GlobalContext::GetGlobalDeviceID()), + device_id_(0), context_(nullptr), acl_env_(nullptr) {} @@ -33,7 +33,7 @@ AclGraphImpl::~AclGraphImpl() { (void)FinalizeEnv(); } Status AclGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; return ret; @@ -43,7 +43,7 @@ Status AclGraphImpl::Run(const std::vector &inputs, std::vector AclGraphImpl::GetInputs() { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; return {}; @@ -53,7 +53,7 @@ std::vector AclGraphImpl::GetInputs() { } std::vector AclGraphImpl::GetOutputs() { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; return {}; @@ -90,7 +90,7 @@ Status AclGraphImpl::InitEnv() { return kSuccess; } - acl_env_ = AclEnvGuard::GetAclEnv(GlobalContext::GetGlobalDumpConfigPath()); + acl_env_ = AclEnvGuard::GetAclEnv(""); if (acl_env_ == nullptr) { MS_LOG(ERROR) << "Acl init failed."; return kMCDeviceError; @@ -161,7 +161,7 @@ Status AclGraphImpl::FinalizeEnv() { return kSuccess; } -Status AclGraphImpl::Load() { +Status AclGraphImpl::Load(uint32_t device_id) { // check graph type if (graph_->ModelType() != ModelType::kOM) { Status ret = ConvertToOM(); @@ -176,6 +176,7 @@ Status AclGraphImpl::Load() { auto om_data = graph_data->GetOMData(); // init + device_id_ = device_id; Status ret = InitEnv(); if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h index 4d185d5fbe..ab4994c83d 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h @@ -34,7 +34,7 @@ class AclGraphImpl : public GraphCell::GraphImpl { ~AclGraphImpl() override; Status Run(const std::vector &inputs, std::vector *outputs) override; - Status Load() override; + Status Load(uint32_t device_id) override; std::vector GetInputs() override; std::vector GetOutputs() override; diff --git a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc index a9cead17af..4d229b2189 100644 --- a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc @@ -39,7 +39,7 @@ AscendGraphImpl::AscendGraphImpl() : session_impl_(nullptr), graph_id_(0), device_type_("Ascend"), - device_id_(GlobalContext::GetGlobalDeviceID()), + device_id_(0), context_(nullptr), inputs_info_(), outputs_info_(), @@ -142,7 +142,7 @@ Status 
AscendGraphImpl::ExecuteModel(const std::vector &request, std:: std::vector AscendGraphImpl::GetInputs() { if (!load_flag_) { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return {}; @@ -166,7 +166,7 @@ std::vector AscendGraphImpl::GetInputs() { std::vector AscendGraphImpl::GetOutputs() { if (!load_flag_) { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return {}; @@ -188,7 +188,7 @@ std::vector AscendGraphImpl::GetOutputs() { return result; } -Status AscendGraphImpl::Load() { +Status AscendGraphImpl::Load(uint32_t device_id) { // check graph type if (graph_->ModelType() != ModelType::kMindIR) { MS_LOG(ERROR) << "Unsupported model type " << graph_->ModelType(); @@ -200,6 +200,7 @@ Status AscendGraphImpl::Load() { auto func_graph = graph_data->GetFuncGraph(); // init + device_id_ = device_id; Status ret = InitEnv(); if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; @@ -247,7 +248,7 @@ Status AscendGraphImpl::Load() { Status AscendGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (!load_flag_) { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return ret; diff --git a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h index c8fcb160fe..2456613b24 100644 --- a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h @@ -36,7 +36,7 @@ class AscendGraphImpl : public GraphCell::GraphImpl { ~AscendGraphImpl() override; Status Run(const std::vector &inputs, std::vector *outputs) override; - Status Load() override; + Status Load(uint32_t device_id) override; std::vector GetInputs() override; std::vector GetOutputs() override; diff --git a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc index f40a62a4bf..263cbd3ee9 100644 --- a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc @@ -30,7 +30,7 @@ API_FACTORY_REG(GraphCell::GraphImpl, GPU, GPUGraphImpl); GPUGraphImpl::GPUGraphImpl() : session_impl_(nullptr), graph_id_(0), - device_id_(GlobalContext::GetGlobalDeviceID()), + device_id_(0), inputs_info_(), outputs_info_(), input_names_(), @@ -83,7 +83,7 @@ Status GPUGraphImpl::FinalizeEnv() { return kSuccess; } -Status GPUGraphImpl::Load() { +Status GPUGraphImpl::Load(uint32_t device_id) { // check graph type if (graph_->ModelType() != ModelType::kMindIR) { MS_LOG(ERROR) << "Unsupported model type " << graph_->ModelType(); @@ -95,6 +95,7 @@ Status GPUGraphImpl::Load() { auto func_graph = graph_data->GetFuncGraph(); // init + device_id_ = device_id; Status ret = InitEnv(); if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; @@ -176,7 +177,7 @@ Status GPUGraphImpl::ExecuteModel(const std::vector &request, std::vec Status GPUGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (!load_flag_) { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return ret; @@ -211,7 +212,7 @@ Status GPUGraphImpl::Run(const std::vector &inputs, std::vector GPUGraphImpl::GetInputs() { if (!load_flag_) { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) 
<< "PrepareModel failed."; return {}; @@ -235,7 +236,7 @@ std::vector GPUGraphImpl::GetInputs() { std::vector GPUGraphImpl::GetOutputs() { if (!load_flag_) { - Status ret = Load(); + Status ret = Load(device_id_); if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return {}; diff --git a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h index 0058e7fbcd..53db5c5f2f 100644 --- a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h @@ -33,7 +33,7 @@ class GPUGraphImpl : public GraphCell::GraphImpl { ~GPUGraphImpl() override = default; Status Run(const std::vector &inputs, std::vector *outputs) override; - Status Load() override; + Status Load(uint32_t device_id) override; std::vector GetInputs() override; std::vector GetOutputs() override; diff --git a/mindspore/ccsrc/cxx_api/graph/graph.cc b/mindspore/ccsrc/cxx_api/graph/graph.cc index e38266d624..7b6602d211 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph.cc +++ b/mindspore/ccsrc/cxx_api/graph/graph.cc @@ -18,6 +18,8 @@ #include "utils/log_adapter.h" namespace mindspore { +Graph::Graph() : graph_data_(nullptr) {} + Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} Graph::Graph(std::shared_ptr &&graph_data) : graph_data_(graph_data) {} @@ -28,6 +30,8 @@ Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {} bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } +bool Graph::operator!=(std::nullptr_t) const { return graph_data_ != nullptr; } + ModelType Graph::ModelType() const { MS_EXCEPTION_IF_NULL(graph_data_); return graph_data_->ModelType(); diff --git a/mindspore/ccsrc/cxx_api/graph/graph_impl.h b/mindspore/ccsrc/cxx_api/graph/graph_impl.h index 401df187da..23fea0bea9 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/graph_impl.h @@ -36,7 +36,7 @@ class GraphCell::GraphImpl { void SetGraph(const std::shared_ptr &graph) { graph_ = graph; } virtual Status Run(const std::vector &inputs, std::vector *outputs) = 0; - virtual Status Load() = 0; + virtual Status Load(uint32_t device_id) = 0; virtual std::vector GetInputs() = 0; virtual std::vector GetOutputs() = 0; diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc b/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc index e22e89c3ba..de69591045 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc @@ -18,6 +18,7 @@ #include #include "include/api/context.h" #include "cxx_api/factory.h" +#include "cxx_api/graph/acl/acl_env_guard.h" namespace mindspore { API_FACTORY_REG(ModelImpl, Ascend310, AclModel); @@ -40,6 +41,11 @@ Status AclModel::Build() { std::unique_ptr options = std::make_unique(model_context_); MS_EXCEPTION_IF_NULL(options); + std::string dump_cfg = options->GetDumpCfgPath(); + if (!dump_cfg.empty()) { + MS_LOG(INFO) << "Options dump config file path " << dump_cfg; + (void)AclEnvGuard::GetAclEnv(dump_cfg); + } std::string options_key = options->GenAclOptionsKey(); std::shared_ptr graph; if (auto iter = dynamic_size_graph_map_.find(options_key); iter != dynamic_size_graph_map_.end()) { @@ -75,7 +81,7 @@ Status AclModel::Build() { MS_EXCEPTION_IF_NULL(graph); auto graph_cell = std::make_shared(graph); MS_EXCEPTION_IF_NULL(graph_cell); - auto ret = ModelImpl::Load(graph_cell); + auto ret = ModelImpl::Load(graph_cell, options->GetDeviceID()); if (ret != kSuccess) { MS_LOG(ERROR) << "Load failed."; return ret; @@ 
-108,7 +114,8 @@ Status AclModel::Resize(const std::vector &inputs, const std::vector(); + model_context_ = std::make_shared(); + model_context_->MutableDeviceInfo().emplace_back(std::make_shared()); } std::string input_shape_option; @@ -130,7 +137,14 @@ Status AclModel::Resize(const std::vector &inputs, const std::vectorMutableDeviceInfo(); + if (device_infos.size() != 1) { + MS_LOG(ERROR) << "Invalid model context, only single device info is supported."; + return kMCInvalidArgs; + } + auto ascend310_info = device_infos[0]->Cast(); + MS_EXCEPTION_IF_NULL(ascend310_info); + ascend310_info->SetInputShape(input_shape_option); auto graph_cell_bak = std::move(graph_cell_); auto ret = Build(); if (ret != kSuccess) { diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc index 841ebc19b8..4a2941223e 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc @@ -27,10 +27,19 @@ AclModelOptions::AclModelOptions(const std::shared_ptr &context) { if (context == nullptr) { return; } - insert_op_cfg_path_ = ModelContext::GetInsertOpConfigPath(context); - input_format_ = ModelContext::GetInputFormat(context); - input_shape_map_ = ModelContext::GetInputShapeMap(context); - auto out_type = ModelContext::GetOutputType(context); + auto &device_infos = context->MutableDeviceInfo(); + if (device_infos.size() != 1) { + return; + } + auto ascend310_info = device_infos[0]->Cast(); + if (ascend310_info == nullptr) { + return; + } + + insert_op_cfg_path_ = ascend310_info->GetInsertOpConfigPath(); + input_format_ = ascend310_info->GetInputFormat(); + input_shape_map_ = ascend310_info->GetInputShapeMap(); + auto out_type = ascend310_info->GetOutputType(); auto iter = kSupportedDtypeOptionMap.find(out_type); if (out_type == DataType::kTypeUnknown) { // do nothing @@ -39,10 +48,12 @@ AclModelOptions::AclModelOptions(const std::shared_ptr &context) { } else { output_type_ = iter->second; } - dynamic_batch_size_ = ModelContext::GetDynamicBatchSize(context); - precision_mode_ = ModelContext::GetPrecisionMode(context); - op_select_impl_mode_ = ModelContext::GetOpSelectImplMode(context); - fusion_switch_cfg_path_ = ModelContext::GetFusionSwitchConfigPath(context); + dynamic_batch_size_ = ascend310_info->GetDynamicBatchSize(); + precision_mode_ = ascend310_info->GetPrecisionMode(); + op_select_impl_mode_ = ascend310_info->GetOpSelectImplMode(); + fusion_switch_cfg_path_ = ascend310_info->GetFusionSwitchConfigPath(); + device_id_ = ascend310_info->GetDeviceID(); + dump_cfg_path_ = ascend310_info->GetDumpConfigPath(); } void AclModelOptions::RenameInput(const std::vector &input_names) { diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.h b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.h index 06a9f6669a..b315f86600 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.h +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.h @@ -31,6 +31,8 @@ class AclModelOptions { explicit AclModelOptions(const std::shared_ptr &context); ~AclModelOptions() = default; std::string GenAclOptionsKey() const; + uint32_t GetDeviceID() const { return device_id_; } + std::string GetDumpCfgPath() const { return dump_cfg_path_; } void RenameInput(const std::vector &); // return tuple @@ -50,7 +52,9 @@ class AclModelOptions { std::string dynamic_batch_size_; std::string dynamic_image_size_; std::map> input_shape_map_; - std::vector dynamic_image_size_nums_; + // other options + 
uint32_t device_id_; + std::string dump_cfg_path_; }; } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/model.cc b/mindspore/ccsrc/cxx_api/model/model.cc index f50fd0c3dc..0979ebae53 100644 --- a/mindspore/ccsrc/cxx_api/model/model.cc +++ b/mindspore/ccsrc/cxx_api/model/model.cc @@ -21,60 +21,130 @@ namespace mindspore { namespace { -const std::map> kSupportedModelMap = { - {kDeviceTypeAscend310, {kOM, kMindIR}}, - {kDeviceTypeAscend910, {kMindIR}}, - {kDeviceTypeGPU, {kMindIR}}, +const std::map> kSupportedModelMap = { + {kAscend310, {kOM, kMindIR}}, + {kAscend910, {kMindIR}}, + {kNvidiaGPU, {kMindIR}}, }; + +std::string GetDeviceTypeString(enum DeviceType type) { + static const std::map kDeviceTypeStrs = { + {kCPU, "CPU"}, {kMaliGPU, "MaliGPU"}, {kNvidiaGPU, "GPU"}, + {kKirinNPU, "KirinGPU"}, {kAscend910, "Ascend910"}, {kAscend310, "Ascend310"}, + }; + auto iter = kDeviceTypeStrs.find(type); + if (iter != kDeviceTypeStrs.end()) { + return iter->second; + } + + return "InvalidDeviceType" + std::to_string(type); } -Status Model::Build() { - MS_EXCEPTION_IF_NULL(impl_); +} // namespace +Status Model::Build(GraphCell graph_cell, const std::shared_ptr &model_context) { + if (graph_cell.GetGraph() == nullptr) { + MS_LOG(ERROR) << "Invalid graph input."; + return kMCInvalidInput; + } + + if (model_context == nullptr) { + MS_LOG(ERROR) << "Invalid model context."; + return kMCInvalidInput; + } + auto &device_info = model_context->MutableDeviceInfo(); + if (device_info.size() != 1) { + MS_LOG(ERROR) << "Invalid model context, only single device info is supported."; + return kMCInvalidInput; + } + + std::string device_target = GetDeviceTypeString(device_info[0]->GetDeviceType()); + impl_ = Factory::Instance().Create(device_target); + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Create session type " << device_target << " failed"; + return kMEFailed; + } + + g_device_target = device_target; + + impl_->SetGraph(std::make_shared(*graph_cell.GetGraph())); + impl_->SetContext(model_context); + return impl_->Build(); } Status Model::Resize(const std::vector &inputs, const std::vector> &dims) { - MS_EXCEPTION_IF_NULL(impl_); + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Failed because this model has not been built."; + return kMCFailed; + } return impl_->Resize(inputs, dims); } Status Model::Predict(const std::vector &inputs, std::vector *outputs) { - MS_EXCEPTION_IF_NULL(impl_); + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Failed because this model has not been built."; + return kMCFailed; + } return impl_->Predict(inputs, outputs); } std::vector Model::GetInputs() { - MS_EXCEPTION_IF_NULL(impl_); + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Failed because this model has not been built."; + return {}; + } return impl_->GetInputs(); } std::vector Model::GetOutputs() { - MS_EXCEPTION_IF_NULL(impl_); + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Failed because this model has not been built."; + return {}; + } return impl_->GetOutputs(); } -Model::Model(const GraphCell &graph_cell, const std::shared_ptr &model_context) - : impl_(Factory::Instance().Create(mindspore::GlobalContext::GetGlobalDeviceTarget())) { - if (impl_ == nullptr) { - MS_LOG(EXCEPTION) << "Create session type " << mindspore::GlobalContext::GetGlobalDeviceTarget() << " failed"; +MSTensor Model::GetInputByTensorName(const std::vector &tensor_name) { + std::string tensor_name_str = CharToString(tensor_name); + auto inputs = GetInputs(); + for (auto in : inputs) { + if (in.Name() == tensor_name_str) { + return in; + } } - 
MS_EXCEPTION_IF_NULL(graph_cell.GetGraph()); - impl_->SetGraph(std::make_shared(*graph_cell.GetGraph())); - impl_->SetContext(model_context); + + return MSTensor(std::shared_ptr(nullptr)); } -Model::Model(const std::vector &network, const std::shared_ptr &model_context) { - MS_LOG(EXCEPTION) << "Unsupported feature."; +std::vector> Model::GetOutputTensorNamesChar() { + std::vector> ret; + auto outputs = GetOutputs(); + std::transform(outputs.begin(), outputs.end(), std::back_inserter(ret), + [](MSTensor item) -> std::vector { return StringToChar(item.Name()); }); + return ret; } +MSTensor Model::GetOutputByTensorName(const std::vector &tensor_name) { + std::string tensor_name_str = CharToString(tensor_name); + auto outputs = GetOutputs(); + for (auto out : outputs) { + if (out.Name() == tensor_name_str) { + return out; + } + } + + return MSTensor(std::shared_ptr(nullptr)); +} + +Model::Model() : impl_(nullptr) {} Model::~Model() {} -bool Model::CheckModelSupport(const std::vector &device_type, ModelType model_type) { - std::string device_type_str = CharToString(device_type); +bool Model::CheckModelSupport(enum DeviceType device_type, ModelType model_type) { + std::string device_type_str = GetDeviceTypeString(device_type); if (!Factory::Instance().CheckModelSupport(device_type_str)) { return false; } - auto first_iter = kSupportedModelMap.find(device_type_str); + auto first_iter = kSupportedModelMap.find(device_type); if (first_iter == kSupportedModelMap.end()) { return false; } diff --git a/mindspore/ccsrc/cxx_api/model/model_impl.h b/mindspore/ccsrc/cxx_api/model/model_impl.h index 97a308eafa..251df2bc33 100644 --- a/mindspore/ccsrc/cxx_api/model/model_impl.h +++ b/mindspore/ccsrc/cxx_api/model/model_impl.h @@ -43,9 +43,9 @@ class ModelImpl { virtual std::vector GetOutputs() = 0; protected: - Status Load(const std::shared_ptr &graph_cell) { + Status Load(const std::shared_ptr &graph_cell, uint32_t device_id) { MS_EXCEPTION_IF_NULL(graph_cell); - return graph_cell->Load(); + return graph_cell->Load(device_id); } FuncGraphPtr GetFuncGraph() const { diff --git a/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc b/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc index 23bac732b8..da2aec4634 100644 --- a/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc +++ b/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc @@ -74,7 +74,7 @@ std::shared_ptr MsModel::GenerateGraphCell(const std::vector(graph); MS_EXCEPTION_IF_NULL(graph_cell); - auto ret = ModelImpl::Load(graph_cell); + auto ret = ModelImpl::Load(graph_cell, GetDeviceID()); if (ret != kSuccess) { MS_LOG(ERROR) << "Load failed."; return nullptr; @@ -99,7 +99,7 @@ Status MsModel::Build() { MS_EXCEPTION_IF_NULL(graph); auto graph_cell = std::make_shared(graph); MS_EXCEPTION_IF_NULL(graph_cell); - auto ret = ModelImpl::Load(graph_cell); + auto ret = ModelImpl::Load(graph_cell, GetDeviceID()); if (ret != kSuccess) { MS_LOG(ERROR) << "Load failed."; return ret; @@ -170,4 +170,27 @@ std::vector MsModel::GetOutputs() { MS_EXCEPTION_IF_NULL(graph_cell_); return graph_cell_->GetOutputs(); } + +uint32_t MsModel::GetDeviceID() const { + if (model_context_ == nullptr) { + return 0; + } + + auto &device_infos = model_context_->MutableDeviceInfo(); + if (device_infos.size() != 1) { + return 0; + } + + auto ascend910_info = device_infos[0]->Cast(); + if (ascend910_info != nullptr) { + return ascend910_info->GetDeviceID(); + } + + auto gpu_info = device_infos[0]->Cast(); + if (gpu_info != nullptr) { + return gpu_info->GetDeviceID(); + } + + return 0; +} } // namespace mindspore 
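A minimal caller-side sketch (not part of the patch) of the reworked model.cc API above, showing how a per-model Context holding a single DeviceInfoContext replaces the old GlobalContext/ModelContext pair. The model path "resnet50.mindir" and device id 0 are placeholders, and the std::string convenience overloads generated by the dual-ABI helpers plus a GraphCell constructed directly from the loaded Graph are assumed.

// Configure the target device through the new Context API.
auto context = std::make_shared<mindspore::Context>();
auto ascend310 = std::make_shared<mindspore::Ascend310DeviceInfo>();
ascend310->SetDeviceID(0);                          // was GlobalContext::SetGlobalDeviceID
context->MutableDeviceInfo().push_back(ascend310);  // exactly one device info is expected

// Serialization::Load now reports failure through Status instead of throwing.
mindspore::Graph graph;
if (mindspore::Serialization::Load("resnet50.mindir", mindspore::ModelType::kMindIR, &graph) !=
    mindspore::kSuccess) {
  return -1;
}

// Model::Build selects the backend from the device info instead of the global device target.
mindspore::Model model;
if (model.Build(mindspore::GraphCell(graph), context) != mindspore::kSuccess) {
  return -1;
}

std::vector<mindspore::MSTensor> outputs;
mindspore::Status ret = model.Predict(model.GetInputs(), &outputs);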
diff --git a/mindspore/ccsrc/cxx_api/model/ms/ms_model.h b/mindspore/ccsrc/cxx_api/model/ms/ms_model.h index 0571b4e409..03f5f022ae 100644 --- a/mindspore/ccsrc/cxx_api/model/ms/ms_model.h +++ b/mindspore/ccsrc/cxx_api/model/ms/ms_model.h @@ -48,6 +48,7 @@ class MsModel : public ModelImpl { private: std::shared_ptr GenerateGraphCell(const std::vector> &dims); + uint32_t GetDeviceID() const; std::shared_ptr graph_cell_; std::map> dynamic_size_graph_map_; diff --git a/mindspore/ccsrc/cxx_api/serialization.cc b/mindspore/ccsrc/cxx_api/serialization.cc index 331f6885e7..4f3e3c7204 100644 --- a/mindspore/ccsrc/cxx_api/serialization.cc +++ b/mindspore/ccsrc/cxx_api/serialization.cc @@ -68,38 +68,59 @@ static Buffer ReadFile(const std::string &file) { return buffer; } -Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) { +Status Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph) { + if (graph == nullptr) { + MS_LOG(ERROR) << "Output args graph is nullptr."; + return kMEInvalidInput; + } + if (model_type == kMindIR) { FuncGraphPtr anf_graph = nullptr; try { anf_graph = ConvertStreamToFuncGraph(reinterpret_cast(model_data), data_size); } catch (const std::exception &) { - MS_LOG(EXCEPTION) << "Load MindIR failed."; + MS_LOG(ERROR) << "Load model failed."; + return kMEInvalidInput; } - return Graph(std::make_shared(anf_graph, kMindIR)); + *graph = Graph(std::make_shared(anf_graph, kMindIR)); + return kSuccess; } else if (model_type == kOM) { - return Graph(std::make_shared(Buffer(model_data, data_size), kOM)); + *graph = Graph(std::make_shared(Buffer(model_data, data_size), kOM)); + return kSuccess; } - MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type; + + MS_LOG(ERROR) << "Unsupported ModelType " << model_type; + return kMEInvalidInput; } -Graph Serialization::LoadModel(const std::vector &file, ModelType model_type) { +Status Serialization::Load(const std::vector &file, ModelType model_type, Graph *graph) { + if (graph == nullptr) { + MS_LOG(ERROR) << "Output args graph is nullptr."; + return kMEInvalidInput; + } + std::string file_path = CharToString(file); if (model_type == kMindIR) { FuncGraphPtr anf_graph = LoadMindIR(file_path); if (anf_graph == nullptr) { - MS_LOG(EXCEPTION) << "Load model failed."; + MS_LOG(ERROR) << "Load model failed."; + return kMEInvalidInput; } - return Graph(std::make_shared(anf_graph, kMindIR)); + *graph = Graph(std::make_shared(anf_graph, kMindIR)); + return kSuccess; } else if (model_type == kOM) { Buffer data = ReadFile(file_path); if (data.Data() == nullptr) { - MS_LOG(EXCEPTION) << "Read file " << file_path << " failed."; + MS_LOG(ERROR) << "Read file " << file_path << " failed."; + return kMEInvalidInput; } - return Graph(std::make_shared(data, kOM)); + *graph = Graph(std::make_shared(data, kOM)); + return kSuccess; } - MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type; + + MS_LOG(ERROR) << "Unsupported ModelType " << model_type; + return kMEInvalidInput; } Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map *parameters) { diff --git a/mindspore/ccsrc/cxx_api/types.cc b/mindspore/ccsrc/cxx_api/types.cc index a3872c09b1..ba002486fd 100644 --- a/mindspore/ccsrc/cxx_api/types.cc +++ b/mindspore/ccsrc/cxx_api/types.cc @@ -134,33 +134,139 @@ class TensorReferenceImpl : public MSTensor::Impl { std::vector shape_; }; -MSTensor MSTensor::CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, - const void 
*data, size_t data_len) noexcept { +MSTensor *MSTensor::CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { std::string name_str = CharToString(name); try { std::shared_ptr impl = std::make_shared(name_str, type, shape, data, data_len); - return MSTensor(impl); + MSTensor *ret = new MSTensor(impl); + return ret; } catch (const std::bad_alloc &) { MS_LOG(ERROR) << "Malloc memory failed."; - return MSTensor(nullptr); + return nullptr; } catch (...) { MS_LOG(ERROR) << "Unknown error occurred."; - return MSTensor(nullptr); + return nullptr; } } -MSTensor MSTensor::CreateRefTensor(const std::vector &name, enum DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept { +MSTensor *MSTensor::CreateRefTensor(const std::vector &name, enum DataType type, + const std::vector &shape, const void *data, size_t data_len) noexcept { std::string name_str = CharToString(name); try { std::shared_ptr impl = std::make_shared(name_str, type, shape, data, data_len); - return MSTensor(impl); + MSTensor *ret = new MSTensor(impl); + return ret; } catch (const std::bad_alloc &) { MS_LOG(ERROR) << "Malloc memory failed."; - return MSTensor(nullptr); + return nullptr; } catch (...) { MS_LOG(ERROR) << "Unknown error occurred."; - return MSTensor(nullptr); + return nullptr; + } +} + +MSTensor *MSTensor::CharStringsToTensor(const std::vector &name, const std::vector> &str) { + // num(4 bytes) + offset1(4 bytes) + offset2(4 bytes) + ... + data1(str1.len) + data2(str2.len) + ... + // str1.len() = offset2 - offset1 + // data1.begin() = start + offset1 + size_t mem_size = 0; + mem_size += sizeof(int32_t); // for num + for (const auto &s : str) { + mem_size += sizeof(int32_t); // for offset + mem_size += s.size(); // for data + } + + auto tensor = CreateTensor(name, DataType::kObjectTypeString, {static_cast(mem_size)}, nullptr, mem_size); + if (tensor == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return nullptr; + } + + int32_t *data = reinterpret_cast(tensor->MutableData()); + if (data == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + DestroyTensorPtr(tensor); + return nullptr; + } + uint8_t *cur_data = reinterpret_cast(data + 1 + str.size()); + *reinterpret_cast(data) = str.size(); + for (size_t i = 0; i < str.size(); ++i) { + int32_t offset = (cur_data - reinterpret_cast(data)); + data[i + 1] = offset; + if (str[i].empty()) { + continue; + } + auto ret = memcpy_s(reinterpret_cast(cur_data), str[i].size(), str[i].data(), str[i].size()); + if (ret != 0) { + MS_LOG(ERROR) << "memcpy_s failed, ret = " << ret; + DestroyTensorPtr(tensor); + return nullptr; + } + cur_data += str[i].size(); + } + + return tensor; +} + +std::vector> MSTensor::TensorToStringChars(const MSTensor &tensor) { + if (tensor == nullptr || tensor.DataType() != DataType::kObjectTypeString || tensor.DataSize() < 4) { + MS_LOG(ERROR) << "Invalid tensor."; + return {}; + } + + std::vector> strings; + auto host_data = tensor.Data(); + const int32_t *data = reinterpret_cast(host_data.get()); + int32_t str_num = data[0]; + if (str_num == 0) { + return {}; + } + if (str_num < 0) { + MS_LOG(ERROR) << "str num " << str_num << " cannot be negative."; + return {}; + } + + if (tensor.DataSize() < (str_num + 1) * sizeof(int32_t)) { + MS_LOG(ERROR) << "Invalid tensor data size " << tensor.DataSize() << ", need " << (str_num + 1) * sizeof(int32_t) + << " at least for " << str_num << " strings."; + return {}; + } + for (size_t i = 0; i < 
static_cast(str_num); ++i) { + strings.push_back({}); + auto &str = strings[i]; + int32_t str_len; + int32_t offset = data[i + 1]; + if (i + 1 != static_cast(str_num)) { + str_len = data[i + 1 + 1] - offset; + } else { + str_len = tensor.DataSize() - offset; + } + + if (str_len == 0) { + continue; + } + + if (str_len < 0) { + MS_LOG(ERROR) << "str " << i << " len " << str_len << " cannot be negative."; + return {}; + } + + str.resize(str_len); + const uint8_t *cur_data = reinterpret_cast(data) + offset; + auto ret = memcpy_s(reinterpret_cast(str.data()), str.size(), cur_data, str_len); + if (ret != 0) { + MS_LOG(ERROR) << "memcpy_s failed, ret = " << ret; + return {}; + } + } + + return strings; +} + +void MSTensor::DestroyTensorPtr(MSTensor *tensor) noexcept { + if (tensor != nullptr) { + delete tensor; } } @@ -174,11 +280,21 @@ MSTensor::~MSTensor() = default; bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; } -MSTensor MSTensor::Clone() const { +bool MSTensor::operator!=(std::nullptr_t) const { return impl_ != nullptr; } + +MSTensor *MSTensor::Clone() const { MS_EXCEPTION_IF_NULL(impl_); - MSTensor ret; - ret.impl_ = impl_->Clone(); - return ret; + try { + MSTensor *ret = new MSTensor(); + ret->impl_ = impl_->Clone(); + return ret; + } catch (const std::bad_alloc &) { + MS_LOG(ERROR) << "Malloc memory failed."; + return nullptr; + } catch (...) { + MS_LOG(ERROR) << "Unknown error occurred."; + return nullptr; + } } std::vector MSTensor::CharName() const { diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc index ae572a0ae4..8560d20853 100644 --- a/mindspore/ccsrc/minddata/dataset/api/execute.cc +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -55,14 +55,14 @@ struct Execute::ExtraInfo { }; // FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform -Execute::Execute(std::shared_ptr op, MapTargetDevice deviceType) { +Execute::Execute(std::shared_ptr op, MapTargetDevice deviceType, uint32_t device_id) { ops_.emplace_back(std::move(op)); device_type_ = deviceType; info_ = std::make_shared(); #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; @@ -71,7 +71,7 @@ Execute::Execute(std::shared_ptr op, MapTargetDevice deviceType #endif } -Execute::Execute(std::shared_ptr op, MapTargetDevice deviceType) { +Execute::Execute(std::shared_ptr op, MapTargetDevice deviceType, uint32_t device_id) { // Initialize the op and other context transforms_.emplace_back(op); @@ -80,7 +80,7 @@ Execute::Execute(std::shared_ptr op, MapTargetDevice deviceType #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; @@ -89,7 +89,7 @@ Execute::Execute(std::shared_ptr op, MapTargetDevice deviceType #endif } -Execute::Execute(std::reference_wrapper op, MapTargetDevice deviceType) { +Execute::Execute(std::reference_wrapper op, MapTargetDevice deviceType, uint32_t device_id) { // Initialize the transforms_ and other context std::shared_ptr operation = op.get().Parse(); 
ops_.emplace_back(std::move(operation)); @@ -100,7 +100,7 @@ Execute::Execute(std::reference_wrapper op, MapTargetDevice dev #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; @@ -110,7 +110,7 @@ Execute::Execute(std::reference_wrapper op, MapTargetDevice dev } // Execute function for the example case: auto decode(new vision::Decode()); -Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) { +Execute::Execute(TensorTransform *op, MapTargetDevice deviceType, uint32_t device_id) { // Initialize the transforms_ and other context std::shared_ptr smart_ptr_op(op); transforms_.emplace_back(smart_ptr_op); @@ -120,7 +120,7 @@ Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) { #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; @@ -129,13 +129,13 @@ Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) { #endif } -Execute::Execute(std::vector> ops, MapTargetDevice deviceType) +Execute::Execute(std::vector> ops, MapTargetDevice deviceType, uint32_t device_id) : ops_(std::move(ops)), device_type_(deviceType) { info_ = std::make_shared(); #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; @@ -144,7 +144,7 @@ Execute::Execute(std::vector> ops, MapTargetDev #endif } -Execute::Execute(std::vector> ops, MapTargetDevice deviceType) { +Execute::Execute(std::vector> ops, MapTargetDevice deviceType, uint32_t device_id) { // Initialize the transforms_ and other context transforms_ = ops; @@ -153,7 +153,7 @@ Execute::Execute(std::vector> ops, MapTargetDev #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; @@ -162,7 +162,8 @@ Execute::Execute(std::vector> ops, MapTargetDev #endif } -Execute::Execute(const std::vector> ops, MapTargetDevice deviceType) { +Execute::Execute(const std::vector> ops, MapTargetDevice deviceType, + uint32_t device_id) { // Initialize the transforms_ and other context if (deviceType == MapTargetDevice::kCpu) { (void)std::transform( @@ -180,7 +181,7 @@ Execute::Execute(const std::vector> ops, #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; @@ -190,7 +191,7 @@ Execute::Execute(const std::vector> ops, } // Execute function for the example vector case: auto decode(new vision::Decode()); -Execute::Execute(std::vector ops, MapTargetDevice deviceType) { 
+Execute::Execute(std::vector ops, MapTargetDevice deviceType, uint32_t device_id) { // Initialize the transforms_ and other context for (auto &op : ops) { std::shared_ptr smart_ptr_op(op); @@ -202,7 +203,7 @@ Execute::Execute(std::vector ops, MapTargetDevice deviceType) #ifdef ENABLE_ACL if (device_type_ == MapTargetDevice::kAscend310) { device_resource_ = std::make_shared(); - Status rc = device_resource_->InitResource(); + Status rc = device_resource_->InitResource(device_id); if (!rc.IsOk()) { device_resource_ = nullptr; MS_LOG(ERROR) << "Initialize Ascend310 resource fail"; diff --git a/mindspore/ccsrc/minddata/dataset/core/ascend_resource.cc b/mindspore/ccsrc/minddata/dataset/core/ascend_resource.cc index 274a91d9c1..df386749cf 100644 --- a/mindspore/ccsrc/minddata/dataset/core/ascend_resource.cc +++ b/mindspore/ccsrc/minddata/dataset/core/ascend_resource.cc @@ -23,10 +23,10 @@ namespace mindspore { namespace dataset { -Status AscendResource::InitResource() { +Status AscendResource::InitResource(uint32_t device_id) { ResourceInfo resource; resource.aclConfigPath = ""; - resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + resource.deviceIds.insert(device_id); ascend_resource_ = ResourceManager::GetInstance(); APP_ERROR ret = ascend_resource_->InitResource(resource); if (ret != APP_ERR_OK) { @@ -35,8 +35,8 @@ Status AscendResource::InitResource() { MS_LOG(ERROR) << err_msg; RETURN_STATUS_UNEXPECTED(err_msg); } - int device_id = *(resource.deviceIds.begin()); - aclrtContext context = ascend_resource_->GetContext(device_id); + int cur_device_id = *(resource.deviceIds.begin()); + aclrtContext context = ascend_resource_->GetContext(cur_device_id); processor_ = std::make_shared(context, false); ret = processor_->InitResource(); if (ret != APP_ERR_OK) { diff --git a/mindspore/ccsrc/minddata/dataset/core/ascend_resource.h b/mindspore/ccsrc/minddata/dataset/core/ascend_resource.h index 3a401d6012..d94694cc97 100644 --- a/mindspore/ccsrc/minddata/dataset/core/ascend_resource.h +++ b/mindspore/ccsrc/minddata/dataset/core/ascend_resource.h @@ -36,7 +36,7 @@ class AscendResource : public DeviceResource { AscendResource() = default; ~AscendResource() = default; - Status InitResource() override; + Status InitResource(uint32_t device_id) override; Status FinalizeResource() override; diff --git a/mindspore/ccsrc/minddata/dataset/core/device_resource.cc b/mindspore/ccsrc/minddata/dataset/core/device_resource.cc index 76b368416c..9ffdc2486e 100644 --- a/mindspore/ccsrc/minddata/dataset/core/device_resource.cc +++ b/mindspore/ccsrc/minddata/dataset/core/device_resource.cc @@ -19,7 +19,7 @@ namespace mindspore { namespace dataset { -Status DeviceResource::InitResource() { +Status DeviceResource::InitResource(uint32_t) { return Status(StatusCode::kMDUnexpectedError, "Is this a valid device? 
If yes, please implement this InitResource() in the derived class."); } diff --git a/mindspore/ccsrc/minddata/dataset/core/device_resource.h b/mindspore/ccsrc/minddata/dataset/core/device_resource.h index 4517221738..f5fec3d348 100644 --- a/mindspore/ccsrc/minddata/dataset/core/device_resource.h +++ b/mindspore/ccsrc/minddata/dataset/core/device_resource.h @@ -33,7 +33,7 @@ class DeviceResource { virtual ~DeviceResource() = default; - virtual Status InitResource(); + virtual Status InitResource(uint32_t device_id); virtual Status FinalizeResource(); diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h index d1d64d9059..003df67f2c 100644 --- a/mindspore/ccsrc/minddata/dataset/include/execute.h +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -34,18 +34,22 @@ class Execute { public: /// \brief Constructor // FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform - explicit Execute(std::shared_ptr op, MapTargetDevice deviceType = MapTargetDevice::kCpu); - explicit Execute(std::shared_ptr op, MapTargetDevice deviceType = MapTargetDevice::kCpu); - explicit Execute(std::reference_wrapper op, MapTargetDevice deviceType = MapTargetDevice::kCpu); - explicit Execute(TensorTransform *op, MapTargetDevice deviceType = MapTargetDevice::kCpu); + explicit Execute(std::shared_ptr op, MapTargetDevice deviceType = MapTargetDevice::kCpu, + uint32_t device_id = 0); + explicit Execute(std::shared_ptr op, MapTargetDevice deviceType = MapTargetDevice::kCpu, + uint32_t device_id = 0); + explicit Execute(std::reference_wrapper op, MapTargetDevice deviceType = MapTargetDevice::kCpu, + uint32_t device_id = 0); + explicit Execute(TensorTransform *op, MapTargetDevice deviceType = MapTargetDevice::kCpu, uint32_t device_id = 0); explicit Execute(std::vector> ops, - MapTargetDevice deviceType = MapTargetDevice::kCpu); + MapTargetDevice deviceType = MapTargetDevice::kCpu, uint32_t device_id = 0); explicit Execute(std::vector> ops, - MapTargetDevice deviceType = MapTargetDevice::kCpu); + MapTargetDevice deviceType = MapTargetDevice::kCpu, uint32_t device_id = 0); explicit Execute(const std::vector> ops, - MapTargetDevice deviceType = MapTargetDevice::kCpu); - explicit Execute(std::vector ops, MapTargetDevice deviceType = MapTargetDevice::kCpu); + MapTargetDevice deviceType = MapTargetDevice::kCpu, uint32_t device_id = 0); + explicit Execute(std::vector ops, MapTargetDevice deviceType = MapTargetDevice::kCpu, + uint32_t device_id = 0); /// \brief Destructor ~Execute(); diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_crop_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_crop_jpeg_op.cc index 3718a2198a..9f6f88267a 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_crop_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_crop_jpeg_op.cc @@ -78,7 +78,7 @@ Status DvppCropJpegOp::Compute(const std::shared_ptr &input, std::shared imageinfo.format = PIXEL_FORMAT_YUV_SEMIPLANAR_420; ResourceInfo resource; resource.aclConfigPath = ""; - resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + resource.deviceIds.insert(0); std::shared_ptr instance = ResourceManager::GetInstance(); APP_ERROR ret = instance->InitResource(resource); if (ret != APP_ERR_OK) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_jpeg_op.cc index 
8a268cc5c7..7e00ab3052 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_jpeg_op.cc @@ -71,7 +71,7 @@ Status DvppDecodeJpegOp::Compute(const std::shared_ptr &input, std::shar imageInfo.data = static_cast(buffer); ResourceInfo resource; resource.aclConfigPath = ""; - resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + resource.deviceIds.insert(0); std::shared_ptr instance = ResourceManager::GetInstance(); APP_ERROR ret = instance->InitResource(resource); if (ret != APP_ERR_OK) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_png_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_png_op.cc index 42f1cb6f60..630e0f3fe4 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_png_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_png_op.cc @@ -69,7 +69,7 @@ Status DvppDecodePngOp::Compute(const std::shared_ptr &input, std::share imageInfo.data = static_cast(buffer); ResourceInfo resource; resource.aclConfigPath = ""; - resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + resource.deviceIds.insert(0); std::shared_ptr instance = ResourceManager::GetInstance(); APP_ERROR ret = instance->InitResource(resource); if (ret != APP_ERR_OK) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc index b50c24401b..08c0d1d278 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc @@ -70,7 +70,7 @@ Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr &input, imageInfo.data = static_cast(buffer); ResourceInfo resource; resource.aclConfigPath = ""; - resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + resource.deviceIds.insert(0); std::shared_ptr instance = ResourceManager::GetInstance(); APP_ERROR ret = instance->InitResource(resource); if (ret != APP_ERR_OK) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_jpeg_op.cc index 2cf8819bc2..cf39305745 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_jpeg_op.cc @@ -69,7 +69,7 @@ Status DvppDecodeResizeJpegOp::Compute(const std::shared_ptr &input, std imageInfo.data = static_cast(buffer); ResourceInfo resource; resource.aclConfigPath = ""; - resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + resource.deviceIds.insert(0); std::shared_ptr instance = ResourceManager::GetInstance(); APP_ERROR ret = instance->InitResource(resource); if (ret != APP_ERR_OK) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_resize_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_resize_jpeg_op.cc index 1a33ece923..c2b093fdab 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_resize_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_resize_jpeg_op.cc @@ -79,7 +79,7 @@ Status DvppResizeJpegOp::Compute(const std::shared_ptr &input, std::shar imageinfo.format = PIXEL_FORMAT_YUV_SEMIPLANAR_420; 
ResourceInfo resource; resource.aclConfigPath = ""; - resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + resource.deviceIds.insert(0); std::shared_ptr instance = ResourceManager::GetInstance(); APP_ERROR ret = instance->InitResource(resource); if (ret != APP_ERR_OK) { diff --git a/mindspore/core/base/base_ref_utils.cc b/mindspore/core/base/base_ref_utils.cc index 69051fa9fd..ad01f36757 100644 --- a/mindspore/core/base/base_ref_utils.cc +++ b/mindspore/core/base/base_ref_utils.cc @@ -16,8 +16,6 @@ #include "base/base_ref_utils.h" #include #include - -#include "include/infer_tensor.h" #include "ir/tensor.h" namespace mindspore { diff --git a/mindspore/core/base/base_ref_utils.h b/mindspore/core/base/base_ref_utils.h index 8ced9134b6..0a5d5d4e94 100644 --- a/mindspore/core/base/base_ref_utils.h +++ b/mindspore/core/base/base_ref_utils.h @@ -18,7 +18,6 @@ #include #include -#include "include/infer_tensor.h" #include "ir/tensor.h" #include "base/base_ref.h" diff --git a/mindspore/lite/include/lite_utils.h b/mindspore/lite/include/lite_utils.h index cd4127aa55..1b22677a9a 100644 --- a/mindspore/lite/include/lite_utils.h +++ b/mindspore/lite/include/lite_utils.h @@ -21,15 +21,17 @@ #include #include "include/ms_tensor.h" -namespace mindspore::schema { -struct Tensor; -} // namespace mindspore::schema +namespace mindspore { +class Allocator; -namespace mindspore::lite { +namespace schema { +struct Tensor; +} // namespace schema + +namespace lite { /// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. /// /// \note List public class and interface for reference. -class Allocator; /// \brief DeviceContext defined a device context. struct DeviceContext; @@ -52,5 +54,6 @@ int MS_API StringsToMSTensor(const std::vector &inputs, tensor::MST /// \param[in] MSTensor. /// \return string vector. std::vector MS_API MSTensorToStrings(const tensor::MSTensor *tensor); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ diff --git a/mindspore/lite/include/ms_tensor.h b/mindspore/lite/include/ms_tensor.h index ac0ee269b7..6d03823255 100644 --- a/mindspore/lite/include/ms_tensor.h +++ b/mindspore/lite/include/ms_tensor.h @@ -45,6 +45,12 @@ class MS_API MSTensor { /// \brief Destructor of MindSpore Lite Model. virtual ~MSTensor() = default; + /// \brief Create a MSTensor. + /// + /// \return Pointer to an instance of MindSpore Lite MSTensor. + static MSTensor *CreateTensor(const std::string &name, TypeId type, const std::vector &shape, const void *data, + size_t data_len); + /// \brief Get data type of the MindSpore Lite MSTensor. /// /// \note TypeId is defined in mindspore/mindspore/include/api/type_id.h. Only number types in TypeId enum are @@ -58,12 +64,8 @@ class MS_API MSTensor { /// \return A vector of int as the shape of the MindSpore Lite MSTensor. virtual std::vector shape() const = 0; - /// \brief Get size of the dimension of the MindSpore Lite MSTensor index by the parameter index. - /// - /// \param[in] index Define index of dimension returned. - /// - /// \return Size of dimension of the MindSpore Lite MSTensor. - virtual int DimensionSize(size_t index) const = 0; + /// \brief Set the shape of MSTensor. + virtual void set_shape(const std::vector &name) = 0; /// \brief Get number of element in MSTensor. /// @@ -75,13 +77,6 @@ class MS_API MSTensor { /// \return Byte size of data in MSTensor. 
virtual size_t Size() const = 0; - /// \brief Get the pointer of data in MSTensor. - /// - /// \note The data pointer can be used to both write and read data in MSTensor. - /// - /// \return the pointer points to data in MSTensor. - virtual void *MutableData() = 0; - /// \brief Get the name of MSTensor. /// /// \return the name of MSTensor. @@ -90,6 +85,22 @@ class MS_API MSTensor { /// \brief Set the name of MSTensor. virtual void set_tensor_name(const std::string name) = 0; + /// \brief Get the pointer of data in MSTensor. + /// + /// \note The data pointer can be used to both write and read data in MSTensor. The memory buffer will be + /// automatically allocated. + /// + /// \return the pointer points to data in MSTensor. + virtual void *MutableData() = 0; + + /// \brief Get the pointer of data in MSTensor. + /// + /// \note The data pointer can be used to both write and read data in MSTensor. No memory buffer will be + /// allocated. + /// + /// \return the pointer points to data in MSTensor. + virtual void *data() = 0; + /// \brief Set the data of MSTensor. virtual void set_data(void *data) = 0; }; diff --git a/mindspore/lite/minddata/CMakeLists.txt b/mindspore/lite/minddata/CMakeLists.txt index 28ad8cc5b9..4683070173 100644 --- a/mindspore/lite/minddata/CMakeLists.txt +++ b/mindspore/lite/minddata/CMakeLists.txt @@ -110,6 +110,7 @@ if(BUILD_MINDDATA STREQUAL "full") ${TOP_DIR}/mindspore/lite/src/cxx_api/types.cc ${TOP_DIR}/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc ${TOP_DIR}/mindspore/lite/src/tensor.cc + ${TOP_DIR}/mindspore/lite/src/common/string_util.cc ${CORE_DIR}/utils/status.cc ${MINDDATA_DIR}/api/datasets.cc ${MINDDATA_DIR}/kernels/data/data_utils.cc @@ -304,7 +305,6 @@ elseif(BUILD_MINDDATA STREQUAL "wrapper") set(MINDSPORE_LITE_CXXAPI_SRC ${CORE_DIR}/utils/status.cc ${CMAKE_CURRENT_SOURCE_DIR}/../src/cxx_api/types.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../src/cxx_api/tensor/tensor_impl.cc ${CMAKE_CURRENT_SOURCE_DIR}/../src/tensor.cc ) diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index d078d35f2d..f45bc11612 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -27,7 +27,7 @@ set(API_SRC ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/cell.cc ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/serialization.cc ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/types.cc - ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/lite_context.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/context.cc ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/model/model.cc ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/model/model_impl.cc ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/graph/graph.cc diff --git a/mindspore/lite/src/common/string_util.cc b/mindspore/lite/src/common/string_util.cc index 9a77f285e4..795637f676 100644 --- a/mindspore/lite/src/common/string_util.cc +++ b/mindspore/lite/src/common/string_util.cc @@ -53,6 +53,7 @@ int WriteStringsToTensor(Tensor *tensor, const std::vector &string_b } std::vector shape = {offset[num]}; tensor->set_shape(shape); + tensor->set_data_type(kObjectTypeString); tensor->FreeData(); void *data = tensor->MutableData(); if (data == nullptr) { diff --git a/mindspore/lite/src/cxx_api/cell.cc b/mindspore/lite/src/cxx_api/cell.cc index ec3a3c3bcd..8df0bba2d9 100644 --- a/mindspore/lite/src/cxx_api/cell.cc +++ b/mindspore/lite/src/cxx_api/cell.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance 
with the License. @@ -14,7 +14,6 @@ * limitations under the License. */ #include "include/api/cell.h" -#include "include/api/lite_context.h" #include "src/common/log_adapter.h" namespace mindspore { @@ -77,7 +76,7 @@ Status GraphCell::Run(const std::vector &inputs, std::vector return kLiteError; } -Status GraphCell::Load() { +Status GraphCell::Load(uint32_t device_id) { MS_LOG(ERROR) << "Unsupported feature."; return kLiteError; } diff --git a/mindspore/lite/src/cxx_api/context.cc b/mindspore/lite/src/cxx_api/context.cc new file mode 100644 index 0000000000..37b48c3b5c --- /dev/null +++ b/mindspore/lite/src/cxx_api/context.cc @@ -0,0 +1,266 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "include/api/context.h" +#include +#include +#include +#include "include/api/types.h" +#include "include/api/data_type.h" +#include "src/runtime/allocator.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +constexpr auto kModelOptionCpuEnableFP16 = "mindspore.option.cpu.enable_fp16"; +constexpr auto kModelOptionCpuThreadAffinity = "mindspore.option.cpu.thread_affinity"; +constexpr auto kModelOptionMaliGpuEnableFP16 = "mindspore.option.mali_gpu.enable_fp16"; +constexpr auto kModelOptionKirinNpuFrequency = "mindspore.option.kirin_npu.frequency"; + +struct Context::Data { + std::vector> device_info_list; + int32_t thread_num; + std::shared_ptr allocator; +}; + +struct DeviceInfoContext::Data { + std::map params; +}; + +Context::Context() : data_(std::shared_ptr(new (std::nothrow) Data())) {} + +template >> +static const U &GetValue(const std::shared_ptr &data, const std::string &key) { + static U empty_result; + if (data == nullptr) { + return empty_result; + } + auto iter = data->params.find(key); + if (iter == data->params.end()) { + return empty_result; + } + const std::any &value = iter->second; + + return std::any_cast(value); +} + +void Context::SetThreadNum(int32_t thread_num) { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return; + } + data_->thread_num = thread_num; +} +int32_t Context::GetThreadNum() const { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return 0; + } + return data_->thread_num; +} + +void Context::SetAllocator(const std::shared_ptr &allocator) { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return; + } + data_->allocator = allocator; +} +std::shared_ptr Context::GetAllocator() const { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return nullptr; + } + return data_->allocator; +} + +std::vector> &Context::MutableDeviceInfo() { + static std::vector> empty; + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return empty; + } + return data_->device_info_list; +} + +DeviceInfoContext::DeviceInfoContext() : data_(std::shared_ptr(new (std::nothrow) Data())) {} + +void CPUDeviceInfo::SetEnableFP16(bool is_fp16) { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return; + } + 
data_->params[kModelOptionCpuEnableFP16] = is_fp16; +} +bool CPUDeviceInfo::GetEnableFP16() const { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return false; + } + return GetValue(data_, kModelOptionCpuEnableFP16); +} + +void CPUDeviceInfo::SetThreadAffinity(int affinity) { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return; + } + data_->params[kModelOptionCpuThreadAffinity] = affinity; +} +int CPUDeviceInfo::GetThreadAffinity() const { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return false; + } + return GetValue(data_, kModelOptionCpuThreadAffinity); +} + +void MaliGPUDeviceInfo::SetEnableFP16(bool is_fp16) { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return; + } + data_->params[kModelOptionMaliGpuEnableFP16] = is_fp16; +} +bool MaliGPUDeviceInfo::GetEnableFP16() const { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return false; + } + return GetValue(data_, kModelOptionMaliGpuEnableFP16); +} + +void KirinNPUDeviceInfo::SetFrequency(int frequency) { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return; + } + data_->params[kModelOptionKirinNpuFrequency] = frequency; +} +int KirinNPUDeviceInfo::GetFrequency() const { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return 0; + } + return GetValue(data_, kModelOptionKirinNpuFrequency); +} + +void NvidiaGPUDeviceInfo::SetDeviceID(uint32_t device_id) { MS_LOG(ERROR) << "Unsupported Feature."; } +uint32_t NvidiaGPUDeviceInfo::GetDeviceID() const { + MS_LOG(ERROR) << "Unsupported Feature."; + return 0; +} + +void NvidiaGPUDeviceInfo::SetGpuTrtInferMode(bool gpu_trt_infer_mode) { MS_LOG(ERROR) << "Unsupported Feature."; } +bool NvidiaGPUDeviceInfo::GetGpuTrtInferMode() const { + MS_LOG(ERROR) << "Unsupported Feature."; + return false; +} + +void Ascend910DeviceInfo::SetDeviceID(uint32_t device_id) { MS_LOG(ERROR) << "Unsupported Feature."; } +uint32_t Ascend910DeviceInfo::GetDeviceID() const { + MS_LOG(ERROR) << "Unsupported Feature."; + return 0; +} + +void Ascend310DeviceInfo::SetDeviceID(uint32_t device_id) { MS_LOG(ERROR) << "Unsupported Feature."; } +uint32_t Ascend310DeviceInfo::GetDeviceID() const { + MS_LOG(ERROR) << "Unsupported Feature."; + return 0; +} + +void Ascend310DeviceInfo::SetDumpConfigPath(const std::vector &cfg_path) { + MS_LOG(ERROR) << "Unsupported Feature."; +} +std::vector Ascend310DeviceInfo::GetDumpConfigPathChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::vector &cfg_path) { + MS_LOG(ERROR) << "Unsupported Feature."; +} +std::vector Ascend310DeviceInfo::GetInsertOpConfigPathChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetInputFormat(const std::vector &format) { MS_LOG(ERROR) << "Unsupported Feature."; } +std::vector Ascend310DeviceInfo::GetInputFormatChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetInputShape(const std::vector &shape) { MS_LOG(ERROR) << "Unsupported Feature."; } +std::vector Ascend310DeviceInfo::GetInputShapeChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetDynamicBatchSize(const std::vector &dynamic_batch_size) { + MS_LOG(ERROR) << "Unsupported Feature."; +} +std::vector 
Ascend310DeviceInfo::GetDynamicBatchSizeChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetPrecisionMode(const std::vector &precision_mode) { + MS_LOG(ERROR) << "Unsupported Feature."; +} +std::vector Ascend310DeviceInfo::GetPrecisionModeChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetOpSelectImplMode(const std::vector &op_select_impl_mode) { + MS_LOG(ERROR) << "Unsupported Feature."; +} +std::vector Ascend310DeviceInfo::GetOpSelectImplModeChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::vector &cfg_path) { + MS_LOG(ERROR) << "Unsupported Feature."; +} +std::vector Ascend310DeviceInfo::GetFusionSwitchConfigPathChar() const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetInputShapeMap(const std::map> &shape) { + MS_LOG(ERROR) << "Unsupported Feature."; +} +std::map> Ascend310DeviceInfo::GetInputShapeMap() const { + std::map> empty; + MS_LOG(ERROR) << "Unsupported Feature."; + return empty; +} + +void Ascend310DeviceInfo::SetOutputType(enum DataType output_type) { MS_LOG(ERROR) << "Unsupported Feature."; } +enum DataType Ascend310DeviceInfo::GetOutputType() const { + MS_LOG(ERROR) << "Unsupported Feature."; + return DataType::kTypeUnknown; +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/graph/graph.cc b/mindspore/lite/src/cxx_api/graph/graph.cc index e1c57cdeff..f93eb46b72 100644 --- a/mindspore/lite/src/cxx_api/graph/graph.cc +++ b/mindspore/lite/src/cxx_api/graph/graph.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,8 @@ namespace mindspore { +Graph::Graph() : graph_data_(nullptr) {} + Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} Graph::Graph(std::shared_ptr &&graph_data) : graph_data_(graph_data) {} @@ -30,5 +32,7 @@ Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {} bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } +bool Graph::operator!=(std::nullptr_t) const { return graph_data_ != nullptr; } + ModelType Graph::ModelType() const { return kMindIR; } } // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/graph/graph_data.h b/mindspore/lite/src/cxx_api/graph/graph_data.h index 584035db8d..e24858e741 100644 --- a/mindspore/lite/src/cxx_api/graph/graph_data.h +++ b/mindspore/lite/src/cxx_api/graph/graph_data.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/cxx_api/lite_context.cc b/mindspore/lite/src/cxx_api/lite_context.cc deleted file mode 100644 index cc5f646a01..0000000000 --- a/mindspore/lite/src/cxx_api/lite_context.cc +++ /dev/null @@ -1,303 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "include/api/lite_context.h" -#include -#include -#include -#include "include/api/types.h" -#include "src/common/log_adapter.h" - -namespace mindspore { - -constexpr char kVendorName[] = "vendor_name"; -constexpr char kThreadNum[] = "thread_name"; -constexpr char kAllocator[] = "allocator"; -constexpr char kCPU[] = "cpu"; -constexpr char kCPUEanbleFp16[] = "cpu_enable_fp16"; -constexpr char kCPUBindMode[] = "cpu_bind_mode"; -constexpr char kGPU[] = "gpu"; -constexpr char kGPUEanbleFp16[] = "gpu_enable_fp16"; -constexpr char kNPU[] = "npu"; -constexpr char kNPUFrequency[] = "npu_frequency"; - -void Context::Clear(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - context->context_.clear(); -} - -void Context::SetAsDefault(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - context->context_.clear(); - context->context_.emplace(kCPU, true); -} - -void Context::SetVendorName(const std::shared_ptr &context, const std::string &name) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kVendorName); - if (iter != context->context_.end()) { - iter->second = name; - } else { - context->context_.emplace(kVendorName, name); - } -} - -std::string Context::GetVendorName(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return std::string(); - } - auto iter = context->context_.find(kVendorName); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return std::string(); -} - -void Context::SetThreadNum(const std::shared_ptr &context, int num) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kThreadNum); - if (iter != context->context_.end()) { - iter->second = num; - } else { - context->context_.emplace(kThreadNum, num); - } -} - -int Context::GetThreadNum(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return 0; - } - auto iter = context->context_.find(kThreadNum); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return 2; -} - -void Context::SetAllocator(const std::shared_ptr &context, std::shared_ptr alloc) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kAllocator); - if (iter != context->context_.end()) { - iter->second = alloc; - } else { - context->context_.emplace(kAllocator, alloc); - } -} - -std::shared_ptr Context::GetAllocator(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return nullptr; - } - auto iter = context->context_.find(kAllocator); - if (iter != context->context_.end()) { - return std::any_cast>(iter->second); - } - return nullptr; -} - -void Context::ConfigCPU(const std::shared_ptr &context, bool conf) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is 
nullptr."; - return; - } - auto iter = context->context_.find(kCPU); - if (iter != context->context_.end()) { - iter->second = conf; - } else { - context->context_.emplace(kCPU, conf); - } -} - -bool Context::IfCPUEnabled(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return false; - } - auto iter = context->context_.find(kCPU); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return false; -} - -void Context::ConfigCPUFp16(const std::shared_ptr &context, bool conf) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kCPUEanbleFp16); - if (iter != context->context_.end()) { - iter->second = conf; - } else { - context->context_.emplace(kCPUEanbleFp16, conf); - } -} - -bool Context::IfCPUFp16Enabled(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return false; - } - auto iter = context->context_.find(kCPUEanbleFp16); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return false; -} - -void Context::SetCPUBindMode(const std::shared_ptr &context, lite::CpuBindMode mode) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kCPUBindMode); - if (iter != context->context_.end()) { - iter->second = mode; - } else { - context->context_.emplace(kCPUBindMode, mode); - } -} - -lite::CpuBindMode Context::GetCPUBindMode(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return lite::NO_BIND; - } - auto iter = context->context_.find(kCPUBindMode); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return lite::MID_CPU; -} - -void Context::ConfigGPU(const std::shared_ptr &context, bool conf) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kGPU); - if (iter != context->context_.end()) { - iter->second = conf; - } else { - context->context_.emplace(kGPU, conf); - } -} - -bool Context::IfGPUEnabled(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return false; - } - auto iter = context->context_.find(kGPU); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return false; -} - -void Context::ConfigGPUFp16(const std::shared_ptr &context, bool conf) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kGPUEanbleFp16); - if (iter != context->context_.end()) { - iter->second = conf; - } else { - context->context_.emplace(kGPUEanbleFp16, conf); - } -} - -bool Context::IfGPUFp16Enabled(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return false; - } - auto iter = context->context_.find(kGPUEanbleFp16); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return false; -} - -void Context::ConfigNPU(const std::shared_ptr &context, bool conf) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kNPU); - if (iter != context->context_.end()) { - iter->second = conf; - } else { - context->context_.emplace(kNPU, conf); - } -} - -bool Context::IfNPUEnabled(const std::shared_ptr &context) { - if (context == nullptr) { - 
MS_LOG(ERROR) << "Context is nullptr."; - return false; - } - auto iter = context->context_.find(kNPU); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return false; -} - -void Context::SetNPUFrequency(const std::shared_ptr &context, int freq) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return; - } - auto iter = context->context_.find(kNPUFrequency); - if (iter != context->context_.end()) { - iter->second = freq; - } else { - context->context_.emplace(kNPUFrequency, freq); - } -} - -int Context::GetNPUFrequency(const std::shared_ptr &context) { - if (context == nullptr) { - MS_LOG(ERROR) << "Context is nullptr."; - return 0; - } - auto iter = context->context_.find(kNPUFrequency); - if (iter != context->context_.end()) { - return std::any_cast(iter->second); - } - return 3; -} - -} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model.cc b/mindspore/lite/src/cxx_api/model/model.cc index b28a27e557..1548e6a132 100644 --- a/mindspore/lite/src/cxx_api/model/model.cc +++ b/mindspore/lite/src/cxx_api/model/model.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,17 +14,30 @@ * limitations under the License. */ #include "include/api/model.h" -#include "include/api/lite_context.h" +#include "include/api/types.h" +#include "include/api/context.h" +#include "include/api/dual_abi_helper.h" #include "src/cxx_api/model/model_impl.h" #include "src/common/log_adapter.h" namespace mindspore { -Status Model::Build() { +Status Model::Build(GraphCell graph, const std::shared_ptr &model_context) { + impl_ = std::shared_ptr(new (std::nothrow) ModelImpl()); if (impl_ == nullptr) { MS_LOG(ERROR) << "Model implement is null."; return kLiteNullptr; } + if (graph.GetGraph() == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + return kLiteNullptr; + } + if (model_context == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return kLiteNullptr; + } + impl_->SetContext(model_context); + impl_->SetGraph(graph.GetGraph()); return impl_->Build(); } @@ -44,30 +57,11 @@ Status Model::Predict(const std::vector &inputs, std::vector return impl_->Predict(inputs, outputs); } -Model::Model(const GraphCell &graph, const std::shared_ptr &model_context) { - impl_ = std::shared_ptr(new (std::nothrow) ModelImpl()); - if (impl_ == nullptr || graph.GetGraph() == nullptr) { - MS_LOG(ERROR) << "Invalid graph."; - } else if (model_context == nullptr) { - MS_LOG(ERROR) << "Invalid context."; - } else { - auto new_graph_cell = std::shared_ptr(new (std::nothrow) GraphCell(graph)); - if (new_graph_cell != nullptr) { - impl_->SetContext(model_context); - impl_->SetGraphCell(new_graph_cell); - } else { - MS_LOG(ERROR) << "New graphcell failed."; - } - } -} - -Model::Model(const std::vector &network, const std::shared_ptr &model_context) { - MS_LOG(ERROR) << "Unsupported feature."; -} +Model::Model() : impl_(nullptr) {} Model::~Model() {} -bool Model::CheckModelSupport(const std::vector &, ModelType) { +bool Model::CheckModelSupport(enum DeviceType device_type, ModelType model_type) { MS_LOG(ERROR) << "Unsupported feature."; return false; } @@ -90,4 +84,37 @@ std::vector Model::GetOutputs() { return impl_->GetOutputs(); } +MSTensor Model::GetInputByTensorName(const std::vector &name) { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is 
null."; + return MSTensor(nullptr); + } + return impl_->GetInputByTensorName(CharToString(name)); +} + +std::vector> Model::GetOutputTensorNamesChar() { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + std::vector> empty; + return empty; + } + return VectorStringToChar(impl_->GetOutputTensorNames()); +} + +MSTensor Model::GetOutputByTensorName(const std::vector &name) { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return MSTensor(nullptr); + } + return impl_->GetOutputByTensorName(CharToString(name)); +} + +std::vector Model::GetOutputsByNodeName(const std::vector &node_name) { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + std::vector empty; + return empty; + } + return impl_->GetOutputsByNodeName(CharToString(node_name)); +} } // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model_impl.cc b/mindspore/lite/src/cxx_api/model/model_impl.cc index b0a3d85efd..09cf15b0d7 100644 --- a/mindspore/lite/src/cxx_api/model/model_impl.cc +++ b/mindspore/lite/src/cxx_api/model/model_impl.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,14 +19,16 @@ #include #include #include "include/api/types.h" -#include "include/api/lite_context.h" +#include "include/api/context.h" +#include "include/api/dual_abi_helper.h" #include "include/lite_session.h" #include "include/context.h" #include "src/lite_model.h" #include "src/runtime/allocator.h" -#include "src/cxx_api/utils.h" +#include "src/common/string_util.h" #include "src/cxx_api/graph/graph_data.h" #include "src/cxx_api/tensor/tensor_impl.h" +#include "src/cxx_api/tensor_utils.h" #include "src/common/log_adapter.h" namespace mindspore { @@ -39,13 +41,9 @@ Status ModelImpl::Build() { MS_LOG(DEBUG) << "Model has been already built."; return kSuccess; } - if (graph_cell_ == nullptr || graph_cell_->GetGraph() == nullptr || graph_cell_->GetGraph()->graph_data_ == nullptr) { - MS_LOG(ERROR) << "Graph cell is invalid."; - return kLiteNullptr; - } - auto model = graph_cell_->GetGraph()->graph_data_->lite_model(); - if (model == nullptr) { - MS_LOG(ERROR) << "Lite model is nullptr."; + auto model = graph_->graph_data_->lite_model(); + if (graph_ == nullptr || graph_->graph_data_ == nullptr || model == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; return kLiteNullptr; } if (model->buf == nullptr) { @@ -57,36 +55,58 @@ Status ModelImpl::Build() { return kLiteNullptr; } lite::Context model_context; - model_context.allocator = Context::GetAllocator(context_); + auto device_list = context_->MutableDeviceInfo(); + if (device_list.size() == 0) { + MS_LOG(ERROR) << "Invalid device list."; + return kLiteInputParamInvalid; + } + if (device_list.size() > 2) { + MS_LOG(ERROR) << "Only CPU/CPU & GPU/CPU & NPU mode is supported."; + return kLiteInputParamInvalid; + } + model_context.allocator = context_->GetAllocator(); if (model_context.allocator == nullptr) { - model_context.allocator = lite::Allocator::Create(); + model_context.allocator = Allocator::Create(); if (model_context.allocator == nullptr) { MS_LOG(ERROR) << "Create Allocator failed."; return kLiteNullptr; } MS_LOG(DEBUG) << "Set new allocator."; - Context::SetAllocator(context_, model_context.allocator); + context_->SetAllocator(model_context.allocator); } - model_context.vendor_name_ = 
Context::GetVendorName(context_); - model_context.thread_num_ = Context::GetThreadNum(context_); + model_context.thread_num_ = context_->GetThreadNum(); model_context.device_list_.clear(); - if (Context::IfCPUEnabled(context_) && Context::IfGPUEnabled(context_) && Context::IfNPUEnabled(context_)) { - MS_LOG(ERROR) << "CPU/GPU/NPU cannot be enabled at the same time."; + if (device_list[0]->GetDeviceType() != kCPU) { + MS_LOG(ERROR) << "CPU context must be enabled and in the first place of device list."; return kLiteInputParamInvalid; } - if (!Context::IfCPUEnabled(context_)) { - MS_LOG(INFO) << "CPU is forced to be enabled."; + auto cpu_context = device_list[0]->Cast(); + lite::CpuBindMode mode; + if (cpu_context->GetThreadAffinity() == 0) { + mode = lite::NO_BIND; + } else if (cpu_context->GetThreadAffinity() == 1) { + mode = lite::HIGHER_CPU; + } else if (cpu_context->GetThreadAffinity() == 2) { + mode = lite::MID_CPU; + } else { + MS_LOG(ERROR) << "Invalid thread affinity."; + return kLiteInputParamInvalid; } - lite::DeviceInfo cpu_info = { - .cpu_device_info_ = {Context::IfCPUFp16Enabled(context_), Context::GetCPUBindMode(context_)}}; + lite::DeviceInfo cpu_info = {.cpu_device_info_ = {cpu_context->GetEnableFP16(), mode}}; model_context.device_list_.push_back({lite::DT_CPU, cpu_info}); - if (Context::IfGPUEnabled(context_)) { - lite::DeviceInfo gpu_info = {.gpu_device_info_ = {Context::IfGPUFp16Enabled(context_)}}; - model_context.device_list_.push_back({lite::DT_GPU, gpu_info}); - } - if (Context::IfNPUEnabled(context_)) { - lite::DeviceInfo npu_info = {.npu_device_info_ = {Context::GetNPUFrequency(context_)}}; - model_context.device_list_.push_back({lite::DT_NPU, npu_info}); + if (device_list.size() == 2) { + if (device_list[0]->GetDeviceType() == kMaliGPU) { + auto gpu_context = device_list[0]->Cast(); + lite::DeviceInfo gpu_info = {.gpu_device_info_ = {gpu_context->GetEnableFP16()}}; + model_context.device_list_.push_back({lite::DT_GPU, gpu_info}); + } else if (device_list[0]->GetDeviceType() == kKirinNPU) { + auto npu_context = device_list[0]->Cast(); + lite::DeviceInfo npu_info = {.npu_device_info_ = {npu_context->GetFrequency()}}; + model_context.device_list_.push_back({lite::DT_NPU, npu_info}); + } else { + MS_LOG(ERROR) << "Invalid device."; + return kLiteInputParamInvalid; + } } auto session = std::shared_ptr(session::LiteSession::CreateSession(&model_context)); if (session == nullptr) { @@ -98,12 +118,19 @@ Status ModelImpl::Build() { MS_LOG(ERROR) << "Build model failed."; return static_cast(ret); } + session->BindThread(true); session_.swap(session); model->Free(); MS_LOG(DEBUG) << "Build model success."; return kSuccess; } +static void ResetTensorData(std::vector old_data, std::vector tensors) { + for (size_t j = 0; j < old_data.size(); j++) { + tensors.at(j)->set_data(old_data.at(j)); + } +} + Status ModelImpl::Predict(const std::vector &inputs, std::vector *outputs) { if (session_ == nullptr) { MS_LOG(ERROR) << "Run graph failed."; @@ -122,35 +149,44 @@ Status ModelImpl::Predict(const std::vector &inputs, std::vector(input->data_type())) { + ResetTensorData(old_data, input_tensors); + MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has a different data type from input" << input->tensor_name() + << "."; + return kLiteInputTensorError; + } if (user_input.Name() != input->tensor_name()) { MS_LOG(WARNING) << "Tensor " << user_input.Name() << " has a different name from input" << input->tensor_name() << "."; } - old_data.push_back(input->MutableData()); - if 
(user_input.MutableData() != input->MutableData()) { - if (input->Size() != user_input.DataSize()) { - for (size_t j = 0; j < old_data.size(); j++) { - input_tensors.at(j)->set_data(old_data.at(j)); - } - MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has wrong data size."; - return kLiteInputTensorError; + old_data.push_back(input->data()); + if (input->data_type() == kObjectTypeString) { + std::vector shape = TruncateShape(user_input.Shape(), input->data_type(), user_input.DataSize(), false); + if (shape.empty() && !(user_input.Shape().empty())) { + ResetTensorData(old_data, input_tensors); + MS_LOG(ERROR) << "Input dims of tensor " << user_input.Name() << " is invalid."; + return kLiteParamInvalid; } - if (user_input.impl_->need_copy()) { - ::memcpy(input->MutableData(), user_input.MutableData(), input->Size()); - } else { + input->set_shape(shape); + input->set_data(user_input.MutableData()); + } else { + if (user_input.MutableData() != input->data()) { + if (input->Size() != user_input.DataSize()) { + ResetTensorData(old_data, input_tensors); + MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has wrong data size."; + return kLiteInputTensorError; + } input->set_data(user_input.MutableData()); } } } auto ret = session_->RunGraph(); + ResetTensorData(old_data, input_tensors); if (ret != RET_OK) { MS_LOG(ERROR) << "Run graph failed."; return static_cast(ret); } MS_LOG(DEBUG) << "Run graph success."; - for (size_t i = 0; i < old_data.size(); i++) { - input_tensors.at(i)->set_data(old_data.at(i)); - } auto res = GetOutputs(); if (res.empty()) { MS_LOG(DEBUG) << "Empty outputs."; @@ -176,7 +212,7 @@ std::vector ModelImpl::GetInputs() { res.resize(inputs.size()); for (size_t i = 0; i < inputs.size(); i++) { auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(inputs[i])); - if (impl == nullptr) { + if (impl == nullptr || impl->lite_tensor() == nullptr) { MS_LOG(ERROR) << "Create tensor failed."; return empty; } @@ -214,7 +250,83 @@ std::vector ModelImpl::GetOutputs() { res.resize(names.size()); for (size_t i = 0; i < names.size(); i++) { auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(outputs[names[i]])); - if (impl == nullptr) { + if (impl == nullptr || impl->lite_tensor() == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + auto tensor = MSTensor(impl); + if (tensor == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + res[i] = tensor; + } + return res; +} + +MSTensor ModelImpl::GetInputByTensorName(const std::string &name) { + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return MSTensor(nullptr); + } + auto res = session_->GetInputsByTensorName(name); + if (res == nullptr) { + MS_LOG(ERROR) << "Model does not contains tensor " << name << " ."; + return MSTensor(nullptr); + } + auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(res)); + if (impl == nullptr || impl->lite_tensor() == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return MSTensor(nullptr); + } + + return MSTensor(impl); +} + +std::vector ModelImpl::GetOutputTensorNames() { + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + std::vector empty; + return empty; + } + return session_->GetOutputTensorNames(); +} + +MSTensor ModelImpl::GetOutputByTensorName(const std::string &name) { + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return MSTensor(nullptr); + } + auto res = session_->GetOutputByTensorName(name); + if (res == nullptr) { + MS_LOG(ERROR) << "Model does 
not contains tensor " << name << " ."; + return MSTensor(nullptr); + } + auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(res)); + if (impl == nullptr || impl->lite_tensor() == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return MSTensor(nullptr); + } + + return MSTensor(impl); +} + +std::vector ModelImpl::GetOutputsByNodeName(const std::string &name) { + std::vector empty; + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return empty; + } + std::vector res; + auto outputs = session_->GetOutputsByNodeName(name); + if (outputs.empty()) { + MS_LOG(ERROR) << "The outputs of model is null."; + return empty; + } + res.resize(outputs.size()); + for (size_t i = 0; i < outputs.size(); i++) { + auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(outputs[i])); + if (impl == nullptr || impl->lite_tensor() == nullptr) { MS_LOG(ERROR) << "Create tensor failed."; return empty; } diff --git a/mindspore/lite/src/cxx_api/model/model_impl.h b/mindspore/lite/src/cxx_api/model/model_impl.h index 0309f1e867..4f0b7d6604 100644 --- a/mindspore/lite/src/cxx_api/model/model_impl.h +++ b/mindspore/lite/src/cxx_api/model/model_impl.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,14 +23,14 @@ #include #include #include "include/api/model.h" -#include "include/api/lite_context.h" +#include "include/api/context.h" #include "include/api/cell.h" #include "include/lite_session.h" namespace mindspore { class ModelImpl { public: - ModelImpl() : graph_cell_(nullptr), session_(nullptr), context_(nullptr) {} + ModelImpl() : graph_(nullptr), session_(nullptr), context_(nullptr) {} ~ModelImpl() = default; Status Build(); @@ -40,15 +40,19 @@ class ModelImpl { std::vector GetInputs(); std::vector GetOutputs(); + MSTensor GetInputByTensorName(const std::string &name); + std::vector GetOutputTensorNames(); + MSTensor GetOutputByTensorName(const std::string &name); + std::vector GetOutputsByNodeName(const std::string &name); static bool CheckModelSupport(const std::string &device_type, ModelType model_type); private: friend class Model; - std::shared_ptr graph_cell_; + std::shared_ptr graph_; std::shared_ptr session_; std::shared_ptr context_; - void SetGraphCell(const std::shared_ptr &graph_cell) { graph_cell_ = graph_cell; } + void SetGraph(const std::shared_ptr &graph) { graph_ = graph; } void SetContext(const std::shared_ptr &context) { context_ = context; } }; } // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/serialization.cc b/mindspore/lite/src/cxx_api/serialization.cc index ca47d0cc1c..bd768c4f70 100644 --- a/mindspore/lite/src/cxx_api/serialization.cc +++ b/mindspore/lite/src/cxx_api/serialization.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
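
Taken together, the Model, ModelImpl, and Serialization changes in this patch replace the old constructor-based flow with Serialization::Load into a caller-owned Graph followed by Model::Build(GraphCell, Context). A minimal usage sketch of that flow; the helper name, include paths, and the std::string convenience overloads are illustrative assumptions, not part of this diff:

#include <memory>
#include <vector>
#include "include/api/context.h"
#include "include/api/model.h"
#include "include/api/serialization.h"

mindspore::Status BuildAndRun(const void *model_buf, size_t size) {
  // Load a MindIR buffer into a Graph; Load now reports failures via Status.
  mindspore::Graph graph;
  auto ret = mindspore::Serialization::Load(model_buf, size, mindspore::kMindIR, &graph);
  if (ret != mindspore::kSuccess) {
    return ret;
  }
  // The new Context carries per-device settings; CPU must be the first entry.
  auto context = std::make_shared<mindspore::Context>();
  context->SetThreadNum(2);
  auto cpu_info = std::make_shared<mindspore::CPUDeviceInfo>();
  cpu_info->SetThreadAffinity(1);  // 1: bind big cores first (maps to lite::HIGHER_CPU)
  context->MutableDeviceInfo().push_back(cpu_info);
  // Build binds the graph and context; Predict reuses the session created here.
  mindspore::Model model;
  ret = model.Build(mindspore::GraphCell(graph), context);
  if (ret != mindspore::kSuccess) {
    return ret;
  }
  auto inputs = model.GetInputs();
  // Fill inputs[i].MutableData() with real data before calling Predict.
  std::vector<mindspore::MSTensor> outputs;
  return model.Predict(inputs, &outputs);
}
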
@@ -19,7 +19,7 @@ #include #include #include "include/api/graph.h" -#include "include/api/lite_context.h" +#include "include/api/context.h" #include "include/api/types.h" #include "include/model.h" #include "include/ms_tensor.h" @@ -28,28 +28,28 @@ namespace mindspore { -Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) { +Status Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph) { if (model_type != kMindIR) { MS_LOG(ERROR) << "Unsupported IR."; - return Graph(nullptr); + return kLiteInputParamInvalid; } auto model = std::shared_ptr(lite::Model::Import(static_cast(model_data), data_size)); if (model == nullptr) { MS_LOG(ERROR) << "New model failed."; - return Graph(nullptr); + return kLiteNullptr; } auto graph_data = std::shared_ptr(new (std::nothrow) Graph::GraphData(model)); if (graph_data == nullptr) { MS_LOG(ERROR) << "New graph data failed."; - return Graph(nullptr); + return kLiteMemoryFailed; } - Graph graph = Graph(graph_data); - return graph; + *graph = Graph(graph_data); + return kSuccess; } -Graph Serialization::LoadModel(const std::vector &file, ModelType model_type) { +Status Serialization::Load(const std::vector &file, ModelType model_type, Graph *graph) { MS_LOG(ERROR) << "Unsupported Feature."; - return Graph(nullptr); + return kLiteError; } Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map *parameters) { diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc index a0ec1677ba..9667251bb3 100644 --- a/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc +++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,27 +13,69 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "src/cxx_api/tensor/tensor_impl.h" +#include #include #include #include #include #include #include +#include "src/cxx_api/tensor/tensor_impl.h" +#include "src/cxx_api/tensor_utils.h" #include "include/api/types.h" #include "include/api/status.h" -#include "src/cxx_api/utils.h" +#include "include/ms_tensor.h" +#include "src/common/string_util.h" +#include "src/tensor.h" #include "src/common/log_adapter.h" +#include "ir/dtype/type_id.h" namespace mindspore { -MSTensor::Impl::Impl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, - size_t data_len) { +using mindspore::lite::RET_OK; + +MSTensor::Impl *MSTensor::Impl::CreateTensorImpl(const std::string &name, enum DataType type, + const std::vector &shape, const void *data, size_t data_len) { std::vector truncated_shape = TruncateShape(shape, static_cast(type), data_len, true); if (truncated_shape.empty() && !(shape.empty())) { - lite_tensor_ = nullptr; - } else { - lite_tensor_ = new (std::nothrow) lite::Tensor(name, static_cast(type), truncated_shape, data); + MS_LOG(ERROR) << "Invalid shape for creating tensor."; + return nullptr; } + auto lite_tensor = lite::Tensor::CreateTensor(name, static_cast(type), truncated_shape, data, data_len); + if (lite_tensor == nullptr) { + MS_LOG(ERROR) << "Failed to allocate lite tensor."; + return nullptr; + } + auto impl = new (std::nothrow) Impl(); + if (impl == nullptr) { + MS_LOG(ERROR) << "Failed to allocate tensor impl."; + return nullptr; + } + impl->set_lite_tensor(lite_tensor); + return impl; +} + +MSTensor::Impl *MSTensor::Impl::StringsToTensorImpl(const std::string &name, const std::vector &str) { + auto lite_tensor = new (std::nothrow) lite::Tensor(); + if (lite_tensor == nullptr) { + MS_LOG(ERROR) << "Failed to allocate lite tensor."; + return nullptr; + } + lite_tensor->set_tensor_name(name); + auto ret = lite::StringsToMSTensor(str, lite_tensor); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Convert strings to tensor failed."; + delete lite_tensor; + return nullptr; + } + auto impl = new (std::nothrow) Impl(); + if (impl == nullptr) { + delete lite_tensor; + MS_LOG(ERROR) << "Failed to allocate tensor impl."; + return nullptr; + } + impl->set_lite_tensor(lite_tensor); + impl->set_own_data(true); + return impl; } } // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.h b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h index 4825a9230f..b8e9d9833b 100644 --- a/mindspore/lite/src/cxx_api/tensor/tensor_impl.h +++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
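
StringsToTensorImpl above routes lite::StringsToMSTensor into the rewritten tensor impl, and the types.cc hunks further down expose it through CharStringsToTensor, TensorToStringChars, and DestroyTensorPtr. A rough round-trip sketch; the std::string-based StringsToTensor and TensorToStrings wrappers over those char entry points are assumed convenience overloads, not shown in this diff:

#include <string>
#include <vector>
#include "include/api/types.h"

void StringTensorRoundTrip() {
  std::vector<std::string> words = {"hello", "mindspore"};
  // Packs the strings into a kObjectTypeString tensor; the impl owns the buffer.
  mindspore::MSTensor *tensor = mindspore::MSTensor::StringsToTensor("words", words);
  if (tensor == nullptr) {
    return;
  }
  // Unpacks the tensor back into std::strings via TensorImplToStrings.
  std::vector<std::string> decoded = mindspore::MSTensor::TensorToStrings(*tensor);
  (void)decoded;  // use the decoded strings here
  // Factory-created tensors are raw pointers and must be released explicitly.
  mindspore::MSTensor::DestroyTensorPtr(tensor);
}
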
@@ -22,25 +22,51 @@ #include #include "include/api/types.h" #include "include/api/status.h" +#include "include/lite_utils.h" #include "include/ms_tensor.h" #include "src/tensor.h" #include "src/common/log_adapter.h" namespace mindspore { +using mindspore::lite::RET_OK; + class MSTensor::Impl { public: Impl() {} - virtual ~Impl() = default; - explicit Impl(tensor::MSTensor *tensor) : lite_tensor_(tensor) { + + virtual ~Impl() { + if (lite_tensor_ == nullptr) { + return; + } + if (!from_session_) { + if (!own_data_) { + lite_tensor_->set_data(nullptr); + } + delete lite_tensor_; + lite_tensor_ = nullptr; + } + } + + explicit Impl(tensor::MSTensor *tensor) : lite_tensor_(tensor), from_session_(true) { if (tensor != nullptr) { tensor_name_ = tensor->tensor_name(); } } - bool operator==(std::nullptr_t) const { return lite_tensor_ == nullptr; } + static Impl *CreateTensorImpl(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len); - Impl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, - size_t data_len); + static Impl *StringsToTensorImpl(const std::string &name, const std::vector &str); + + static std::vector TensorImplToStrings(const std::shared_ptr &impl) { + std::vector empty; + auto lite_tensor = impl->lite_tensor(); + if (lite_tensor == nullptr) { + MS_LOG(ERROR) << "Invalid tensor impl."; + return empty; + } + return lite::MSTensorToStrings(lite_tensor); + } virtual const std::string &Name() const { static std::string empty = ""; @@ -110,11 +136,6 @@ class MSTensor::Impl { virtual bool IsDevice() const { return false; } - virtual std::shared_ptr Clone() const { - MS_LOG(ERROR) << "Unsupported feature."; - return nullptr; - } - tensor::MSTensor *lite_tensor() { return lite_tensor_; } Status set_lite_tensor(tensor::MSTensor *tensor) { @@ -126,15 +147,14 @@ class MSTensor::Impl { return kSuccess; } - void set_need_copy(bool need_copy) { need_copy_ = need_copy; } - - bool need_copy() { return need_copy_; } + void set_own_data(bool own_data) { own_data_ = own_data; } private: - tensor::MSTensor *lite_tensor_; - std::string tensor_name_; - std::vector shape_; - bool need_copy_ = true; + tensor::MSTensor *lite_tensor_ = nullptr; + std::string tensor_name_ = ""; + std::vector shape_ = {}; + bool own_data_ = false; + bool from_session_ = false; }; } // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/utils.h b/mindspore/lite/src/cxx_api/tensor_utils.h similarity index 98% rename from mindspore/lite/src/cxx_api/utils.h rename to mindspore/lite/src/cxx_api/tensor_utils.h index 714709ab8c..c0b442b30a 100644 --- a/mindspore/lite/src/cxx_api/utils.h +++ b/mindspore/lite/src/cxx_api/tensor_utils.h @@ -15,7 +15,7 @@ */ #include #include -#include "src/tensor.h" +#include "ir/dtype/type_id.h" namespace mindspore { static std::vector TruncateShape(const std::vector &shape, enum TypeId type, size_t data_len, diff --git a/mindspore/lite/src/cxx_api/types.cc b/mindspore/lite/src/cxx_api/types.cc index 12f987067a..1958dc9c74 100644 --- a/mindspore/lite/src/cxx_api/types.cc +++ b/mindspore/lite/src/cxx_api/types.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
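
The reworked Impl above replaces need_copy_ with own_data_ and from_session_: session-owned tensors are never deleted by the wrapper, and a user-created impl frees its buffer only when it owns it. The types.cc factories below follow those rules; a small sketch of the resulting contract, assuming the std::string CreateTensor and CreateRefTensor overloads are inline wrappers around the char versions shown here:

#include "include/api/types.h"

void TensorOwnership() {
  float buf[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  // CreateTensor copies buf into its own allocation (own_data_ = true),
  // so buf can be modified or freed right after this call.
  auto *copy = mindspore::MSTensor::CreateTensor("copy", mindspore::DataType::kNumberTypeFloat32,
                                                 {4}, buf, sizeof(buf));
  // CreateRefTensor only references buf (own_data_ stays false), so buf must
  // outlive every use of ref.
  auto *ref = mindspore::MSTensor::CreateRefTensor("ref", mindspore::DataType::kNumberTypeFloat32,
                                                   {4}, buf, sizeof(buf));
  mindspore::MSTensor::DestroyTensorPtr(copy);  // releases the wrapper and its copied buffer
  mindspore::MSTensor::DestroyTensorPtr(ref);   // deletes the wrapper, leaves buf alone
}
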
@@ -18,7 +18,9 @@ #include #include #include "include/api/status.h" +#include "include/api/dual_abi_helper.h" #include "src/cxx_api/tensor/tensor_impl.h" +#include "src/common/string_util.h" #include "src/tensor.h" #include "src/common/log_adapter.h" @@ -62,40 +64,106 @@ MSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {} MSTensor::MSTensor(const std::shared_ptr &impl) : impl_(impl) {} MSTensor::MSTensor(const std::vector &name, enum DataType type, const std::vector &shape, const void *data, size_t data_len) - : impl_(std::make_shared(CharToString(name), type, shape, data, data_len)) {} + : impl_(std::shared_ptr(Impl::CreateTensorImpl(CharToString(name), type, shape, data, data_len))) {} MSTensor::~MSTensor() = default; bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; } -MSTensor MSTensor::CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept { - auto impl = std::make_shared(CharToString(name), type, shape, data, data_len); +bool MSTensor::operator!=(std::nullptr_t) const { return impl_ != nullptr; } + +MSTensor *MSTensor::CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + auto new_data = malloc(data_len); + if (new_data == nullptr) { + MS_LOG(ERROR) << "Allocate data failed."; + return nullptr; + } + ::memcpy(new_data, data, data_len); + auto impl = std::shared_ptr(Impl::CreateTensorImpl(CharToString(name), type, shape, new_data, data_len)); if (impl == nullptr) { MS_LOG(ERROR) << "Allocate tensor impl failed."; - return MSTensor(nullptr); + free(new_data); + return nullptr; } - return MSTensor(impl); + auto ms_tensor = new (std::nothrow) MSTensor(impl); + if (ms_tensor == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + free(new_data); + return nullptr; + } + impl->set_own_data(true); + return ms_tensor; } -MSTensor MSTensor::CreateRefTensor(const std::vector &name, enum DataType type, const std::vector &shape, - const void *data, size_t data_len) noexcept { - auto tensor = CreateTensor(name, type, shape, data, data_len); - if (tensor == nullptr) { - return MSTensor(nullptr); +MSTensor *MSTensor::CreateRefTensor(const std::vector &name, enum DataType type, + const std::vector &shape, const void *data, size_t data_len) noexcept { + auto impl = std::shared_ptr(Impl::CreateTensorImpl(CharToString(name), type, shape, data, data_len)); + if (impl == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + return nullptr; } - tensor.impl_->set_need_copy(false); - return tensor; + auto ms_tensor = new (std::nothrow) MSTensor(impl); + if (ms_tensor == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + return nullptr; + } + return ms_tensor; } -MSTensor MSTensor::Clone() const { - MSTensor ret; +MSTensor *MSTensor::CharStringsToTensor(const std::vector &name, const std::vector> &inputs) { + auto impl = std::shared_ptr(Impl::StringsToTensorImpl(CharToString(name), VectorCharToString(inputs))); + if (impl == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + return nullptr; + } + auto ms_tensor = new (std::nothrow) MSTensor(impl); + if (ms_tensor == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + return nullptr; + } + return ms_tensor; +} + +std::vector> MSTensor::TensorToStringChars(const MSTensor &tensor) { + if (tensor.impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + std::vector> empty; + return empty; + } + return 
VectorStringToChar(Impl::TensorImplToStrings(tensor.impl_)); +} + +MSTensor *MSTensor::Clone() const { if (impl_ == nullptr) { - MS_LOG(ERROR) << "Invalid tensor inpmlement."; - ret.impl_ = nullptr; - return ret; + MS_LOG(ERROR) << "Invalid tensor."; + return nullptr; } - ret.impl_ = impl_->Clone(); - return ret; + auto data_len = this->DataSize(); + if (data_len <= 0) { + MS_LOG(ERROR) << "Illegal data size of tensor."; + return nullptr; + } + auto new_data = malloc(data_len); + if (new_data == nullptr) { + MS_LOG(ERROR) << "Allocate data failed."; + return nullptr; + } + auto impl = + std::shared_ptr(Impl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), new_data, data_len)); + if (impl == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + free(new_data); + return nullptr; + } + auto ms_tensor = new (std::nothrow) MSTensor(impl); + if (ms_tensor == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + free(new_data); + return nullptr; + } + ::memcpy(new_data, impl_->MutableData(), data_len); + impl->set_own_data(true); + return ms_tensor; } std::vector MSTensor::CharName() const { @@ -160,10 +228,14 @@ bool MSTensor::IsDevice() const { return false; } -Buffer::Buffer() : impl_(std::make_shared()) { MS_LOG(ERROR) << "Unsupported feature."; } -Buffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared(data, data_len)) { - MS_LOG(ERROR) << "Unsupported feature."; +void MSTensor::DestroyTensorPtr(MSTensor *tensor) noexcept { + if (tensor != nullptr) { + delete tensor; + } } + +Buffer::Buffer() : impl_(nullptr) { MS_LOG(ERROR) << "Unsupported feature."; } +Buffer::Buffer(const void *data, size_t data_len) : impl_(nullptr) { MS_LOG(ERROR) << "Unsupported feature."; } Buffer::~Buffer() = default; Buffer Buffer::Clone() const { diff --git a/mindspore/lite/src/executor.cc b/mindspore/lite/src/executor.cc index d21202fa14..79d3e5b9f3 100644 --- a/mindspore/lite/src/executor.cc +++ b/mindspore/lite/src/executor.cc @@ -41,8 +41,8 @@ int Executor::CheckInputs(const std::vector &in_tensors) { } int Executor::Run(const std::vector &in_tensors, const std::vector &out_tensors, - const std::vector &kernels, Allocator *allocator, const KernelCallBack &before, - const KernelCallBack &after) { + const std::vector &kernels, mindspore::Allocator *allocator, + const KernelCallBack &before, const KernelCallBack &after) { MS_ASSERT(nullptr != allocator); auto ret = this->CheckInputs(in_tensors); if (RET_OK != ret) { diff --git a/mindspore/lite/src/executor.h b/mindspore/lite/src/executor.h index bb0e69b22c..016051860a 100644 --- a/mindspore/lite/src/executor.h +++ b/mindspore/lite/src/executor.h @@ -31,7 +31,7 @@ class Executor { virtual int Prepare(const std::vector &kernels) { return RET_OK; } virtual int Run(const std::vector &in_tensors, const std::vector &out_tensors, - const std::vector &kernels, Allocator *allocator = nullptr, + const std::vector &kernels, mindspore::Allocator *allocator = nullptr, const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr); protected: @@ -44,7 +44,7 @@ class CpuExecutor : public Executor { virtual ~CpuExecutor() = default; int Run(const std::vector &in_tensors, const std::vector &out_tensors, - const std::vector &kernels, Allocator *allocator = nullptr, + const std::vector &kernels, mindspore::Allocator *allocator = nullptr, const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override; }; diff --git a/mindspore/lite/src/inner_context.cc b/mindspore/lite/src/inner_context.cc 
index 33866be80b..33f01758a4 100644 --- a/mindspore/lite/src/inner_context.cc +++ b/mindspore/lite/src/inner_context.cc @@ -57,7 +57,7 @@ int InnerContext::Init() { } } if (this->allocator == nullptr) { - this->allocator = Allocator::Create(); + this->allocator = mindspore::Allocator::Create(); if (this->allocator == nullptr) { MS_LOG(ERROR) << "Create Allocator failed"; return RET_NULL_PTR; diff --git a/mindspore/lite/src/lite_kernel.cc b/mindspore/lite/src/lite_kernel.cc index 485c7bd58b..e2ae540cad 100644 --- a/mindspore/lite/src/lite_kernel.cc +++ b/mindspore/lite/src/lite_kernel.cc @@ -105,7 +105,7 @@ int LiteKernel::PreProcess() { for (auto *output : this->out_tensors()) { MS_ASSERT(output != nullptr); - if (output->ElementsNum() >= lite::MAX_MALLOC_SIZE / static_cast(sizeof(int64_t))) { + if (output->ElementsNum() >= MAX_MALLOC_SIZE / static_cast(sizeof(int64_t))) { MS_LOG(ERROR) << "The size of output tensor is too big"; return RET_ERROR; } diff --git a/mindspore/lite/src/mindrt_executor.cc b/mindspore/lite/src/mindrt_executor.cc index 732d3f1a39..0c646c5c59 100644 --- a/mindspore/lite/src/mindrt_executor.cc +++ b/mindspore/lite/src/mindrt_executor.cc @@ -58,7 +58,7 @@ int MindrtExecutor::Prepare(const std::vector &kernels) { } int MindrtExecutor::Run(const std::vector &in_tensors, const std::vector &out_tensors, - const std::vector &kernels, Allocator *allocator, + const std::vector &kernels, mindspore::Allocator *allocator, const KernelCallBack &before, const KernelCallBack &after) { MS_ASSERT(nullptr != allocator); if (kernels.front()->Type() != schema::PrimitiveType_Merge) { diff --git a/mindspore/lite/src/mindrt_executor.h b/mindspore/lite/src/mindrt_executor.h index 184d2e70a0..004e69d385 100644 --- a/mindspore/lite/src/mindrt_executor.h +++ b/mindspore/lite/src/mindrt_executor.h @@ -34,7 +34,7 @@ class MindrtExecutor : public Executor { virtual int Prepare(const std::vector &kernels); virtual int Run(const std::vector &in_tensors, const std::vector &out_tensors, - const std::vector &kernels, Allocator *allocator = nullptr, + const std::vector &kernels, mindspore::Allocator *allocator = nullptr, const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr); protected: diff --git a/mindspore/lite/src/runtime/allocator.cc b/mindspore/lite/src/runtime/allocator.cc index 48021eaf3e..90e91ad364 100644 --- a/mindspore/lite/src/runtime/allocator.cc +++ b/mindspore/lite/src/runtime/allocator.cc @@ -18,7 +18,7 @@ #include #include "src/common/log_adapter.h" -namespace mindspore::lite { +namespace mindspore { std::shared_ptr Allocator::Create() { return std::shared_ptr(new (std::nothrow) DefaultAllocator()); } @@ -110,4 +110,4 @@ void DefaultAllocator::Clear() { freeList_.clear(); UnLock(); } -} // namespace mindspore::lite +} // namespace mindspore diff --git a/mindspore/lite/src/runtime/allocator.h b/mindspore/lite/src/runtime/allocator.h index 90fb76b144..dbd4568a99 100644 --- a/mindspore/lite/src/runtime/allocator.h +++ b/mindspore/lite/src/runtime/allocator.h @@ -25,7 +25,8 @@ #include #include -namespace mindspore::lite { +namespace mindspore { + struct AllocatorContext { int shiftFactor; bool lockFlag; @@ -75,6 +76,6 @@ class DefaultAllocator : public Allocator { constexpr int64_t MAX_MALLOC_SIZE = static_cast(2000) * 1024 * 1024; constexpr int64_t MAX_THREAD_POOL_SIZE = static_cast(3000) * 1024 * 1024; -} // namespace mindspore::lite +} // namespace mindspore #endif // MINDSPORE_LITE_SRC_RUNTIME_ALLOCATOR_H_ diff --git 
diff --git a/mindspore/lite/src/runtime/gpu/gpu_runtime.h b/mindspore/lite/src/runtime/gpu/gpu_runtime.h
index 51aad9f220..7ad348e86c 100644
--- a/mindspore/lite/src/runtime/gpu/gpu_runtime.h
+++ b/mindspore/lite/src/runtime/gpu/gpu_runtime.h
@@ -64,7 +64,7 @@ class DevKernel {
  public:
  void *data{nullptr};
 };
-class GpuAllocator : public Allocator {};
+class GpuAllocator : public mindspore::Allocator {};
 class GpuRuntime {
  public:
  GpuRuntime() {}
diff --git a/mindspore/lite/src/runtime/gpu/opencl/opencl_allocator.h b/mindspore/lite/src/runtime/gpu/opencl/opencl_allocator.h
index a9b1026caa..0249506f78 100644
--- a/mindspore/lite/src/runtime/gpu/opencl/opencl_allocator.h
+++ b/mindspore/lite/src/runtime/gpu/opencl/opencl_allocator.h
@@ -40,7 +40,7 @@ struct ImageSize {
   }
 };
-class OpenCLAllocator : public Allocator {
+class OpenCLAllocator : public mindspore::Allocator {
  public:
  explicit OpenCLAllocator(OpenCLRuntime *ocl_runtime);
  ~OpenCLAllocator() override;
diff --git a/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.cc b/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.cc
index b07ddf1335..3786b090c7 100644
--- a/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.cc
+++ b/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.cc
@@ -22,13 +22,13 @@ namespace mindspore::lite::opencl {
 int OpenCLExecutor::Run(const std::vector &inputs, const std::vector &outputs,
-                        const std::vector &kernels, Allocator *allocator,
+                        const std::vector &kernels, mindspore::Allocator *allocator,
                         const KernelCallBack &before, const KernelCallBack &after) {
   return RunOrTune(inputs, outputs, kernels, allocator, before, after, false);
 }
 int OpenCLExecutor::RunOrTune(const std::vector &inputs, const std::vector &outputs,
-                              const std::vector &kernels, Allocator *allocator,
+                              const std::vector &kernels, mindspore::Allocator *allocator,
                               const KernelCallBack &before, const KernelCallBack &after, bool is_tune) {
   int ret{RET_OK};
   auto opencl_runtime_ins = ocl_runtime.GetInstance();
diff --git a/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.h b/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.h
index fa40750450..291e030301 100644
--- a/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.h
+++ b/mindspore/lite/src/runtime/gpu/opencl/opencl_executor.h
@@ -32,10 +32,10 @@ class OpenCLExecutor : public Executor {
   int Prepare(const std::vector &kernels) override { return RET_OK; }
   int Run(const std::vector &inputs, const std::vector &outputs,
-          const std::vector &kernels, Allocator *allocator = nullptr,
+          const std::vector &kernels, mindspore::Allocator *allocator = nullptr,
           const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
   int RunOrTune(const std::vector &inputs, const std::vector &outputs,
-                const std::vector &kernels, Allocator *allocator = nullptr,
+                const std::vector &kernels, mindspore::Allocator *allocator = nullptr,
                 const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr, bool is_tune = false);
  protected:
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc
index 0934479802..f1b06868d3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc
@@ -73,7 +73,7 @@ int GatherFp16CPUKernel::PreProcess() {
   for (auto *output : this->out_tensors()) {
     MS_ASSERT(output != nullptr);
-    if (output->ElementsNum() >= lite::MAX_MALLOC_SIZE / static_cast(sizeof(int64_t))) {
+    if (output->ElementsNum() >= MAX_MALLOC_SIZE / static_cast(sizeof(int64_t))) {
       MS_LOG(ERROR) << "The size of output tensor is too big";
       return RET_ERROR;
     }
diff --git a/mindspore/lite/src/runtime/parallel_executor.cc b/mindspore/lite/src/runtime/parallel_executor.cc
index 18cc0860b1..b30cd2b49e 100644
--- a/mindspore/lite/src/runtime/parallel_executor.cc
+++ b/mindspore/lite/src/runtime/parallel_executor.cc
@@ -49,7 +49,7 @@ static int RunKernel(void *data, int index) {
 }
 int ParallelExecutor::Run(const std::vector &in_tensors, const std::vector &out_tensors,
-                          const std::vector &kernels, Allocator *allocator,
+                          const std::vector &kernels, mindspore::Allocator *allocator,
                           const KernelCallBack &before, const KernelCallBack &after) {
   MS_ASSERT(nullptr != allocator);
   for (auto &inTensor : in_tensors) {
diff --git a/mindspore/lite/src/runtime/parallel_executor.h b/mindspore/lite/src/runtime/parallel_executor.h
index cc428dd699..3b68ec6f65 100644
--- a/mindspore/lite/src/runtime/parallel_executor.h
+++ b/mindspore/lite/src/runtime/parallel_executor.h
@@ -33,7 +33,7 @@ class ParallelExecutor : public Executor {
   int Prepare(const std::vector &kernels) override;
   int Run(const std::vector &in_tensors, const std::vector &out_tensors,
-          const std::vector &kernels, Allocator *allocator = nullptr,
+          const std::vector &kernels, mindspore::Allocator *allocator = nullptr,
           const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
   inline kernel::LiteKernel *GetReadyKernel(const int index) const { return readyKernels.at(index); }
   inline void SetResult(const int index, const int result) { results.at(index) = result; }
diff --git a/mindspore/lite/src/sub_graph_kernel.h b/mindspore/lite/src/sub_graph_kernel.h
index e291d1e43d..7f7ac44fc4 100644
--- a/mindspore/lite/src/sub_graph_kernel.h
+++ b/mindspore/lite/src/sub_graph_kernel.h
@@ -33,9 +33,9 @@ namespace mindspore::kernel {
 // store origin data and allocator of input tensor of subgraph for PreProcess and PostProcess
 struct DataStore {
   void *data_ = nullptr;
-  lite::Allocator *allocator_ = nullptr;
-  static DataStore *CreateDataStore(void *data = nullptr, lite::Allocator *data_allocator = nullptr,
-                                    lite::Allocator *allocator = nullptr) {
+  mindspore::Allocator *allocator_ = nullptr;
+  static DataStore *CreateDataStore(void *data = nullptr, mindspore::Allocator *data_allocator = nullptr,
+                                    mindspore::Allocator *allocator = nullptr) {
     DataStore *data_store = nullptr;
     if (allocator == nullptr) {
       data_store = static_cast(malloc(sizeof(DataStore)));
diff --git a/mindspore/lite/src/tensor.cc b/mindspore/lite/src/tensor.cc
index f71fb7c94f..523d6f696f 100644
--- a/mindspore/lite/src/tensor.cc
+++ b/mindspore/lite/src/tensor.cc
@@ -29,11 +29,6 @@ namespace lite {
 Tensor::Tensor(const TypeId data_type, std::vector shape, const schema::Format &format, Category category)
     : data_type_(data_type), shape_(std::move(shape)), format_(format), category_(category) {}
-Tensor::Tensor(const std::string &name, enum TypeId type, const std::vector &shape, const void *data)
-    : tensor_name_(name), data_type_(type), shape_(std::move(shape)), category_(VAR) {
-  data_ = const_cast(data);
-}
-
 int Tensor::CopyTensorData(const Tensor &src_tensor, Tensor *dst_tensor) {
   if (dst_tensor == nullptr) {
     MS_LOG(ERROR) << "dst_tensor is nullptr";
@@ -298,12 +293,12 @@ int Tensor::set_root_tensor(Tensor *tensor) {
   return RET_OK;
 }
-int Tensor::MallocData(const mindspore::lite::Allocator *allocator) {
+int Tensor::MallocData(const mindspore::Allocator *allocator) {
   if (nullptr != this->data_) {
     return RET_OK;
   }
   if (allocator != nullptr) {
-    allocator_ = const_cast(allocator);
+    allocator_ = const_cast(allocator);
   }
   if (allocator_ == nullptr) {
     this->data_ = malloc(this->Size());
@@ -380,5 +375,21 @@ std::vector TensorVectorCast(const std::vector &sr
   std::transform(src.begin(), src.end(), target.begin(), [](Tensor *t) { return dynamic_cast(t); });
   return target;
 }
+
 }  // namespace lite
+
+tensor::MSTensor *tensor::MSTensor::CreateTensor(const std::string &name, TypeId type, const std::vector &shape,
+                                                 const void *data, size_t data_len) {
+  auto tensor = new (std::nothrow) lite::Tensor();
+  if (tensor == nullptr) {
+    MS_LOG(ERROR) << "Failed to allocate tensor.";
+    return nullptr;
+  }
+  tensor->set_data(const_cast(data));
+  tensor->set_shape(shape);
+  tensor->set_tensor_name(name);
+  tensor->set_data_type(type);
+  return tensor;
+}
+
 }  // namespace mindspore
diff --git a/mindspore/lite/src/tensor.h b/mindspore/lite/src/tensor.h
index a224d7c34e..d14ec20283 100644
--- a/mindspore/lite/src/tensor.h
+++ b/mindspore/lite/src/tensor.h
@@ -58,8 +58,6 @@ class Tensor : public mindspore::tensor::MSTensor {
   Tensor(TypeId data_type, std::vector shape, const schema::Format &format = schema::Format::Format_NHWC,
          Category category = VAR);
-  Tensor(const std::string &name, enum TypeId type, const std::vector &shape, const void *data);
-
   Tensor(const Tensor &tensor) = delete;
   Tensor(Tensor &&other) = delete;
@@ -86,9 +84,9 @@ class Tensor : public mindspore::tensor::MSTensor {
   std::vector shape() const override { return shape_; }
-  void set_shape(const std::vector &shape) { shape_ = shape; }
+  void set_shape(const std::vector &shape) override { shape_ = shape; }
-  int DimensionSize(size_t index) const override;
+  int DimensionSize(size_t index) const;
   int ElementsNum() const override;
@@ -104,16 +102,18 @@ class Tensor : public mindspore::tensor::MSTensor {
   size_t Size() const override;
-  void set_allocator(mindspore::lite::Allocator *allocator) { allocator_ = allocator; }
+  void set_allocator(mindspore::Allocator *allocator) { allocator_ = allocator; }
-  mindspore::lite::Allocator *allocator() const { return this->allocator_; }
+  mindspore::Allocator *allocator() const { return this->allocator_; }
-  virtual int MallocData(const mindspore::lite::Allocator *allocator = nullptr);
+  virtual int MallocData(const mindspore::Allocator *allocator = nullptr);
   virtual void FreeData();
   void *MutableData() override;
+  void *data() override { return this->data_; }
+
   virtual void *data_c() const {
     if (this->root_tensor_ != nullptr) {
       return this->root_tensor_->data_;
@@ -206,7 +206,7 @@ class Tensor : public mindspore::tensor::MSTensor {
   size_t init_ref_count_ = 0;
   std::vector quant_params_;
   std::vector quant_clusters_;
-  mindspore::lite::Allocator *allocator_ = nullptr;
+  mindspore::Allocator *allocator_ = nullptr;
   Tensor *root_tensor_ = nullptr;
   bool enable_huffman_code_ = false;
 };
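Illustrative usage (not part of this patch): with the string-based lite::Tensor constructor removed, callers are expected to go through the new tensor::MSTensor::CreateTensor factory added above. A minimal sketch; the shape element type and the ownership handling are assumptions, since the template arguments are not shown here:

  std::vector<int> shape = {2, 2};
  std::vector<float> data = {1.0f, 2.0f, 3.0f, 4.0f};
  // The factory wraps the caller's buffer via set_data() rather than copying it,
  // so `data` has to outlive the returned tensor.
  auto *ms_tensor = mindspore::tensor::MSTensor::CreateTensor(
      "add_input", mindspore::kNumberTypeFloat32, shape, data.data(), data.size() * sizeof(float));
  if (ms_tensor != nullptr) {
    // ... hand the tensor to the runtime ...
    delete ms_tensor;  // assumption: the caller releases what it created
  }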
diff --git a/mindspore/lite/src/tensorlist.cc b/mindspore/lite/src/tensorlist.cc
index 0875714483..531fc128a7 100644
--- a/mindspore/lite/src/tensorlist.cc
+++ b/mindspore/lite/src/tensorlist.cc
@@ -113,9 +113,9 @@ int TensorList::MallocTensorListData(TypeId dtype, const std::vector(allocator);
+    allocator_ = const_cast(allocator);
   }
   // malloc data buf of each tensor in tensors_
   for (int i = 0; i < this->ElementsNum(); ++i) {
diff --git a/mindspore/lite/src/tensorlist.h b/mindspore/lite/src/tensorlist.h
index e8529ffafe..1fae3348aa 100644
--- a/mindspore/lite/src/tensorlist.h
+++ b/mindspore/lite/src/tensorlist.h
@@ -77,7 +77,7 @@ class TensorList : public Tensor {
   int MallocTensorListData(TypeId dtype, const std::vector > &tensor_shape);
-  int MallocData(const mindspore::lite::Allocator *allocator = nullptr) override;
+  int MallocData(const mindspore::Allocator *allocator = nullptr) override;
   int FreeTensorListData();
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc
index 7da0c81eda..5858141fdb 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc
@@ -22,7 +22,6 @@
 #include "mindspore/lite/src/kernel_registry.h"
 #include "mindspore/lite/src/runtime/allocator.h"
-using mindspore::lite::Allocator;
 using mindspore::lite::Tensor;
 using mindspore::schema::ReduceMode;
 using mindspore::schema::ReduceMode_ReduceASum;
diff --git a/tests/st/cpp/common/common_test.cc b/tests/st/cpp/common/common_test.cc
index 24a999a0ba..0321cc0f78 100644
--- a/tests/st/cpp/common/common_test.cc
+++ b/tests/st/cpp/common/common_test.cc
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 #include "common/common_test.h"
-#include "include/api/context.h"
 #ifdef __cplusplus
 #if __cplusplus
@@ -58,10 +57,10 @@ void Common::ReadFile(const char *file, size_t *size, char **buf) {
   ifs.close();
 }
-void Common::ContextAutoSet() {
-  auto device_target = GetEnv("DEVICE_TARGET");
-  if (device_target.empty()) {
-    device_target = mindspore::kDeviceTypeAscend310;  // default is 310
+std::shared_ptr Common::ContextAutoSet() {
+  auto device_target_str = GetEnv("DEVICE_TARGET");
+  if (device_target_str.empty()) {
+    device_target_str = "Ascend310";  // default is 310
   }
   auto device_id_str = GetEnv("DEVICE_ID");
@@ -69,9 +68,21 @@
     device_id_str = "0";  // default is 0
   }
   uint32_t device_id = std::strtoul(device_id_str.c_str(), nullptr, 10);
+  auto context = std::make_shared();
-  mindspore::GlobalContext::SetGlobalDeviceTarget(device_target);
-  mindspore::GlobalContext::SetGlobalDeviceID(device_id);
+  if (device_target_str == "Ascend310") {
+    auto ascend310_info = std::make_shared();
+    ascend310_info->SetDeviceID(device_id);
+    context->MutableDeviceInfo().emplace_back(ascend310_info);
+  } else if (device_target_str == "Ascend910") {
+    auto ascend310_info = std::make_shared();
+    ascend310_info->SetDeviceID(device_id);
+    context->MutableDeviceInfo().emplace_back(ascend310_info);
+  } else {
+    return context;
+  }
+
+  return context;
 }
 }  // namespace ST
diff --git a/tests/st/cpp/common/common_test.h b/tests/st/cpp/common/common_test.h
index 9dc3417a04..388975c125 100644
--- a/tests/st/cpp/common/common_test.h
+++ b/tests/st/cpp/common/common_test.h
@@ -20,6 +20,8 @@
 #include
 #include
 #include "gtest/gtest.h"
+#include "include/api/context.h"
+
 namespace ST {
 class Common : public testing::Test {
  public:
@@ -56,7 +58,7 @@ class Common : public testing::Test {
   void ReadFile(const char *file, size_t *size, char **buf);
-  void ContextAutoSet();
+  std::shared_ptr ContextAutoSet();
 };
 }  // namespace ST
 #endif  // TESTS_CXX_ST_COMMON_COMMON_TEST_H_
diff --git a/tests/st/cpp/dataset/test_de.cc b/tests/st/cpp/dataset/test_de.cc
index 08068f2661..79e6c7b083 100644
--- a/tests/st/cpp/dataset/test_de.cc
+++ b/tests/st/cpp/dataset/test_de.cc
@@ -98,6 +98,12 @@ TEST_F(TestDE, TestDvpp) {
   ASSERT_TRUE(rc.IsOk());
   auto image = MSTensor(std::make_shared(de_tensor));
   */
+  auto context = ContextAutoSet();
+  ASSERT_TRUE(context != nullptr);
+  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
+  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast();
+  ASSERT_TRUE(ascend310_info != nullptr);
+  auto device_id = ascend310_info->GetDeviceID();
   auto image = ReadFileToTensor("./data/dataset/apple.jpg");
@@ -105,7 +111,7 @@ TEST_F(TestDE, TestDvpp) {
   std::vector crop_paras = {224, 224};
   std::vector resize_paras = {256, 256};
   std::shared_ptr decode_resize_crop(new vision::DvppDecodeResizeCropJpeg(crop_paras, resize_paras));
-  mindspore::dataset::Execute Transform(decode_resize_crop, MapTargetDevice::kAscend310);
+  mindspore::dataset::Execute Transform(decode_resize_crop, MapTargetDevice::kAscend310, device_id);
   // Apply transform on images
   Status rc = Transform(image, &image);
@@ -145,6 +151,13 @@ TEST_F(TestDE, TestDvpp) {
 TEST_F(TestDE, TestDvppSinkMode) {
 #ifdef ENABLE_ACL
+  auto context = ContextAutoSet();
+  ASSERT_TRUE(context != nullptr);
+  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
+  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast();
+  ASSERT_TRUE(ascend310_info != nullptr);
+  auto device_id = ascend310_info->GetDeviceID();
+
   // Read images from target directory
   auto image = ReadFileToTensor("./data/dataset/apple.jpg");
@@ -155,7 +168,7 @@ TEST_F(TestDE, TestDvppSinkMode) {
   std::shared_ptr resize(new vision::Resize(resize_paras));
   std::shared_ptr centercrop(new vision::CenterCrop(crop_paras));
   std::vector> trans_list = {decode, resize, centercrop};
-  mindspore::dataset::Execute Transform(trans_list, MapTargetDevice::kAscend310);
+  mindspore::dataset::Execute Transform(trans_list, MapTargetDevice::kAscend310, device_id);
   // Apply transform on images
   Status rc = Transform(image, &image);
@@ -186,6 +199,13 @@ TEST_F(TestDE, TestDvppSinkMode) {
 TEST_F(TestDE, TestDvppDecodeResizeCropNormalize) {
 #ifdef ENABLE_ACL
+  auto context = ContextAutoSet();
+  ASSERT_TRUE(context != nullptr);
+  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
+  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast();
+  ASSERT_TRUE(ascend310_info != nullptr);
+  auto device_id = ascend310_info->GetDeviceID();
+
   auto image = ReadFileToTensor("./data/dataset/apple.jpg");
   // Define dvpp transform
@@ -200,7 +220,7 @@ TEST_F(TestDE, TestDvppDecodeResizeCropNormalize) {
   std::shared_ptr normalize(new vision::Normalize(mean, std));
   std::vector> trans_list = {decode, resize, centercrop, normalize};
-  mindspore::dataset::Execute Transform(trans_list, MapTargetDevice::kAscend310);
+  mindspore::dataset::Execute Transform(trans_list, MapTargetDevice::kAscend310, device_id);
   std::string aipp_cfg = Transform.AippCfgGenerator();
   ASSERT_EQ(aipp_cfg, "./aipp.cfg");
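Illustrative usage (not part of this patch): the dataset Execute wrapper now takes the device id explicitly instead of reading it from the former GlobalContext, as the updated TestDE cases above show. A minimal sketch; the TensorTransform base type and the namespace qualifications are assumptions:

  auto ascend_info = context->MutableDeviceInfo()[0]->Cast<mindspore::Ascend310DeviceInfo>();
  uint32_t device_id = ascend_info->GetDeviceID();
  std::shared_ptr<mindspore::dataset::TensorTransform> decode(new mindspore::dataset::vision::Decode());
  // The third argument is the device id recovered from the context.
  mindspore::dataset::Execute transform(decode, mindspore::dataset::MapTargetDevice::kAscend310, device_id);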
diff --git a/tests/st/cpp/model/test_dynamic_batch_size.cc b/tests/st/cpp/model/test_dynamic_batch_size.cc
index 57462207cd..8f79fa1838 100644
--- a/tests/st/cpp/model/test_dynamic_batch_size.cc
+++ b/tests/st/cpp/model/test_dynamic_batch_size.cc
@@ -24,62 +24,68 @@ using namespace mindspore;
 static const char tensor_add_file[] = "/home/workspace/mindspore_dataset/mindir/add/add.mindir";
-static const float input_data_1[2][2] = {{1,2},{3,4}};
-static const float input_data_2[2][2] = {{2,3},{4,5}};
-static const float input_data_3[1] ={2};
+static const float input_data_1[2][2] = {{1, 2}, {3, 4}};
+static const float input_data_2[2][2] = {{2, 3}, {4, 5}};
+static const float input_data_3[1] = {2};
 class TestDynamicBatchSize : public ST::Common {
  public:
-  TestDynamicBatchSize() {}
+  TestDynamicBatchSize() {}
 };
 TEST_F(TestDynamicBatchSize, InferMindIR) {
-  mindspore::GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
-  mindspore::GlobalContext::SetGlobalDeviceID(2);
-  std::map> input_shape;
-  input_shape.insert(std::make_pair(0,std::vector{-1,2}));
-  input_shape.insert(std::make_pair(1,std::vector{-1,2}));
-  auto model_context = std::make_shared();
-  std::vector dynamic_batch_size ={1,2,4,8};
-  ModelContext::SetDynamicBatchSize(model_context,dynamic_batch_size);
-  ModelContext::SetInputShapeMap(model_context,input_shape);
-  auto graph = Serialization::LoadModel(tensor_add_file, ModelType::kMindIR);
-  Model tensor_add(GraphCell(graph),model_context);
-  ASSERT_TRUE(tensor_add.Build() == kSuccess);
+#ifdef ENABLE_ACL
+  auto context = ContextAutoSet();
+  ASSERT_TRUE(context != nullptr);
+  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
+  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast();
+  ASSERT_TRUE(ascend310_info != nullptr);
+
+  std::map> input_shape;
+  input_shape.insert(std::make_pair(0, std::vector{-1, 2}));
+  input_shape.insert(std::make_pair(1, std::vector{-1, 2}));
+  std::vector dynamic_batch_size = {1, 2, 4, 8};
+  ascend310_info->SetDynamicBatchSize(dynamic_batch_size);
+  ascend310_info->SetInputShapeMap(input_shape);
+
+  Graph graph;
+  ASSERT_TRUE(Serialization::Load(tensor_add_file, ModelType::kMindIR, &graph) == kSuccess);
+  Model tensor_add;
+  ASSERT_TRUE(tensor_add.Build(GraphCell(graph), context) == kSuccess);
   // get model inputs
   std::vector origin_inputs = tensor_add.GetInputs();
-  ASSERT_EQ(origin_inputs.size()-1, 2);
+  ASSERT_EQ(origin_inputs.size() - 1, 2);
   // prepare input
   std::vector outputs;
   std::vector inputs;
-  size_t row = sizeof(input_data_1)/sizeof(input_data_1[0]);
-  size_t col = sizeof(input_data_1[0])/sizeof(input_data_1[0][0]);;
-  inputs.emplace_back(origin_inputs[0].Name(), origin_inputs[0].DataType(), origin_inputs[0].Shape(),
-                      input_data_1, sizeof(float) * row*col);
-  inputs.emplace_back(origin_inputs[1].Name(), origin_inputs[1].DataType(), origin_inputs[1].Shape(),
-                      input_data_2, sizeof(float) * row*col);
-  inputs.emplace_back(origin_inputs[2].Name(), origin_inputs[2].DataType(), origin_inputs[2].Shape(),
-                      input_data_3, sizeof(float) * 1);
+  size_t row = sizeof(input_data_1) / sizeof(input_data_1[0]);
+  size_t col = sizeof(input_data_1[0]) / sizeof(input_data_1[0][0]);
+  inputs.emplace_back(origin_inputs[0].Name(), origin_inputs[0].DataType(), origin_inputs[0].Shape(), input_data_1,
+                      sizeof(float) * row * col);
+  inputs.emplace_back(origin_inputs[1].Name(), origin_inputs[1].DataType(), origin_inputs[1].Shape(), input_data_2,
+                      sizeof(float) * row * col);
+  inputs.emplace_back(origin_inputs[2].Name(), origin_inputs[2].DataType(), origin_inputs[2].Shape(), input_data_3,
+                      sizeof(float) * 1);
   // infer
   ASSERT_TRUE(tensor_add.Predict(inputs, &outputs) == kSuccess);
   // assert input
   inputs = tensor_add.GetInputs();
-  ASSERT_EQ(inputs.size()-1, 2);
+  ASSERT_EQ(inputs.size() - 1, 2);
   auto after_input_data_1 = inputs[0].Data();
   auto after_input_data_2 = inputs[1].Data();
   const float *p = reinterpret_cast(after_input_data_1.get());
-  float input_data1[inputs[0].DataSize() / sizeof(float)] ={0};
-  float input_data2[inputs[1].DataSize() / sizeof(float)] ={0};
-  size_t k=0,t=0;
-  for(size_t i=0;i origin_inputs = tensor_add.GetInputs();
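Illustrative usage (not part of this patch): the model tests now build a Context, load the MindIR into a Graph, and pass both to Model::Build, replacing the old GlobalContext / ModelContext pattern. A minimal sketch; the element type of GetInputs() is an assumption:

  auto context = std::make_shared<mindspore::Context>();
  auto ascend310 = std::make_shared<mindspore::Ascend310DeviceInfo>();
  ascend310->SetDeviceID(0);
  context->MutableDeviceInfo().push_back(ascend310);

  mindspore::Graph graph;
  ASSERT_TRUE(mindspore::Serialization::Load(tensor_add_file, mindspore::ModelType::kMindIR, &graph) ==
              mindspore::kSuccess);
  mindspore::Model model;
  ASSERT_TRUE(model.Build(mindspore::GraphCell(graph), context) == mindspore::kSuccess);

  std::vector<mindspore::MSTensor> inputs = model.GetInputs();  // element type assumed
  std::vector<mindspore::MSTensor> outputs;
  ASSERT_TRUE(model.Predict(inputs, &outputs) == mindspore::kSuccess);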
diff --git a/tests/st/cpp/model/test_zero_copy.cc b/tests/st/cpp/model/test_zero_copy.cc
index 5b13bd7d63..ff4d82ab1e 100644
--- a/tests/st/cpp/model/test_zero_copy.cc
+++ b/tests/st/cpp/model/test_zero_copy.cc
@@ -51,46 +51,49 @@ std::vector GetAllFiles(std::string_view dir_name);
 TEST_F(TestZeroCopy, TestMindIR) {
 #ifdef ENABLE_ACL
-  // Set context
-  mindspore::GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
-  mindspore::GlobalContext::SetGlobalDeviceID(0);
-  auto model_context = std::make_shared();
-  ModelContext::SetInsertOpConfigPath(model_context,aipp_path);
-  // Define model
-  auto graph = mindspore::Serialization::LoadModel(resnet_file, mindspore::ModelType::kMindIR);
-  mindspore::Model resnet50(mindspore::GraphCell(graph),model_context);
-  // Build model
-  ASSERT_TRUE(resnet50.Build() == kSuccess);
-  // Get model info
-  std::vector model_inputs =resnet50.GetInputs();
-  ASSERT_EQ(model_inputs.size(), 1);
-  // Define transform operations
-  std::shared_ptr decode(new vision::Decode());
-  std::shared_ptr resize(new vision::Resize({256}));
-  std::shared_ptr center_crop(new vision::CenterCrop({224,224}));
-  mindspore::dataset::Execute Transform({decode,resize,center_crop},MapTargetDevice::kAscend310);
-  size_t count=0;
-  // Read images
-  std::vector images =GetAllFiles(image_path);
-  for(const auto &image_file:images){
-    // prepare input
-    std::vector inputs;
-    std::vector outputs;
-    std::shared_ptr de_tensor;
-    mindspore::dataset::Tensor::CreateFromFile(image_file, &de_tensor);
-    auto image = mindspore::MSTensor(std::make_shared(de_tensor));
-    // Apply transform on images
-    Status rc = Transform(image, &image);
-    ASSERT_TRUE(rc == kSuccess);
-    inputs.push_back(image);
-    // infer
-    ASSERT_TRUE(resnet50.Predict(inputs, &outputs)==kSuccess);
-    if(GetMax(outputs[0])==0){
-      ++count;
-    }
-    Transform.DeviceMemoryRelease();
+  // Set context
+  auto context = ContextAutoSet();
+  ASSERT_TRUE(context != nullptr);
+  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
+  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast();
+  ASSERT_TRUE(ascend310_info != nullptr);
+  ascend310_info->SetInsertOpConfigPath(aipp_path);
+  auto device_id = ascend310_info->GetDeviceID();
+  // Define model
+  Graph graph;
+  ASSERT_TRUE(Serialization::Load(resnet_file, ModelType::kMindIR, &graph) == kSuccess);
+  Model resnet50;
+  ASSERT_TRUE(resnet50.Build(GraphCell(graph), context) == kSuccess);
+  // Get model info
+  std::vector model_inputs = resnet50.GetInputs();
+  ASSERT_EQ(model_inputs.size(), 1);
+  // Define transform operations
+  std::shared_ptr decode(new vision::Decode());
+  std::shared_ptr resize(new vision::Resize({256}));
+  std::shared_ptr center_crop(new vision::CenterCrop({224, 224}));
+  mindspore::dataset::Execute Transform({decode, resize, center_crop}, MapTargetDevice::kAscend310, device_id);
+  size_t count = 0;
+  // Read images
+  std::vector images = GetAllFiles(image_path);
+  for (const auto &image_file : images) {
+    // prepare input
+    std::vector inputs;
+    std::vector outputs;
+    std::shared_ptr de_tensor;
+    mindspore::dataset::Tensor::CreateFromFile(image_file, &de_tensor);
+    auto image = mindspore::MSTensor(std::make_shared(de_tensor));
+    // Apply transform on images
+    Status rc = Transform(image, &image);
+    ASSERT_TRUE(rc == kSuccess);
+    inputs.push_back(image);
+    // infer
+    ASSERT_TRUE(resnet50.Predict(inputs, &outputs) == kSuccess);
+    if (GetMax(outputs[0]) == 0) {
+      ++count;
     }
-  ASSERT_GE(static_cast(count)/images.size()*100.0, 20.0);
+    Transform.DeviceMemoryRelease();
+  }
+  ASSERT_GE(static_cast(count) / images.size() * 100.0, 20.0);
 #endif
 }
@@ -149,8 +152,7 @@ std::vector GetAllFiles(std::string_view dir_name) {
   while ((filename = readdir(dir)) != nullptr) {
     std::string d_name = std::string(filename->d_name);
     // get rid of "." and ".."
-    if (d_name == "." || d_name == ".." || filename->d_type != DT_REG)
-      continue;
+    if (d_name == "." || d_name == ".." || filename->d_type != DT_REG) continue;
     res.emplace_back(std::string(dir_name) + "/" + filename->d_name);
   }
diff --git a/tests/ut/cpp/cxx_api/context_test.cc b/tests/ut/cpp/cxx_api/context_test.cc
index 204a05dbd6..ea78c7280c 100644
--- a/tests/ut/cpp/cxx_api/context_test.cc
+++ b/tests/ut/cpp/cxx_api/context_test.cc
@@ -23,45 +23,102 @@ class TestCxxApiContext : public UT::Common {
   TestCxxApiContext() = default;
 };
-TEST_F(TestCxxApiContext, test_context_global_context_SUCCESS) {
-  std::string device_target = "2333";
-  uint32_t device_id = 2333;
-  GlobalContext::SetGlobalDeviceTarget(device_target);
-  ASSERT_EQ(GlobalContext::GetGlobalDeviceTarget(), device_target);
-  GlobalContext::SetGlobalDeviceID(device_id);
-  ASSERT_EQ(GlobalContext::GetGlobalDeviceID(), device_id);
+TEST_F(TestCxxApiContext, test_context_device_info_cast_SUCCESS) {
+  std::shared_ptr cpu = std::make_shared();
+  std::shared_ptr mali_gpu = std::make_shared();
+  std::shared_ptr kirin_npu = std::make_shared();
+  std::shared_ptr nvidia_gpu = std::make_shared();
+  std::shared_ptr ascend310 = std::make_shared();
+  std::shared_ptr ascend910 = std::make_shared();
+
+  ASSERT_TRUE(cpu->Cast() != nullptr);
+  ASSERT_TRUE(mali_gpu->Cast() != nullptr);
+  ASSERT_TRUE(kirin_npu->Cast() != nullptr);
+  ASSERT_TRUE(nvidia_gpu->Cast() != nullptr);
+  ASSERT_TRUE(ascend310->Cast() != nullptr);
+  ASSERT_TRUE(ascend910->Cast() != nullptr);
 }
-TEST_F(TestCxxApiContext, test_context_ascend310_context_SUCCESS) {
+TEST_F(TestCxxApiContext, test_context_device_info_cast_FAILED) {
+  std::shared_ptr cpu = std::make_shared();
+  std::shared_ptr mali_gpu = std::make_shared();
+  std::shared_ptr kirin_npu = std::make_shared();
+  std::shared_ptr nvidia_gpu = std::make_shared();
+  std::shared_ptr ascend310 = std::make_shared();
+  std::shared_ptr ascend910 = std::make_shared();
+
+  ASSERT_TRUE(cpu->Cast() == nullptr);
+  ASSERT_TRUE(kirin_npu->Cast() == nullptr);
+  ASSERT_TRUE(nvidia_gpu->Cast() == nullptr);
+  ASSERT_TRUE(ascend310->Cast() == nullptr);
+  ASSERT_TRUE(ascend910->Cast() == nullptr);
+
+  ASSERT_TRUE(mali_gpu->Cast() == nullptr);
+  ASSERT_TRUE(kirin_npu->Cast() == nullptr);
+  ASSERT_TRUE(nvidia_gpu->Cast() == nullptr);
+  ASSERT_TRUE(ascend310->Cast() == nullptr);
+  ASSERT_TRUE(ascend910->Cast() == nullptr);
+}
+
+TEST_F(TestCxxApiContext, test_context_get_set_SUCCESS) {
+  int32_t thread_num = 22;
+  auto context = std::make_shared();
+  context->SetThreadNum(thread_num);
+  ASSERT_EQ(context->GetThreadNum(), thread_num);
+}
+
+TEST_F(TestCxxApiContext, test_context_cpu_context_SUCCESS) {
+  auto context = std::make_shared();
+  std::shared_ptr cpu = std::make_shared();
+  cpu->SetEnableFP16(true);
+  context->MutableDeviceInfo().push_back(cpu);
+  ASSERT_EQ(context->MutableDeviceInfo().size(), 1);
+  auto cpu_2 = context->MutableDeviceInfo()[0]->Cast();
+  ASSERT_TRUE(cpu_2 != nullptr);
+  ASSERT_TRUE(cpu_2->GetEnableFP16());
+}
+
+TEST_F(TestCxxApiContext, test_context_ascend_context_FAILED) {
   std::string option_1 = "aaa";
   std::string option_2 = "vvv";
   std::string option_3 = "www";
-  auto option_4 = DataType::kNumberTypeEnd;
-  std::string option_5 = "rrr";
-  std::string option_6 = "ppp";
-  auto ctx = std::make_shared();
-  ModelContext::SetInsertOpConfigPath(ctx, option_1);
-  ModelContext::SetInputFormat(ctx, option_2);
-  ModelContext::SetInputShape(ctx, option_3);
-  ModelContext::SetOutputType(ctx, option_4);
-  ModelContext::SetPrecisionMode(ctx, option_5);
-  ModelContext::SetOpSelectImplMode(ctx, option_6);
+  std::string option_4 = "rrr";
+  std::string option_5 = "ppp";
+  std::string option_6 = "sss";
+  uint32_t option_7 = 77;
+  enum DataType option_8 = DataType::kNumberTypeInt16;
+  std::vector option_9 = {1, 2, 3, 4, 5};
+  std::string option_9_ans = "1,2,3,4,5";
-  ASSERT_EQ(ModelContext::GetInsertOpConfigPath(ctx), option_1);
-  ASSERT_EQ(ModelContext::GetInputFormat(ctx), option_2);
-  ASSERT_EQ(ModelContext::GetInputShape(ctx), option_3);
-  ASSERT_EQ(ModelContext::GetOutputType(ctx), option_4);
-  ASSERT_EQ(ModelContext::GetPrecisionMode(ctx), option_5);
-  ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), option_6);
-}
+  auto context = std::make_shared();
+  std::shared_ptr ascend310 = std::make_shared();
+  ascend310->SetInputShape(option_1);
+  ascend310->SetInsertOpConfigPath(option_2);
+  ascend310->SetOpSelectImplMode(option_3);
+  ascend310->SetPrecisionMode(option_4);
+  ascend310->SetInputFormat(option_5);
+  ascend310->SetFusionSwitchConfigPath(option_6);
+  ascend310->SetDeviceID(option_7);
+  ascend310->SetOutputType(option_8);
+  ascend310->SetDynamicBatchSize(option_9);
-TEST_F(TestCxxApiContext, test_context_ascend310_context_nullptr_FAILED) {
-  auto ctx = std::make_shared();
-  EXPECT_ANY_THROW(ModelContext::GetInsertOpConfigPath(nullptr));
+  context->MutableDeviceInfo().push_back(ascend310);
+  ASSERT_EQ(context->MutableDeviceInfo().size(), 1);
+  auto ctx = context->MutableDeviceInfo()[0]->Cast();
+  ASSERT_TRUE(ctx != nullptr);
+  ASSERT_EQ(ascend310->GetInputShape(), option_1);
+  ASSERT_EQ(ascend310->GetInsertOpConfigPath(), option_2);
+  ASSERT_EQ(ascend310->GetOpSelectImplMode(), option_3);
+  ASSERT_EQ(ascend310->GetPrecisionMode(), option_4);
+  ASSERT_EQ(ascend310->GetInputFormat(), option_5);
+  ASSERT_EQ(ascend310->GetFusionSwitchConfigPath(), option_6);
+  ASSERT_EQ(ascend310->GetDeviceID(), option_7);
+  ASSERT_EQ(ascend310->GetOutputType(), option_8);
+  ASSERT_EQ(ascend310->GetDynamicBatchSize(), option_9_ans);
 }
 TEST_F(TestCxxApiContext, test_context_ascend310_context_default_value_SUCCESS) {
-  auto ctx = std::make_shared();
-  ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), "");
+  auto ctx = std::make_shared();
+  ASSERT_EQ(ctx->GetOpSelectImplMode(), "");
 }
 }  // namespace mindspore
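Illustrative usage (not part of this patch): the new Context aggregates per-device settings through DeviceInfoContext objects, which the unit tests above exercise. A minimal sketch of the intended call pattern; the Cast<T>() template arguments shown here are assumptions:

  auto context = std::make_shared<mindspore::Context>();
  context->SetThreadNum(2);

  auto ascend310 = std::make_shared<mindspore::Ascend310DeviceInfo>();
  ascend310->SetDeviceID(0);
  context->MutableDeviceInfo().push_back(ascend310);

  // Consumers later recover the concrete device info from the generic list.
  auto info = context->MutableDeviceInfo()[0]->Cast<mindspore::Ascend310DeviceInfo>();
  if (info != nullptr) {
    uint32_t device_id = info->GetDeviceID();
    (void)device_id;
  }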
diff --git a/tests/ut/cpp/cxx_api/types_test.cc b/tests/ut/cpp/cxx_api/types_test.cc
index d6c8a7d911..e6ec0c0201 100644
--- a/tests/ut/cpp/cxx_api/types_test.cc
+++ b/tests/ut/cpp/cxx_api/types_test.cc
@@ -76,12 +76,13 @@ TEST_F(TestCxxApiTypes, test_tensor_ref_SUCCESS) {
 TEST_F(TestCxxApiTypes, test_tensor_clone_SUCCESS) {
   std::vector data = {1, 2, 3, 4};
   MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t));
-  MSTensor tensor2 = tensor.Clone();
-  auto value = tensor2.Data();
+  MSTensor *tensor2 = tensor.Clone();
+  auto value = tensor2->Data();
   int32_t *p = (int32_t *)value.get();
   for (size_t i = 0; i < data.size(); ++i) {
     ASSERT_EQ(p[i], data[i]);
   }
+  MSTensor::DestroyTensorPtr(tensor2);
 }
 TEST_F(TestCxxApiTypes, test_tensor_ref_modified_SUCCESS) {
@@ -101,37 +102,76 @@ TEST_F(TestCxxApiTypes, test_tensor_clone_modified_SUCCESS) {
   std::vector data = {1, 2, 3, 4};
   std::vector data_modified = {2, 3, 4, 5};
   MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t));
-  MSTensor tensor2 = tensor.Clone();
+  MSTensor *tensor2 = tensor.Clone();
+  ASSERT_TRUE(tensor2 != nullptr);
   (void)memcpy(tensor.MutableData(), data_modified.data(), data_modified.size() * sizeof(int32_t));
-  auto value = tensor2.Data();
+  auto value = tensor2->Data();
   int32_t *p = (int32_t *)value.get();
   for (size_t i = 0; i < data.size(); ++i) {
     ASSERT_EQ(p[i], data[i]);
   }
+  MSTensor::DestroyTensorPtr(tensor2);
 }
 TEST_F(TestCxxApiTypes, test_tensor_ref_creator_function_SUCCESS) {
   std::vector data = {1, 2, 3, 4};
-  MSTensor tensor =
+  MSTensor *tensor =
     MSTensor::CreateRefTensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t));
+  ASSERT_TRUE(tensor != nullptr);
   data = {3, 4, 5, 6};
-  auto value = tensor.Data();
+  auto value = tensor->Data();
   int32_t *p = (int32_t *)value.get();
   for (size_t i = 0; i < data.size(); ++i) {
     ASSERT_EQ(p[i], data[i]);
   }
+  MSTensor::DestroyTensorPtr(tensor);
 }
 TEST_F(TestCxxApiTypes, test_tensor_creator_function_SUCCESS) {
   std::vector data = {1, 2, 3, 4};
-  MSTensor tensor =
+  MSTensor *tensor =
     MSTensor::CreateTensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t));
+  ASSERT_TRUE(tensor != nullptr);
   data = {3, 4, 5, 6};
-  auto value = tensor.Data();
+  auto value = tensor->Data();
   int32_t *p = (int32_t *)value.get();
   for (size_t i = 0; i < data.size(); ++i) {
     ASSERT_NE(p[i], data[i]);
   }
+  MSTensor::DestroyTensorPtr(tensor);
+}
+
+TEST_F(TestCxxApiTypes, test_tensor_string_tensor_SUCCESS) {
+  std::string tensor_name = "tensor_name";
+  std::vector origin_strs;
+  origin_strs.emplace_back("qwe");
+  origin_strs.emplace_back("asd");
+  origin_strs.emplace_back("");
+  origin_strs.emplace_back("zxc");
+  auto tensor = MSTensor::StringsToTensor(tensor_name, origin_strs);
+  ASSERT_TRUE(tensor != nullptr);
+  ASSERT_EQ(tensor->Name(), tensor_name);
+  auto new_strs = MSTensor::TensorToStrings(*tensor);
+  ASSERT_EQ(new_strs.size(), origin_strs.size());
+  for (size_t i = 0; i < new_strs.size(); ++i) {
+    ASSERT_EQ(new_strs[i], origin_strs[i]);
+  }
+}
+
+TEST_F(TestCxxApiTypes, test_tensor_empty_string_tensor_SUCCESS) {
+  std::string tensor_name = "tensor_name";
+  std::vector origin_strs;
+  auto tensor = MSTensor::StringsToTensor(tensor_name, origin_strs);
+  ASSERT_TRUE(tensor != nullptr);
+  ASSERT_EQ(tensor->Name(), tensor_name);
+  auto new_strs = MSTensor::TensorToStrings(*tensor);
+  ASSERT_EQ(new_strs.size(), origin_strs.size());
+}
+
+TEST_F(TestCxxApiTypes, test_tensor_string_tensor_invalid_type_FAILED) {
+  MSTensor tensor("", DataType::kNumberTypeInt32, {1}, nullptr, sizeof(int32_t));
+  auto new_strs = MSTensor::TensorToStrings(tensor);
+  ASSERT_TRUE(new_strs.empty());
 }
 TEST_F(TestCxxApiTypes, test_buffer_data_ref_and_copy_SUCCESS) {