diff --git a/cmake/package.cmake b/cmake/package.cmake
index b77068157ca..87b30bffa86 100644
--- a/cmake/package.cmake
+++ b/cmake/package.cmake
@@ -65,7 +65,7 @@ install(
 install(
     TARGETS mindspore_shared_lib
-    LIBRARY DESTINATION ${INSTALL_LIB_DIR}
+    DESTINATION ${INSTALL_LIB_DIR}
     COMPONENT mindspore
 )
@@ -327,7 +327,7 @@ install(
         ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/transforms.h
         ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/vision.h
         ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/vision_lite.h
-        ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h
+        ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/execute.h
     DESTINATION ${INSTALL_BASE_DIR}/include/minddata/dataset/include
     COMPONENT mindspore
 )
diff --git a/cmake/package_lite.cmake b/cmake/package_lite.cmake
index 4dc3e099019..c7a0aaf18df 100644
--- a/cmake/package_lite.cmake
+++ b/cmake/package_lite.cmake
@@ -109,6 +109,8 @@ if(PLATFORM_ARM64)
             COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
             COMPONENT ${RUNTIME_COMPONENT_NAME})
+    install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend* ops*" EXCLUDE)
     if(ENABLE_TOOLS)
         install(TARGETS benchmark RUNTIME DESTINATION ${RUNTIME_PKG_NAME}/benchmark COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
@@ -128,6 +130,8 @@ elseif(PLATFORM_ARM32)
            COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
             COMPONENT ${RUNTIME_COMPONENT_NAME})
+    install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE)
     if(ENABLE_TOOLS)
         install(TARGETS benchmark RUNTIME DESTINATION ${RUNTIME_PKG_NAME}/benchmark COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
@@ -162,6 +166,8 @@ elseif(WIN32)
     endif()
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
             COMPONENT ${RUNTIME_COMPONENT_NAME})
+    install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE)
     set(WIN_LIB_DIR_RUN_X86 ${RUNTIME_PKG_NAME}/benchmark)
     install(FILES ${TOP_DIR}/build/mindspore/src/libmindspore-lite.a DESTINATION ${WIN_LIB_DIR_RUN_X86}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
@@ -182,6 +188,8 @@ else()
     endif()
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
             COMPONENT ${RUNTIME_COMPONENT_NAME})
+    install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE)
     install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR}
diff --git a/include/api/cell.h b/include/api/cell.h
index 096bb8b1a9c..3039fa816bb 100644
--- a/include/api/cell.h
+++ b/include/api/cell.h
@@ -24,7 +24,6 @@
 #include "include/api/graph.h"
 
 namespace mindspore {
-namespace api {
 class InputAndOutput;
 using Input = InputAndOutput;
 using Output = InputAndOutput;
@@ -35,7 +34,7 @@ class MS_API CellBase {
   virtual ~CellBase() = default;
   virtual std::vector<Output> Construct(const std::vector<Input> &inputs) { return {}; }
   virtual std::shared_ptr<CellBase> Clone() const = 0;
-  virtual Status Run(const std::vector<Buffer> &inputs, std::vector<Buffer> *outputs) { return SUCCESS; }
+  virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { return kSuccess; }
   std::vector<Output> operator()(const std::vector<Input> &inputs) const;
 };
 
@@ -57,16 +56,16 @@ class MS_API ParameterCell final : public Cell<ParameterCell> {
   ParameterCell(ParameterCell &&);
   ParameterCell &operator=(ParameterCell &&);
 
-  explicit ParameterCell(const Tensor &);
-  ParameterCell &operator=(const Tensor &);
+  explicit ParameterCell(const MSTensor &);
+  ParameterCell &operator=(const MSTensor &);
 
-  explicit ParameterCell(Tensor &&);
-  ParameterCell &operator=(Tensor &&);
+  explicit ParameterCell(MSTensor &&);
+  ParameterCell &operator=(MSTensor &&);
 
-  Tensor GetTensor() const { return tensor_; }
+  MSTensor GetTensor() const { return tensor_; }
 
  private:
-  Tensor tensor_;
+  MSTensor tensor_;
 };
 
 class MS_API OpCellBase : public CellBase {
@@ -99,11 +98,9 @@ class MS_API GraphCell final : public Cell<GraphCell> {
   explicit GraphCell(const std::shared_ptr<Graph> &);
 
   const std::shared_ptr<Graph> &GetGraph() const { return graph_; }
-  Status Run(const std::vector<Buffer> &inputs, std::vector<Buffer> *outputs) override;
-  Status GetInputsInfo(std::vector<std::string> *names, std::vector<std::vector<int64_t>> *shapes,
-                       std::vector<DataType> *data_types, std::vector<size_t> *mem_sizes) const;
-  Status GetOutputsInfo(std::vector<std::string> *names, std::vector<std::vector<int64_t>> *shapes,
-                        std::vector<DataType> *data_types, std::vector<size_t> *mem_sizes) const;
+  Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
+  std::vector<MSTensor> GetInputs();
+  std::vector<MSTensor> GetOutputs();
 
  private:
   friend class ModelImpl;
@@ -119,8 +116,8 @@ class MS_API InputAndOutput {
   ~InputAndOutput() = default;
 
   // no explicit
-  InputAndOutput(const Tensor &);  // NOLINT(runtime/explicit)
-  InputAndOutput(Tensor &&);       // NOLINT(runtime/explicit)
+  InputAndOutput(const MSTensor &);  // NOLINT(runtime/explicit)
+  InputAndOutput(MSTensor &&);       // NOLINT(runtime/explicit)
 
   InputAndOutput(const std::shared_ptr<CellBase> &, const std::vector<InputAndOutput> &, int32_t index);
 
@@ -132,6 +129,5 @@ class MS_API InputAndOutput {
   std::vector<InputAndOutput> prev_;
   int32_t index_;
 };
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_CELL_H
diff --git a/include/api/context.h b/include/api/context.h
index 31552c95f41..0aea49dd995 100644
--- a/include/api/context.h
+++ b/include/api/context.h
@@ -16,26 +16,49 @@
 #ifndef MINDSPORE_INCLUDE_API_CONTEXT_H
 #define MINDSPORE_INCLUDE_API_CONTEXT_H
 
+#include <map>
+#include <any>
 #include <string>
 #include <memory>
 #include "include/api/types.h"
 
 namespace mindspore {
-namespace api {
-class MS_API Context {
- public:
-  static Context &Instance();
-  const std::string &GetDeviceTarget() const;
-  Context &SetDeviceTarget(const std::string &device_target);
-  uint32_t GetDeviceID() const;
-  Context &SetDeviceID(uint32_t device_id);
+constexpr auto kDeviceTypeAscend310 = "Ascend310";
+constexpr auto kDeviceTypeAscend910 = "Ascend910";
 
- private:
-  Context();
-  ~Context();
-  class ContextImpl;
-  std::shared_ptr<ContextImpl> impl_;
+struct MS_API Context {
+  virtual ~Context() = default;
+  std::map<std::string, std::any> params;
+};
+
+struct MS_API GlobalContext : public Context {
+  static std::shared_ptr<Context> GetGlobalContext();
+
+  static void SetGlobalDeviceTarget(const std::string &device_target);
+  static std::string GetGlobalDeviceTarget();
+
+  static void SetGlobalDeviceID(const uint32_t &device_id);
+  static uint32_t GetGlobalDeviceID();
+};
+
+struct MS_API ModelContext : public Context {
+  static void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path);
+  static std::string GetInsertOpConfigPath(const std::shared_ptr<Context> &context);
+
+  static void SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format);
+  static std::string GetInputFormat(const std::shared_ptr<Context> &context);
+
+  static void SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape);
+  static std::string GetInputShape(const std::shared_ptr<Context> &context);
+
+  static void SetOutputType(const std::shared_ptr<Context> &context, enum DataType output_type);
+  static enum DataType GetOutputType(const std::shared_ptr<Context> &context);
+
+  static void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode);
+  static std::string GetPrecisionMode(const std::shared_ptr<Context> &context);
+
+  static void SetOpSelectImplMode(const std::shared_ptr<Context> &context, const std::string &op_select_impl_mode);
+  static std::string GetOpSelectImplMode(const std::shared_ptr<Context> &context);
 };
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_CONTEXT_H
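Reviewer note (illustration, not part of the patch): the singleton `Context::Instance()` is split into a process-wide GlobalContext and a per-model ModelContext. A minimal usage sketch based on the declarations above; the device id and option values are made-up examples:

```cpp
#include <memory>
#include "include/api/context.h"

void ConfigureAscend() {
  // Process-wide settings replace the old Context::Instance() chain.
  mindspore::GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
  mindspore::GlobalContext::SetGlobalDeviceID(0);

  // Per-model options now travel in a Context object instead of a string map.
  auto model_context = std::make_shared<mindspore::ModelContext>();
  mindspore::ModelContext::SetInputFormat(model_context, "NCHW");
  mindspore::ModelContext::SetPrecisionMode(model_context, "allow_fp32_to_fp16");
}
```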
diff --git a/include/api/data_type.h b/include/api/data_type.h
new file mode 100644
index 00000000000..a39488a83d3
--- /dev/null
+++ b/include/api/data_type.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_
+#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_
+
+namespace mindspore {
+enum class DataType : int {
+  kTypeUnknown = 0,
+  kObjectTypeString = 12,
+  kObjectTypeList = 13,
+  kObjectTypeTuple = 14,
+  kObjectTypeTensorType = 17,
+  kNumberTypeBool = 30,
+  kNumberTypeInt8 = 32,
+  kNumberTypeInt16 = 33,
+  kNumberTypeInt32 = 34,
+  kNumberTypeInt64 = 35,
+  kNumberTypeUInt8 = 37,
+  kNumberTypeUInt16 = 38,
+  kNumberTypeUInt32 = 39,
+  kNumberTypeUInt64 = 40,
+  kNumberTypeFloat16 = 42,
+  kNumberTypeFloat32 = 43,
+  kNumberTypeFloat64 = 44,
+  kNumberTypeEnd = 46,
+  // add new enum here
+  kInvalidType = INT32_MAX,
+};
+}  // namespace mindspore
+#endif  // MINDSPORE_INCLUDE_API_DATA_TYPE_H_
diff --git a/include/api/graph.h b/include/api/graph.h
index 9373f573e6b..a9288eb5a15 100644
--- a/include/api/graph.h
+++ b/include/api/graph.h
@@ -16,6 +16,7 @@
 #ifndef MINDSPORE_INCLUDE_API_GRAPH_H
 #define MINDSPORE_INCLUDE_API_GRAPH_H
 
+#include <cstddef>
 #include <string>
 #include <vector>
 #include <memory>
@@ -24,21 +25,21 @@
 #include "include/api/types.h"
 
 namespace mindspore {
-namespace api {
 class MS_API Graph {
  public:
   class GraphData;
   explicit Graph(const std::shared_ptr<GraphData> &graph_data);
   explicit Graph(std::shared_ptr<GraphData> &&graph_data);
+  explicit Graph(std::nullptr_t);
   ~Graph();
 
   enum ModelType ModelType() const;
+  bool operator==(std::nullptr_t) const;
 
  private:
   friend class GraphCell;
   friend class ModelImpl;
   std::shared_ptr<GraphData> graph_data_;
 };
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_GRAPH_H
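Reviewer note: with the new `Graph(std::nullptr_t)` constructor and `operator==(std::nullptr_t)`, a failed load can be detected by direct nullptr comparison. A sketch, assuming `Serialization::LoadModel` (changed further down) returns an empty graph on failure:

```cpp
#include <string>
#include "include/api/graph.h"
#include "include/api/serialization.h"

bool LoadOk(const std::string &mindir_path) {
  mindspore::Graph graph = mindspore::Serialization::LoadModel(mindir_path, mindspore::kMindIR);
  return !(graph == nullptr);  // nullptr comparison instead of a status code
}
```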
diff --git a/include/api/lite_context.h b/include/api/lite_context.h
new file mode 100644
index 00000000000..933c6521e0f
--- /dev/null
+++ b/include/api/lite_context.h
@@ -0,0 +1,77 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
+#define MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
+
+#include <string>
+#include <memory>
+#include <map>
+#include <any>
+#include "include/api/types.h"
+
+namespace mindspore {
+namespace lite {
+/// \brief CpuBindMode defined for holding bind cpu strategy argument.
+typedef enum : uint32_t {
+  NO_BIND = 0,    /**< no bind */
+  HIGHER_CPU = 1, /**< bind higher cpu first */
+  MID_CPU = 2     /**< bind middle cpu first */
+} CpuBindMode;
+
+class Allocator;
+}  // namespace lite
+
+struct MS_API Context {
+ public:
+  static void Clear(const std::shared_ptr<Context> &context);
+
+  static void SetAsDefault(const std::shared_ptr<Context> &context);
+
+  static void SetVendorName(const std::shared_ptr<Context> &context, const std::string &name);
+  static std::string GetVendorName(const std::shared_ptr<Context> &context);
+
+  static void SetThreadNum(const std::shared_ptr<Context> &context, int num);
+  static int GetThreadNum(const std::shared_ptr<Context> &context);
+
+  static void SetAllocator(const std::shared_ptr<Context> &context, std::shared_ptr<lite::Allocator> alloc);
+  static std::shared_ptr<lite::Allocator> GetAllocator(const std::shared_ptr<Context> &context);
+
+  static void ConfigCPU(const std::shared_ptr<Context> &context, bool config);
+  static bool IfCPUEnabled(const std::shared_ptr<Context> &context);
+
+  static void ConfigCPUFp16(const std::shared_ptr<Context> &context, bool config);
+  static bool IfCPUFp16Enabled(const std::shared_ptr<Context> &context);
+
+  static void SetCPUBindMode(const std::shared_ptr<Context> &context, lite::CpuBindMode mode);
+  static lite::CpuBindMode GetCPUBindMode(const std::shared_ptr<Context> &context);
+
+  static void ConfigGPU(const std::shared_ptr<Context> &context, bool config);
+  static bool IfGPUEnabled(const std::shared_ptr<Context> &context);
+
+  static void ConfigGPUFp16(const std::shared_ptr<Context> &context, bool config);
+  static bool IfGPUFp16Enabled(const std::shared_ptr<Context> &context);
+
+  static void ConfigNPU(const std::shared_ptr<Context> &context, bool config);
+  static bool IfNPUEnabled(const std::shared_ptr<Context> &context);
+
+  static void SetNPUFrequency(const std::shared_ptr<Context> &context, int freq);
+  static int GetNPUFrequency(const std::shared_ptr<Context> &context);
+
+ private:
+  std::map<std::string, std::any> context_;
+};
+}  // namespace mindspore
+#endif  // MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
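Reviewer note: the lite-side Context is driven entirely through static setters. A hypothetical configuration sketch (the thread count and bind mode are arbitrary example values; also note this header declares its own mindspore::Context, so it should not be mixed with include/api/context.h in one translation unit):

```cpp
#include <memory>
#include "include/api/lite_context.h"

std::shared_ptr<mindspore::Context> MakeLiteContext() {
  auto context = std::make_shared<mindspore::Context>();
  mindspore::Context::SetAsDefault(context);
  mindspore::Context::SetThreadNum(context, 2);
  mindspore::Context::ConfigCPU(context, true);
  mindspore::Context::SetCPUBindMode(context, mindspore::lite::MID_CPU);
  return context;
}
```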
diff --git a/include/api/model.h b/include/api/model.h
index efd06aedc57..8d401085eb7 100644
--- a/include/api/model.h
+++ b/include/api/model.h
@@ -20,41 +20,36 @@
 #include <string>
 #include <vector>
 #include <map>
+#include <memory>
 #include "include/api/status.h"
 #include "include/api/types.h"
 #include "include/api/graph.h"
 #include "include/api/cell.h"
 
 namespace mindspore {
-namespace api {
 class ModelImpl;
-// todo: minddata c++ interface
-class DataSet {};
+struct Context;
 
 class MS_API Model {
  public:
-  explicit Model(const std::vector<Output> &network);
-  explicit Model(const GraphCell &graph);
+  explicit Model(const std::vector<Output> &network, const std::shared_ptr<Context> &model_context = nullptr);
+  explicit Model(const GraphCell &graph, const std::shared_ptr<Context> &model_context = nullptr);
   ~Model();
   Model(const Model &) = delete;
   void operator=(const Model &) = delete;
 
-  Status Build(const std::map<std::string, std::string> &options);
+  Status Build();
+  Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims);
 
-  Status Train(const DataSet &dataset, bool data_sink, std::map<std::string, Buffer> *outputs);
-  Status Eval(const DataSet &dataset, bool data_sink, std::map<std::string, Buffer> *outputs);
-  Status Predict(const std::vector<Buffer> &inputs, std::vector<Buffer> *outputs);
+  Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);
 
-  Status GetInputsInfo(std::vector<std::string> *names, std::vector<std::vector<int64_t>> *shapes,
-                       std::vector<DataType> *data_types, std::vector<size_t> *mem_sizes) const;
-  Status GetOutputsInfo(std::vector<std::string> *names, std::vector<std::vector<int64_t>> *shapes,
-                        std::vector<DataType> *data_types, std::vector<size_t> *mem_sizes) const;
+  std::vector<MSTensor> GetInputs();
+  std::vector<MSTensor> GetOutputs();
 
   static bool CheckModelSupport(const std::string &device_type, ModelType model_type);
 
 private:
  std::shared_ptr<ModelImpl> impl_;
 };
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_MODEL_H
diff --git a/include/api/ops/ops.h b/include/api/ops/ops.h
index 0715bac8988..5e56c17377d 100644
--- a/include/api/ops/ops.h
+++ b/include/api/ops/ops.h
@@ -25,7 +25,6 @@
 #include "include/api/cell.h"
 
 namespace mindspore {
-namespace api {
 struct MS_API Conv2D : public OpCell<Conv2D> {
   Conv2D() : OpCell("Conv2D") {}
   ~Conv2D() override = default;
@@ -45,6 +44,5 @@ struct MS_API Conv2D : public OpCell<Conv2D> {
   std::vector<int> dilation = {1, 1, 1, 1};
   int group = 1;
 };
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_OPS_OPS_H
diff --git a/include/api/serialization.h b/include/api/serialization.h
index 9750337d0d1..2c34b826d35 100644
--- a/include/api/serialization.h
+++ b/include/api/serialization.h
@@ -26,15 +26,14 @@
 #include "include/api/graph.h"
 
 namespace mindspore {
-namespace api {
 class MS_API Serialization {
  public:
+  static Graph LoadModel(const void *model_data, size_t data_size, ModelType model_type);
   static Graph LoadModel(const std::string &file, ModelType model_type);
   static Status LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters);
   static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model);
   static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data);
   static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file);
 };
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_SERIALIZATION_H
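Reviewer note: end to end, the reworked flow is LoadModel → Model(GraphCell, context) → Build() → Predict() with MSTensor I/O; Build() no longer takes an options map. A sketch assuming the declarations above (error handling trimmed):

```cpp
#include <memory>
#include <string>
#include <vector>
#include "include/api/model.h"
#include "include/api/serialization.h"

mindspore::Status RunOnce(const std::string &mindir_path,
                          const std::shared_ptr<mindspore::Context> &model_context) {
  mindspore::Graph graph = mindspore::Serialization::LoadModel(mindir_path, mindspore::kMindIR);
  mindspore::Model model(mindspore::GraphCell(graph), model_context);

  mindspore::Status ret = model.Build();  // options now come from the context
  if (ret != mindspore::kSuccess) {
    return ret;
  }

  std::vector<mindspore::MSTensor> inputs = model.GetInputs();  // fill via MutableData()
  std::vector<mindspore::MSTensor> outputs;
  return model.Predict(inputs, &outputs);
}
```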
diff --git a/include/api/status.h b/include/api/status.h
index c8284fbaa5b..131a15372cd 100644
--- a/include/api/status.h
+++ b/include/api/status.h
@@ -17,37 +17,129 @@
 #define MINDSPORE_INCLUDE_API_STATUS_H
 
 #include <string>
+#include <ostream>
+#include <climits>
 
 namespace mindspore {
-namespace api {
-enum StatusCode {
-  SUCCESS = 0,
-  FAILED,
-  INVALID_INPUTS,
-  // insert new status code here
-  UNKNOWN = 0xFFFFFFFF
+enum CompCode : uint32_t {
+  kCore = 0x00000000u,
+  kMD = 0x10000000u,
+  kME = 0x20000000u,
+  kMC = 0x30000000u,
+  kLite = 0xF0000000u,
+};
+
+enum StatusCode : uint32_t {
+  kSuccess = 0,
+  // Core
+  kCoreFailed = kCore | 0x1,
+
+  // MD
+  kMDOutOfMemory = kMD | 1,
+  kMDShapeMisMatch = kMD | 2,
+  kMDInterrupted = kMD | 3,
+  kMDNoSpace = kMD | 4,
+  kMDPyFuncException = kMD | 5,
+  kMDDuplicateKey = kMD | 6,
+  kMDPythonInterpreterFailure = kMD | 7,
+  kMDTDTPushFailure = kMD | 8,
+  kMDFileNotExist = kMD | 9,
+  kMDProfilingError = kMD | 10,
+  kMDBoundingBoxOutOfBounds = kMD | 11,
+  kMDBoundingBoxInvalidShape = kMD | 12,
+  kMDSyntaxError = kMD | 13,
+  kMDTimeOut = kMD | 14,
+  kMDBuddySpaceFull = kMD | 15,
+  kMDNetWorkError = kMD | 16,
+  kMDNotImplementedYet = kMD | 17,
+  // Make this error code the last one. Add new error code above it.
+  kMDUnexpectedError = kMD | 127,
+
+  // ME
+  kMEFailed = kME | 0x1,
+  kMEInvalidInput = kME | 0x2,
+
+  // MC
+  kMCFailed = kMC | 0x1,
+  kMCDeviceError = kMC | 0x2,
+  kMCInvalidInput = kMC | 0x3,
+  kMCInvalidArgs = kMC | 0x4,
+
+  // Lite
+  // Common error code, range: [-1, -100)
+  kLiteError = kLite | (0x0FFFFFFF & -1),           /**< Common error code. */
+  kLiteNullptr = kLite | (0x0FFFFFFF & -2),         /**< NULL pointer returned.*/
+  kLiteParamInvalid = kLite | (0x0FFFFFFF & -3),    /**< Invalid parameter.*/
+  kLiteNoChange = kLite | (0x0FFFFFFF & -4),        /**< No change. */
+  kLiteSuccessExit = kLite | (0x0FFFFFFF & -5),     /**< No error but exit. */
+  kLiteMemoryFailed = kLite | (0x0FFFFFFF & -6),    /**< Fail to create memory. */
+  kLiteNotSupport = kLite | (0x0FFFFFFF & -7),      /**< Fail to support. */
+  kLiteThreadPoolError = kLite | (0x0FFFFFFF & -8), /**< Error occurred in thread pool. */
+
+  // Executor error code, range: [-100,-200)
+  kLiteOutOfTensorRange = kLite | (0x0FFFFFFF & -100), /**< Failed to check range. */
+  kLiteInputTensorError = kLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */
+  kLiteReentrantError = kLite | (0x0FFFFFFF & -102),   /**< Exist executor running. */
+
+  // Graph error code, range: [-200,-300)
+  kLiteGraphFileError = kLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */
+
+  // Node error code, range: [-300,-400)
+  kLiteNotFindOp = kLite | (0x0FFFFFFF & -300),        /**< Failed to find operator. */
+  kLiteInvalidOpName = kLite | (0x0FFFFFFF & -301),    /**< Invalid operator name. */
+  kLiteInvalidOpAttr = kLite | (0x0FFFFFFF & -302),    /**< Invalid operator attr. */
+  kLiteOpExecuteFailure = kLite | (0x0FFFFFFF & -303), /**< Failed to execute operator. */
+
+  // Tensor error code, range: [-400,-500)
+  kLiteFormatError = kLite | (0x0FFFFFFF & -400), /**< Failed to check tensor format. */
+
+  // InferShape error code, range: [-500,-600)
+  kLiteInferError = kLite | (0x0FFFFFFF & -500),   /**< Failed to infer shape. */
+  kLiteInferInvalid = kLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */
+
+  // User input param error code, range: [-600, 700)
+  kLiteInputParamInvalid = kLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. */
 };
 
 class Status {
  public:
-  Status() : status_code_(FAILED) {}
-  Status(enum StatusCode status_code, const std::string &status_msg = "")  // NOLINT(runtime/explicit)
-      : status_code_(status_code), status_msg_(status_msg) {}
+  Status() : status_code_(kSuccess), line_of_code_(-1) {}
+  Status(enum StatusCode status_code, const std::string &status_msg = "")  // NOLINT(runtime/explicit)
+      : status_code_(status_code), status_msg_(status_msg), line_of_code_(-1) {}
+  Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = "");
+
   ~Status() = default;
 
-  bool IsSuccess() const { return status_code_ == SUCCESS; }
   enum StatusCode StatusCode() const { return status_code_; }
-  std::string StatusMessage() const { return status_msg_; }
+  const std::string &ToString() const { return status_msg_; }
+
+  int GetLineOfCode() const { return line_of_code_; }
+  const std::string &GetErrDescription() const { return status_msg_; }
+  const std::string &SetErrDescription(const std::string &err_description);
+
+  friend std::ostream &operator<<(std::ostream &os, const Status &s);
+
   bool operator==(const Status &other) const { return status_code_ == other.status_code_; }
   bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; }
   bool operator!=(const Status &other) const { return status_code_ != other.status_code_; }
   bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; }
-  operator bool() const = delete;
+
+  explicit operator bool() const { return (status_code_ == kSuccess); }
+  explicit operator int() const { return static_cast<int>(status_code_); }
+
+  static Status OK() { return Status(StatusCode::kSuccess); }
+
+  bool IsOk() const { return (StatusCode() == StatusCode::kSuccess); }
+
+  bool IsError() const { return !IsOk(); }
+
+  static std::string CodeAsString(enum StatusCode c);
 
  private:
   enum StatusCode status_code_;
   std::string status_msg_;
+  int line_of_code_;
+  std::string file_name_;
+  std::string err_description_;
 };
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_STATUS_H
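Reviewer note: callers previously tested `ret != SUCCESS`; the new Status keeps enum comparison and adds an explicit bool conversion plus IsOk()/IsError(). A sketch of the intended call-site style:

```cpp
#include <iostream>
#include "include/api/status.h"

void Report(const mindspore::Status &ret) {
  if (ret.IsError()) {  // equivalent: !ret.IsOk(), or !static_cast<bool>(ret)
    std::cout << ret.ToString() << std::endl;
  }
  if (ret == mindspore::kMDOutOfMemory) {
    // component-specific handling; the CompCode bits namespace the codes
  }
}
```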
diff --git a/include/api/types.h b/include/api/types.h
index 73950728699..0f4503e122b 100644
--- a/include/api/types.h
+++ b/include/api/types.h
@@ -16,15 +16,20 @@
 #ifndef MINDSPORE_INCLUDE_API_TYPES_H
 #define MINDSPORE_INCLUDE_API_TYPES_H
 
+#include <cstddef>
 #include <string>
 #include <vector>
 #include <memory>
+#include "include/api/data_type.h"
 
+#ifdef _WIN32
+#define MS_API __declspec(dllexport)
+#else
 #define MS_API __attribute__((visibility("default")))
+#endif
 
 namespace mindspore {
-namespace api {
-enum ModelType {
+enum ModelType : uint32_t {
   kMindIR = 0,
   kAIR = 1,
   kOM = 2,
@@ -33,52 +38,38 @@
   kUnknownType = 0xFFFFFFFF
 };
 
-enum DataType {
-  kMsUnknown = 0,
-  kMsBool = 1,
-  kMsInt8 = 2,
-  kMsInt16 = 3,
-  kMsInt32 = 4,
-  kMsInt64 = 5,
-  kMsUint8 = 6,
-  kMsUint16 = 7,
-  kMsUint32 = 8,
-  kMsUint64 = 9,
-  kMsFloat16 = 10,
-  kMsFloat32 = 11,
-  kMsFloat64 = 12,
-  // insert new data type here
-  kInvalidDataType = 0xFFFFFFFF
-};
-
-class MS_API Tensor {
+class MS_API MSTensor {
  public:
-  Tensor();
-  Tensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data, size_t data_len);
-  ~Tensor();
+  class Impl;
+
+  static MSTensor CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
+                               const void *data, size_t data_len) noexcept;
+  static MSTensor CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
+                                  const void *data, size_t data_len) noexcept;
+
+  MSTensor();
+  explicit MSTensor(const std::shared_ptr<Impl> &impl);
+  MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,
+           size_t data_len);
+  ~MSTensor();
 
   const std::string &Name() const;
-  void SetName(const std::string &name);
-
-  api::DataType DataType() const;
-  void SetDataType(api::DataType type);
-
+  enum DataType DataType() const;
   const std::vector<int64_t> &Shape() const;
-  void SetShape(const std::vector<int64_t> &shape);
+  int64_t ElementNum() const;
 
-  const void *Data() const;
+  std::shared_ptr<const void> Data() const;
   void *MutableData();
   size_t DataSize() const;
 
-  bool ResizeData(size_t data_len);
-  bool SetData(const void *data, size_t data_len);
+  bool IsDevice() const;
 
-  int64_t ElementNum() const;
-  static int GetTypeSize(api::DataType type);
-  Tensor Clone() const;
+  MSTensor Clone() const;
+  bool operator==(std::nullptr_t) const;
 
  private:
-  class Impl;
+  friend class ModelImpl;
+  explicit MSTensor(std::nullptr_t);
   std::shared_ptr<Impl> impl_;
 };
 
@@ -101,21 +92,5 @@ class MS_API Buffer {
   class Impl;
   std::shared_ptr<Impl> impl_;
 };
-
-extern MS_API const char *kDeviceTypeAscend310;
-extern MS_API const char *kDeviceTypeAscend910;
-extern MS_API const char *kDeviceTypeGpu;
-
-constexpr auto kModelOptionDumpCfgPath = "mindspore.option.dump_config_file_path";
-constexpr auto kModelOptionInsertOpCfgPath = "mindspore.option.insert_op_config_file_path";  // aipp config file
-constexpr auto kModelOptionInputFormat = "mindspore.option.input_format";  // nchw or nhwc
-// Mandatory while dynamic batch: e.g. "input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1"
-constexpr auto kModelOptionInputShape = "mindspore.option.input_shape";
-constexpr auto kModelOptionOutputType = "mindspore.option.output_type";  // "FP32", "UINT8" or "FP16", default as "FP32"
-constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode";
-// "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16"
-constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode";
-// "high_precision" or "high_performance", default as "high_performance"
-}  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_TYPES_H
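Reviewer note: MSTensor replaces Tensor and is built through factory functions. A sketch, assuming (from the names) that CreateTensor copies the buffer while CreateRefTensor only references it, so ref-backed data must be kept alive by the caller:

```cpp
#include <vector>
#include "include/api/types.h"

mindspore::MSTensor MakeInput(const std::vector<float> &host_data) {
  return mindspore::MSTensor::CreateTensor(
    "input0", mindspore::DataType::kNumberTypeFloat32,
    {1, static_cast<int64_t>(host_data.size())},          // shape
    host_data.data(), host_data.size() * sizeof(float));  // copied into the tensor
}
```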
"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1" -constexpr auto kModelOptionInputShape = "mindspore.option.input_shape"; -constexpr auto kModelOptionOutputType = "mindspore.option.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32" -constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode"; -// "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16" -constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode"; -// "high_precision" or "high_performance", default as "high_performance" -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_TYPES_H diff --git a/mindspore/ccsrc/cxx_api/CMakeLists.txt b/mindspore/ccsrc/cxx_api/CMakeLists.txt index 4992308f15b..d5e4707593e 100644 --- a/mindspore/ccsrc/cxx_api/CMakeLists.txt +++ b/mindspore/ccsrc/cxx_api/CMakeLists.txt @@ -23,7 +23,7 @@ if(ENABLE_D) endif() if(ENABLE_GPU) - file(GLOB_RECURSE API_MS_INFER_SRC ${CMAKE_CURRENT_SOURCE_DIR} "python_utils.cc" "model/ms/*.cc" "graph/gpu/*.cc") + file(GLOB_RECURSE API_MS_INFER_SRC ${CMAKE_CURRENT_SOURCE_DIR} "model/ms/*.cc" "graph/gpu/*.cc") endif() set(MSLIB_SRC ${CMAKE_CURRENT_SOURCE_DIR}/types.cc @@ -45,8 +45,13 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin") target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} -Wl,-force_load mindspore -Wl,-noall_load proto_input mindspore_gvar mindspore::protobuf) else() - target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} + if(ENABLE_D OR ENABLE_ACL) + target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} -Wl,--whole-archive mindspore -Wl,--no-whole-archive proto_input mindspore_gvar mindspore::protobuf) + else() + target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} + mindspore proto_input mindspore_gvar mindspore::protobuf) + endif() endif() if(ENABLE_CPU) diff --git a/mindspore/ccsrc/cxx_api/cell.cc b/mindspore/ccsrc/cxx_api/cell.cc index 7329675c0f7..ebf3a4706ed 100644 --- a/mindspore/ccsrc/cxx_api/cell.cc +++ b/mindspore/ccsrc/cxx_api/cell.cc @@ -18,7 +18,7 @@ #include "cxx_api/factory.h" #include "cxx_api/graph/graph_impl.h" -namespace mindspore::api { +namespace mindspore { std::vector CellBase::operator()(const std::vector &inputs) const { return Clone()->Construct(inputs); } ParameterCell::ParameterCell(const ParameterCell &cell) : tensor_(cell.tensor_.Clone()) {} @@ -40,23 +40,23 @@ ParameterCell &ParameterCell::operator=(ParameterCell &&cell) { return *this; } -ParameterCell::ParameterCell(const Tensor &tensor) : tensor_(tensor.Clone()) {} +ParameterCell::ParameterCell(const MSTensor &tensor) : tensor_(tensor.Clone()) {} -ParameterCell &ParameterCell::operator=(const Tensor &tensor) { +ParameterCell &ParameterCell::operator=(const MSTensor &tensor) { tensor_ = tensor.Clone(); return *this; } -ParameterCell::ParameterCell(Tensor &&tensor) : tensor_(tensor) {} +ParameterCell::ParameterCell(MSTensor &&tensor) : tensor_(tensor) {} -ParameterCell &ParameterCell::operator=(Tensor &&tensor) { +ParameterCell &ParameterCell::operator=(MSTensor &&tensor) { tensor_ = tensor; return *this; } GraphCell::GraphCell(const Graph &graph) : graph_(std::make_shared(graph)), - executor_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { + executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { MS_EXCEPTION_IF_NULL(graph_); MS_EXCEPTION_IF_NULL(executor_); 
executor_->SetGraph(graph_); @@ -64,7 +64,7 @@ GraphCell::GraphCell(const Graph &graph) GraphCell::GraphCell(const std::shared_ptr &graph) : graph_(graph), - executor_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { + executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { MS_EXCEPTION_IF_NULL(graph_); MS_EXCEPTION_IF_NULL(executor_); executor_->SetGraph(graph_); @@ -72,13 +72,13 @@ GraphCell::GraphCell(const std::shared_ptr &graph) GraphCell::GraphCell(Graph &&graph) : graph_(std::make_shared(graph)), - executor_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { + executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { MS_EXCEPTION_IF_NULL(graph_); MS_EXCEPTION_IF_NULL(executor_); executor_->SetGraph(graph_); } -Status GraphCell::Run(const std::vector &inputs, std::vector *outputs) { +Status GraphCell::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(executor_); return executor_->Run(inputs, outputs); } @@ -88,25 +88,24 @@ Status GraphCell::Load() { return executor_->Load(); } -Status GraphCell::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector GraphCell::GetInputs() { MS_EXCEPTION_IF_NULL(executor_); - return executor_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return executor_->GetInputs(); } -Status GraphCell::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector GraphCell::GetOutputs() { MS_EXCEPTION_IF_NULL(executor_); - return executor_->GetOutputsInfo(names, shapes, data_types, mem_sizes); + return executor_->GetOutputs(); } InputAndOutput::InputAndOutput() : cell_(nullptr), prev_(), index_(-1) {} -InputAndOutput::InputAndOutput(const Tensor &tensor) +InputAndOutput::InputAndOutput(const MSTensor &tensor) : cell_(std::make_shared(tensor.Clone())), prev_(), index_(-1) {} -InputAndOutput::InputAndOutput(Tensor &&tensor) : cell_(std::make_shared(tensor)), prev_(), index_(-1) {} +InputAndOutput::InputAndOutput(MSTensor &&tensor) + : cell_(std::make_shared(tensor)), prev_(), index_(-1) {} InputAndOutput::InputAndOutput(const std::shared_ptr &cell, const std::vector &prev, int32_t index) : cell_(cell), prev_(prev), index_(index) {} -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/context.cc b/mindspore/ccsrc/cxx_api/context.cc index 6af1915bfb8..a9ea4055a02 100644 --- a/mindspore/ccsrc/cxx_api/context.cc +++ b/mindspore/ccsrc/cxx_api/context.cc @@ -16,49 +16,119 @@ #include "include/api/context.h" #include "utils/log_adapter.h" -namespace mindspore::api { -class Context::ContextImpl { - public: - ContextImpl() : device_target_("NotSet"), device_id_(0) {} - ~ContextImpl() = default; - const std::string &GetDeviceTarget() const { return device_target_; } - void SetDeviceTarget(std::string_view device_target) { device_target_ = device_target; } - uint32_t GetDeviceID() const { return device_id_; } - void SetDeviceID(uint32_t device_id) { device_id_ = device_id; } +constexpr auto kGlobalContextDeviceTarget = "mindspore.ascend.globalcontext.device_target"; +constexpr auto kGlobalContextDeviceID = "mindspore.ascend.globalcontext.device_id"; +constexpr auto kModelOptionInsertOpCfgPath = "mindspore.option.insert_op_config_file_path"; // aipp config file +constexpr auto kModelOptionInputFormat = "mindspore.option.input_format"; // nchw or nhwc +constexpr auto 
kModelOptionInputShape = "mindspore.option.input_shape"; +// Mandatory while dynamic batch: e.g. "input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1" +constexpr auto kModelOptionOutputType = "mindspore.option.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32" +constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode"; +// "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16" +constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode"; - private: - std::string device_target_; - uint32_t device_id_; -}; +namespace mindspore { +template +static T GetValue(const std::shared_ptr &context, const std::string &key) { + auto iter = context->params.find(key); + if (iter == context->params.end()) { + return T(); + } + const std::any &value = iter->second; + if (value.type() != typeid(T)) { + return T(); + } -Context &Context::Instance() { - static Context context; - return context; + return std::any_cast(value); } -const std::string &Context::GetDeviceTarget() const { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetDeviceTarget(); +std::shared_ptr GlobalContext::GetGlobalContext() { + static std::shared_ptr g_context = std::make_shared(); + return g_context; } -Context &Context::SetDeviceTarget(const std::string &device_target) { - MS_EXCEPTION_IF_NULL(impl_); - impl_->SetDeviceTarget(device_target); - return *this; +void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + global_context->params[kGlobalContextDeviceTarget] = device_target; } -uint32_t Context::GetDeviceID() const { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetDeviceID(); +std::string GlobalContext::GetGlobalDeviceTarget() { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + return GetValue(global_context, kGlobalContextDeviceTarget); } -Context &Context::SetDeviceID(uint32_t device_id) { - MS_EXCEPTION_IF_NULL(impl_); - impl_->SetDeviceID(device_id); - return *this; +void GlobalContext::SetGlobalDeviceID(const uint32_t &device_id) { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + global_context->params[kGlobalContextDeviceID] = device_id; } -Context::Context() : impl_(std::make_shared()) { MS_EXCEPTION_IF_NULL(impl_); } +uint32_t GlobalContext::GetGlobalDeviceID() { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + return GetValue(global_context, kGlobalContextDeviceID); +} -Context::~Context() {} -} // namespace mindspore::api +void ModelContext::SetInsertOpConfigPath(const std::shared_ptr &context, const std::string &cfg_path) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionInsertOpCfgPath] = cfg_path; +} + +std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionInsertOpCfgPath); +} + +void ModelContext::SetInputFormat(const std::shared_ptr &context, const std::string &format) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionInputFormat] = format; +} + +std::string ModelContext::GetInputFormat(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionInputFormat); +} + +void ModelContext::SetInputShape(const std::shared_ptr &context, const std::string &shape) { + MS_EXCEPTION_IF_NULL(context); + 
context->params[kModelOptionInputShape] = shape; +} + +std::string ModelContext::GetInputShape(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionInputShape); +} + +void ModelContext::SetOutputType(const std::shared_ptr &context, enum DataType output_type) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionOutputType] = output_type; +} + +enum DataType ModelContext::GetOutputType(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionOutputType); +} + +void ModelContext::SetPrecisionMode(const std::shared_ptr &context, const std::string &precision_mode) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionPrecisionMode] = precision_mode; +} + +std::string ModelContext::GetPrecisionMode(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionPrecisionMode); +} + +void ModelContext::SetOpSelectImplMode(const std::shared_ptr &context, + const std::string &op_select_impl_mode) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionOpSelectImplMode] = op_select_impl_mode; +} + +std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionOpSelectImplMode); +} +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/factory.h b/mindspore/ccsrc/cxx_api/factory.h index 7a7b45e12a9..e2bdb96cea6 100644 --- a/mindspore/ccsrc/cxx_api/factory.h +++ b/mindspore/ccsrc/cxx_api/factory.h @@ -23,7 +23,7 @@ #include #include "utils/utils.h" -namespace mindspore::api { +namespace mindspore { template class Factory { using U = std::function()>; @@ -79,5 +79,5 @@ class Registrar { #define API_FACTORY_REG(BASE_CLASS, DEVICE_NAME, DERIVE_CLASS) \ static const Registrar g_api_##DERIVE_CLASS##_registrar_##DEVICE_NAME##_reg( \ #DEVICE_NAME, []() { return std::make_shared(); }); -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_FACTORY_H diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc index 9feb614f77e..624d1c8832e 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc @@ -17,8 +17,8 @@ #include "utils/log_adapter.h" #include "acl/acl.h" -namespace mindspore::api { -std::weak_ptr AclEnvGuard::global_acl_env_; +namespace mindspore { +std::shared_ptr AclEnvGuard::global_acl_env_; std::mutex AclEnvGuard::global_acl_env_mutex_; AclEnvGuard::AclEnvGuard(std::string_view cfg_file) { @@ -42,7 +42,7 @@ std::shared_ptr AclEnvGuard::GetAclEnv(std::string_view cfg_file) { std::shared_ptr acl_env; std::lock_guard lock(global_acl_env_mutex_); - acl_env = global_acl_env_.lock(); + acl_env = global_acl_env_; if (acl_env != nullptr) { MS_LOG(INFO) << "Acl has been initialized, skip."; } else { @@ -57,4 +57,4 @@ std::shared_ptr AclEnvGuard::GetAclEnv(std::string_view cfg_file) { } return acl_env; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h index df35385d1f7..8b4ae76c68a 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h @@ -20,7 +20,7 @@ #include #include "acl/acl_base.h" -namespace mindspore::api { +namespace mindspore { class __attribute__((visibility("default"))) AclEnvGuard { 
public: explicit AclEnvGuard(std::string_view cfg_file); @@ -29,10 +29,10 @@ class __attribute__((visibility("default"))) AclEnvGuard { static std::shared_ptr GetAclEnv(std::string_view cfg_file); private: - static std::weak_ptr global_acl_env_; + static std::shared_ptr global_acl_env_; static std::mutex global_acl_env_mutex_; aclError errno_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_ACL_ACL_ENV_GUARD_H diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc index 262d79bfb78..439161910b5 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc @@ -16,53 +16,50 @@ #include "cxx_api/graph/acl/acl_graph_impl.h" #include "include/api/context.h" #include "cxx_api/model/acl/model_converter.h" -#include "cxx_api/python_utils.h" #include "utils/log_adapter.h" -namespace mindspore::api { +namespace mindspore { API_FACTORY_REG(GraphCell::GraphImpl, Ascend310, AclGraphImpl); AclGraphImpl::AclGraphImpl() : init_flag_(false), load_flag_(false), device_type_("AscendCL"), - device_id_(Context::Instance().GetDeviceID()), + device_id_(GlobalContext::GetGlobalDeviceID()), context_(nullptr), acl_env_(nullptr) {} AclGraphImpl::~AclGraphImpl() { (void)FinalizeEnv(); } -Status AclGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { +Status AclGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; - return FAILED; + return ret; } return model_process_.PredictFromHost(inputs, outputs); } -Status AclGraphImpl::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AclGraphImpl::GetInputs() { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; - return FAILED; + return {}; } - return model_process_.GetInputsInfo(names, shapes, data_types, mem_sizes); + return model_process_.GetInputs(); } -Status AclGraphImpl::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AclGraphImpl::GetOutputs() { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; - return FAILED; + return {}; } - return model_process_.GetOutputsInfo(names, shapes, data_types, mem_sizes); + return model_process_.GetOutputs(); } Status AclGraphImpl::LoadAclModel(Buffer om_data) { @@ -72,44 +69,44 @@ Status AclGraphImpl::LoadAclModel(Buffer om_data) { auto acl_ret = aclmdlLoadFromMem(om_data.Data(), om_data.DataSize(), &acl_model_id); if (acl_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Call aclmdlLoadFromMem failed."; - return FAILED; + return kMCDeviceError; } // acl init model resource model_process_.set_model_id(acl_model_id); Status ret = model_process_.PreInitModelResource(); - if (ret != SUCCESS) { + if (ret != kSuccess) { (void)aclmdlUnload(acl_model_id); MS_LOG(ERROR) << "Pre init model resource failed."; - return FAILED; + return ret; } MS_LOG(INFO) << "Load acl model success."; - return SUCCESS; + return kSuccess; } Status AclGraphImpl::InitEnv() { if (init_flag_) { - return SUCCESS; + return kSuccess; } acl_env_ = AclEnvGuard::GetAclEnv(""); if (acl_env_ == nullptr) { MS_LOG(ERROR) << "Acl init failed."; - return 
FAILED; + return kMCDeviceError; } aclError ret = aclrtSetDevice(device_id_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl open device " << device_id_ << " failed"; - return FAILED; + return kMCDeviceError; } MS_LOG(INFO) << "Open device " << device_id_ << " success"; ret = aclrtCreateContext(&context_, device_id_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl create context failed"; - return FAILED; + return kMCDeviceError; } MS_LOG(INFO) << "Create context success"; @@ -117,7 +114,7 @@ Status AclGraphImpl::InitEnv() { ret = aclrtGetRunMode(&run_mode); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl get run mode failed"; - return FAILED; + return kMCDeviceError; } bool is_device = (run_mode == ACL_DEVICE); model_process_.SetIsDevice(is_device); @@ -125,24 +122,24 @@ Status AclGraphImpl::InitEnv() { MS_LOG(INFO) << "Init acl success, device id " << device_id_; init_flag_ = true; - return SUCCESS; + return kSuccess; } Status AclGraphImpl::FinalizeEnv() { if (!init_flag_) { - return SUCCESS; + return kSuccess; } aclError rt_ret = aclrtSetCurrentContext(context_); if (rt_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Set the ascend device context failed"; - return FAILED; + return kMCDeviceError; } Status ret = model_process_.UnLoad(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Unload model inner failed."; - return FAILED; + return ret; } if (context_ != nullptr) { @@ -161,16 +158,16 @@ Status AclGraphImpl::FinalizeEnv() { MS_LOG(INFO) << "End to reset device " << device_id_; init_flag_ = false; - return SUCCESS; + return kSuccess; } Status AclGraphImpl::Load() { // check graph type if (graph_->ModelType() != ModelType::kOM) { Status ret = ConvertToOM(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load Failed."; - return FAILED; + return ret; } } @@ -180,15 +177,15 @@ Status AclGraphImpl::Load() { // init Status ret = InitEnv(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; - return FAILED; + return ret; } // load model if (!load_flag_) { ret = LoadAclModel(om_data); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load acl model failed."; return ret; } @@ -198,24 +195,24 @@ Status AclGraphImpl::Load() { aclError rt_ret = aclrtSetCurrentContext(context_); if (rt_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Set the ascend device context failed"; - return FAILED; + return kMCDeviceError; } - return SUCCESS; + return kSuccess; } Status AclGraphImpl::ConvertToOM() { MS_LOG(INFO) << "Start convert to om model."; if (graph_ == nullptr) { MS_LOG(ERROR) << "Invalid graph_ is null."; - return FAILED; + return kMCFailed; } auto &graph_data = GraphImpl::MutableGraphData(); MS_EXCEPTION_IF_NULL(graph_data); if (graph_->ModelType() == ModelType::kOM) { MS_LOG(INFO) << "This model has been built, skip."; - return SUCCESS; + return kSuccess; } else if (graph_->ModelType() == ModelType::kMindIR) { auto func_graph = graph_data->GetFuncGraph(); MS_EXCEPTION_IF_NULL(func_graph); @@ -223,13 +220,13 @@ Status AclGraphImpl::ConvertToOM() { Buffer om_data = model_converter.LoadMindIR(func_graph); if (om_data.Data() == nullptr || om_data.DataSize() == 0) { MS_LOG(ERROR) << "Convert MindIR to OM failed."; - return FAILED; + return kMCFailed; } graph_data = std::make_shared(om_data, ModelType::kOM); MS_LOG(INFO) << "Convert MindIR to OM success."; - return SUCCESS; + return kSuccess; } MS_LOG(ERROR) << "Unsupported ModelType " << graph_->ModelType(); - return FAILED; + return kMCFailed; } -} // namespace 
mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h index e3085c1b5b0..4d185d5fbe7 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h @@ -27,18 +27,16 @@ #include "cxx_api/graph/graph_impl.h" #include "cxx_api/factory.h" -namespace mindspore::api { +namespace mindspore { class AclGraphImpl : public GraphCell::GraphImpl { public: AclGraphImpl(); ~AclGraphImpl() override; - Status Run(const std::vector &inputs, std::vector *outputs) override; + Status Run(const std::vector &inputs, std::vector *outputs) override; Status Load() override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: Status ConvertToOM(); @@ -56,5 +54,5 @@ class AclGraphImpl : public GraphCell::GraphImpl { ModelProcess model_process_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_ACL_ACL_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc b/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc index 5c10b75a791..c153ae7df24 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc @@ -20,17 +20,19 @@ #include #include "utils/utils.h" -namespace mindspore::api { +namespace mindspore { static DataType TransToApiType(aclDataType data_type) { - static const std::map data_type_map = { - {ACL_FLOAT16, api::kMsFloat16}, {ACL_FLOAT, api::kMsFloat32}, {ACL_DOUBLE, api::kMsFloat64}, - {ACL_INT8, api::kMsInt8}, {ACL_INT16, api::kMsInt16}, {ACL_INT32, api::kMsInt32}, - {ACL_INT64, api::kMsInt64}, {ACL_UINT8, api::kMsUint8}, {ACL_UINT16, api::kMsUint16}, - {ACL_UINT32, api::kMsUint32}, {ACL_UINT64, api::kMsUint64}, {ACL_BOOL, api::kMsBool}, + static const std::map data_type_map = { + {ACL_FLOAT16, DataType::kNumberTypeFloat16}, {ACL_FLOAT, DataType::kNumberTypeFloat32}, + {ACL_DOUBLE, DataType::kNumberTypeFloat64}, {ACL_INT8, DataType::kNumberTypeInt8}, + {ACL_INT16, DataType::kNumberTypeInt16}, {ACL_INT32, DataType::kNumberTypeInt32}, + {ACL_INT64, DataType::kNumberTypeInt64}, {ACL_UINT8, DataType::kNumberTypeUInt8}, + {ACL_UINT16, DataType::kNumberTypeUInt16}, {ACL_UINT32, DataType::kNumberTypeUInt32}, + {ACL_UINT64, DataType::kNumberTypeUInt64}, {ACL_BOOL, DataType::kNumberTypeBool}, }; auto it = data_type_map.find(data_type); if (it == data_type_map.end()) { - return api::kInvalidDataType; + return DataType::kTypeUnknown; } else { return it->second; } @@ -51,7 +53,7 @@ inline static void PushbackIfNotNull(U *vec, T &&item) { } static void ConstructTensorDesc(const std::vector &acl_tensor_list, std::vector *names, - std::vector> *shapes, std::vector *data_types, + std::vector> *shapes, std::vector *data_types, std::vector *mem_sizes) { ClearIfNotNull(names); ClearIfNotNull(shapes); @@ -66,41 +68,69 @@ static void ConstructTensorDesc(const std::vector &acl_tensor_lis } } +static std::string ShapeToString(const std::vector &shape) { + std::string result = "["; + for (size_t i = 0; i < shape.size(); ++i) { + result += std::to_string(shape[i]); + if (i + 1 < shape.size()) { + result += ", "; + } + } + result += "]"; + return result; +} + +Status 
ModelProcess::ConstructTensors(const std::vector &acl_tensor_list, + std::vector *tensor_list) { + MS_EXCEPTION_IF_NULL(tensor_list); + std::vector names; + std::vector> shapes; + std::vector data_types; + std::vector mem_sizes; + + ConstructTensorDesc(acl_tensor_list, &names, &shapes, &data_types, &mem_sizes); + tensor_list->clear(); + if (names.size() != acl_tensor_list.size() || shapes.size() != acl_tensor_list.size() || + data_types.size() != acl_tensor_list.size() || mem_sizes.size() != acl_tensor_list.size()) { + MS_LOG(ERROR) << "Inner error, size do not match: names size " << names.size() << " shapes size " << shapes.size() + << " data types size " << data_types.size() << " mem sizes size " << mem_sizes.size() + << " acl_tensor_list size " << acl_tensor_list.size(); + return kMCFailed; + } + + aclrtMemcpyKind kind = is_run_on_device_ ? ACL_MEMCPY_HOST_TO_HOST : ACL_MEMCPY_DEVICE_TO_HOST; + for (size_t i = 0; i < acl_tensor_list.size(); ++i) { + tensor_list->emplace_back(names[i], data_types[i], shapes[i], nullptr, mem_sizes[i]); + auto ret = aclrtMemcpy((*tensor_list)[i].MutableData(), (*tensor_list)[i].DataSize(), + acl_tensor_list[i].device_data, acl_tensor_list[i].buffer_size, kind); + if (ret != ACL_ERROR_NONE) { + MS_LOG(ERROR) << "Memcpy input " << i << " from " << (is_run_on_device_ ? "host" : "device") + << " to host failed, memory size " << acl_tensor_list[i].buffer_size; + return kMCFailed; + } + } + + return kSuccess; +} + Status ModelProcess::PreInitModelResource() { model_desc_ = aclmdlCreateDesc(); aclError acl_ret = aclmdlGetDesc(model_desc_, model_id_); if (acl_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Read model desc failed"; - return FAILED; + return kMCDeviceError; } Status ret = InitInputsBuffer(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Create input buffer failed"; - return FAILED; + return ret; } ret = InitOutputsBuffer(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Create output buffer failed"; - return FAILED; + return ret; } - return SUCCESS; -} - -Status ModelProcess::LoadModelFromFile(const std::string &file_name, uint32_t *model_id) { - MS_EXCEPTION_IF_NULL(model_id); - aclError acl_ret = aclmdlLoadFromFile(file_name.c_str(), model_id); - if (acl_ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Read model file failed, file name is " << file_name; - return FAILED; - } - MS_LOG(INFO) << "Load model success " << file_name; - model_id_ = *model_id; - if (PreInitModelResource() != SUCCESS) { - aclmdlUnload(model_id_); - MS_LOG(ERROR) << "Pre init model resource failed, file name is " << file_name; - return FAILED; - } - return SUCCESS; + return kSuccess; } Status ModelProcess::InitInputsBuffer() { @@ -113,8 +143,8 @@ Status ModelProcess::InitInputsBuffer() { if (!is_run_on_device_) { // need to copy input/output to/from device ret = aclrtMalloc(&data_mem_buffer, buffer_size, ACL_MEM_MALLOC_NORMAL_ONLY); if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Malloc device input buffer faild , input size " << buffer_size; - return FAILED; + MS_LOG(ERROR) << "Malloc device input buffer failed , input size " << buffer_size; + return kMCDeviceError; } } @@ -125,7 +155,7 @@ Status ModelProcess::InitInputsBuffer() { if (!is_run_on_device_) { aclrtFree(data_mem_buffer); } - return FAILED; + return kMCDeviceError; } aclDataType data_type = aclmdlGetInputDataType(model_desc_, i); std::vector shape(dims.dims, dims.dims + dims.dimCount); @@ -137,7 +167,7 @@ Status ModelProcess::InitInputsBuffer() { 
input_infos_.emplace_back(AclTensorInfo{data_mem_buffer, buffer_size, data_type, shape, input_name}); } MS_LOG(INFO) << "Create model inputs success"; - return SUCCESS; + return kSuccess; } Status ModelProcess::CreateDataBuffer(void **data_mem_buffer, size_t buffer_size, aclmdlDataset *dataset) { @@ -154,14 +184,14 @@ Status ModelProcess::CreateDataBuffer(void **data_mem_buffer, size_t buffer_size if (!is_run_on_device_) { ret = aclrtMalloc(data_mem_buffer, buffer_size, ACL_MEM_MALLOC_NORMAL_ONLY); if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Malloc device buffer faild , buffer size " << buffer_size; - return FAILED; + MS_LOG(ERROR) << "Malloc device buffer failed , buffer size " << buffer_size; + return kMCDeviceError; } } else { ret = aclrtMallocHost(data_mem_buffer, buffer_size); if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Malloc device buffer faild , buffer size " << buffer_size; - return FAILED; + MS_LOG(ERROR) << "Malloc device buffer failed , buffer size " << buffer_size; + return kMCDeviceError; } } @@ -169,16 +199,16 @@ Status ModelProcess::CreateDataBuffer(void **data_mem_buffer, size_t buffer_size if (data_buffer == nullptr) { MS_LOG(ERROR) << "Create Data Buffer failed"; free_data_buffer(*data_mem_buffer); - return FAILED; + return kMCDeviceError; } ret = aclmdlAddDatasetBuffer(dataset, data_buffer); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "add data buffer failed"; free_data_buffer(*data_mem_buffer); aclDestroyDataBuffer(data_buffer); - return FAILED; + return kMCDeviceError; } - return SUCCESS; + return kSuccess; } Status ModelProcess::InitOutputsBuffer() { @@ -186,7 +216,7 @@ Status ModelProcess::InitOutputsBuffer() { outputs_ = aclmdlCreateDataset(); if (outputs_ == nullptr) { MS_LOG(ERROR) << "Create input dataset failed"; - return FAILED; + return kMCDeviceError; } size_t output_size = aclmdlGetNumOutputs(model_desc_); MS_LOG(INFO) << "output_size = " << output_size; @@ -194,9 +224,9 @@ Status ModelProcess::InitOutputsBuffer() { auto buffer_size = aclmdlGetOutputSizeByIndex(model_desc_, i); void *data_mem_buffer = nullptr; - if (CreateDataBuffer(&data_mem_buffer, buffer_size, outputs_) != SUCCESS) { + if (CreateDataBuffer(&data_mem_buffer, buffer_size, outputs_) != kSuccess) { MS_LOG(ERROR) << "add output data buffer failed, buffer size " << buffer_size; - return FAILED; + return kMCDeviceError; } aclmdlIODims dims; ret = aclmdlGetOutputDims(model_desc_, i, &dims); @@ -207,7 +237,7 @@ Status ModelProcess::InitOutputsBuffer() { } else { aclrtFreeHost(data_mem_buffer); } - return FAILED; + return kMCDeviceError; } aclDataType data_type = aclmdlGetOutputDataType(model_desc_, i); std::vector shape(dims.dims, dims.dims + dims.dimCount); @@ -219,7 +249,7 @@ Status ModelProcess::InitOutputsBuffer() { output_infos_.emplace_back(AclTensorInfo{data_mem_buffer, buffer_size, data_type, shape, output_name}); } MS_LOG(INFO) << "Create model output success"; - return SUCCESS; + return kSuccess; } void ModelProcess::DestroyInputsDataset() { @@ -273,50 +303,60 @@ Status ModelProcess::UnLoad() { auto ret = aclmdlUnload(model_id_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Unload model failed"; - return FAILED; + return kMCDeviceError; } if (model_desc_ != nullptr) { ret = aclmdlDestroyDesc(model_desc_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Unload model failed"; - return FAILED; + return kMCDeviceError; } model_desc_ = nullptr; } DestroyInputsBuffer(); DestroyOutputsBuffer(); MS_LOG(INFO) << "End unload model " << model_id_; - return SUCCESS; + return kSuccess; 
-Status ModelProcess::CheckAndInitInput(const std::vector &inputs) { +Status ModelProcess::CheckAndInitInput(const std::vector &inputs) { aclError ret; inputs_ = aclmdlCreateDataset(); // check inputs if (inputs.size() != input_infos_.size()) { - MS_LOG(ERROR) << "inputs count not match, required count " << input_infos_.size() << ", given count " + MS_LOG(ERROR) << "Inputs count not match, required count " << input_infos_.size() << ", given count " << inputs.size(); - return INVALID_INPUTS; + return kMCInvalidInput; } for (size_t i = 0; i < input_infos_.size(); ++i) { + if (inputs[i].Shape() != input_infos_[i].dims) { + MS_LOG(INFO) << "Note: input " << i << " shape not match, required " << ShapeToString(input_infos_[i].dims) + << ", given " << ShapeToString(inputs[i].Shape()); + } + + if (inputs[i].DataType() != TransToApiType(input_infos_[i].data_type)) { + MS_LOG(INFO) << "Note: input " << i << " data type not match, required " + << TransToApiType(input_infos_[i].data_type) << ", given " << inputs[i].DataType(); + } + if (inputs[i].DataSize() != input_infos_[i].buffer_size) { - MS_LOG(ERROR) << "input " << i << " data size not match, required size " << input_infos_[i].buffer_size + MS_LOG(ERROR) << "Input " << i << " data size not match, required size " << input_infos_[i].buffer_size << ", given count " << inputs[i].DataSize(); - return INVALID_INPUTS; + return kMCInvalidInput; } } // copy inputs for (size_t i = 0; i < input_infos_.size(); ++i) { const auto &info = input_infos_[i]; - const auto &input = inputs[i]; - const void *data = input.Data(); + auto input = inputs[i]; + const void *data = input.MutableData(); void *input_buffer = nullptr; if (!is_run_on_device_) { ret = aclrtMemcpy(info.device_data, info.buffer_size, data, input.DataSize(), ACL_MEMCPY_HOST_TO_DEVICE); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl memcpy input " << i << " data to device failed, buffer size " << input.DataSize(); - return FAILED; + return kMCDeviceError; } input_buffer = info.device_data; } else { @@ -325,23 +365,23 @@ Status ModelProcess::CheckAndInitInput(const std::vector &inputs) { auto data_buffer = aclCreateDataBuffer(input_buffer, info.buffer_size); if (data_buffer == nullptr) { MS_LOG(ERROR) << "Create Data Buffer failed"; - return FAILED; + return kMCDeviceError; } ret = aclmdlAddDatasetBuffer(inputs_, data_buffer); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "add data buffer failed"; aclDestroyDataBuffer(data_buffer); - return FAILED; + return kMCDeviceError; } } - return SUCCESS; + return kSuccess; } -Status ModelProcess::PredictFromHost(const std::vector &inputs, std::vector *outputs) { +Status ModelProcess::PredictFromHost(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); aclError acl_ret; Status ret = CheckAndInitInput(inputs); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "check or init input failed"; DestroyInputsDataset(); return ret; // forward status error @@ -361,50 +401,48 @@ Status ModelProcess::PredictFromHost(const std::vector &inputs, std::vec DestroyInputsDataset(); if (acl_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCDeviceError; } ret = BuildOutputs(outputs); - if (ret != SUCCESS) { - MS_LOG(ERROR) << "Build outputs faield"; - return FAILED; + if (ret != kSuccess) { + MS_LOG(ERROR) << "Build outputs failed"; + return ret; } - MS_LOG(INFO) << "excute model success"; - return SUCCESS; + MS_LOG(INFO) << "Execute model success"; + return kSuccess; }
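For context, this is how a caller might drive the reworked PredictFromHost: inputs arrive as self-describing MSTensor objects instead of raw Buffer blobs, and status codes are compared against kSuccess. A hedged sketch only; the tensor name "data" and the shape are illustrative, and the MSTensor(name, type, shape, data, size) constructor mirrors the one used by GetInputs/GetOutputs later in this patch:

#include <vector>

// Illustrative caller; ModelProcess is the internal ACL wrapper shown above.
Status RunOnce(ModelProcess *model_process) {
  std::vector<float> host_data(1 * 3 * 224 * 224, 0.0f);
  MSTensor input("data", DataType::kNumberTypeFloat32, {1, 3, 224, 224},
                 host_data.data(), host_data.size() * sizeof(float));
  std::vector<MSTensor> outputs;
  return model_process->PredictFromHost({input}, &outputs);
}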
-Status ModelProcess::BuildOutputs(std::vector *outputs) { +Status ModelProcess::BuildOutputs(std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); - aclError ret; // copy outputs outputs->clear(); - aclrtMemcpyKind kind = is_run_on_device_ ? ACL_MEMCPY_HOST_TO_HOST : ACL_MEMCPY_DEVICE_TO_HOST; - for (size_t i = 0; i < output_infos_.size(); ++i) { - const auto &info = output_infos_[i]; - outputs->emplace_back(Buffer()); - auto output = outputs->rbegin(); - if (!output->ResizeData(info.buffer_size)) { - MS_LOG(ERROR) << "new output data buffer failed, data size " << info.buffer_size; - return FAILED; - } - ret = aclrtMemcpy(output->MutableData(), output->DataSize(), info.device_data, info.buffer_size, kind); - if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Memcpy output " << i << " from " << (is_run_on_device_ ? "host" : "device") - << " to host failed, memory size " << info.buffer_size; - return FAILED; - } + auto inner_outputs = GetOutputs(); + if (inner_outputs.size() != output_infos_.size()) { + MS_LOG(ERROR) << "Invalid inner outputs size " << inner_outputs.size() << " does not match device output infos size " + << output_infos_.size(); + return kMCFailed; } - return SUCCESS; + (*outputs) = inner_outputs; + return kSuccess; } -Status ModelProcess::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { - ConstructTensorDesc(input_infos_, names, shapes, data_types, mem_sizes); - return SUCCESS; +std::vector ModelProcess::GetInputs() { + Status ret = ConstructTensors(input_infos_, &input_tensors_); + if (ret != kSuccess) { + MS_LOG(ERROR) << "ConstructTensors failed."; + input_tensors_.clear(); + } + + return input_tensors_; } -Status ModelProcess::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { - ConstructTensorDesc(output_infos_, names, shapes, data_types, mem_sizes); - return SUCCESS; +std::vector ModelProcess::GetOutputs() { + Status ret = ConstructTensors(output_infos_, &output_tensors_); + if (ret != kSuccess) { + MS_LOG(ERROR) << "ConstructTensors failed."; + output_tensors_.clear(); + } + + return output_tensors_; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/acl/model_process.h b/mindspore/ccsrc/cxx_api/graph/acl/model_process.h index e9c3363bd91..7906b17823c 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/model_process.h +++ b/mindspore/ccsrc/cxx_api/graph/acl/model_process.h @@ -25,7 +25,7 @@ #include "include/api/status.h" #include "include/api/types.h" -namespace mindspore::api { +namespace mindspore { struct AclTensorInfo { void *device_data; size_t buffer_size; @@ -45,14 +45,12 @@ class ModelProcess { input_infos_(), output_infos_() {} ~ModelProcess() {} - Status LoadModelFromFile(const std::string &file_name, uint32_t *model_id); + Status UnLoad(); - Status PredictFromHost(const std::vector &inputs, std::vector *outputs); + Status PredictFromHost(const std::vector &inputs, std::vector *outputs); Status PreInitModelResource(); - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; + std::vector GetInputs(); + std::vector GetOutputs(); // override this method to avoid request/reply data copy void SetIsDevice(bool is_device) { is_run_on_device_ = is_device; } @@ -62,8 +60,9 @@ class ModelProcess { private: Status
CreateDataBuffer(void **data_mem_buffer, size_t buffer_size, aclmdlDataset *dataset); - Status CheckAndInitInput(const std::vector &inputs); - Status BuildOutputs(std::vector *outputs); + Status CheckAndInitInput(const std::vector &inputs); + Status ConstructTensors(const std::vector &acl_tensor_list, std::vector *tensor_list); + Status BuildOutputs(std::vector *outputs); Status InitInputsBuffer(); Status InitOutputsBuffer(); @@ -80,7 +79,9 @@ class ModelProcess { aclmdlDataset *outputs_; std::vector input_infos_; std::vector output_infos_; + std::vector input_tensors_; + std::vector output_tensors_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXXAPI_GRAPH_ACL_MODEL_PROCESS_H diff --git a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc index 9490bcf74b1..b0c8572bc36 100644 --- a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc @@ -25,91 +25,51 @@ #include "backend/session/executor_manager.h" #include "runtime/device/kernel_runtime_manager.h" -namespace mindspore::api { +namespace mindspore { API_FACTORY_REG(GraphCell::GraphImpl, Ascend910, AscendGraphImpl); AscendGraphImpl::AscendGraphImpl() : session_impl_(nullptr), graph_id_(0), device_type_("Ascend"), - device_id_(Context::Instance().GetDeviceID()), + device_id_(GlobalContext::GetGlobalDeviceID()), context_(nullptr), - inputs_(), - outputs_(), + inputs_info_(), + outputs_info_(), input_names_(), output_names_(), - init_flag_(false), load_flag_(false) {} -AscendGraphImpl::~AscendGraphImpl() { (void)FinalizeEnv(); } +AscendGraphImpl::~AscendGraphImpl() {} Status AscendGraphImpl::InitEnv() { - if (init_flag_) { - return SUCCESS; - } - RegAllOp(); - auto ms_context = MsContext::GetInstance(); - if (ms_context == nullptr) { - MS_LOG(ERROR) << "Get Context failed!"; - return FAILED; - } - - ms_context->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); - ms_context->set_param(MS_CTX_DEVICE_ID, device_id_); - ms_context->set_param(MS_CTX_DEVICE_TARGET, kAscendDevice); - if (!context::OpenTsd(ms_context)) { - MS_LOG(ERROR) << "Session init OpenTsd failed!"; - return FAILED; + MS_LOG(INFO) << "Start to init env."; + env_guard_ = MsEnvGuard::GetEnv(device_id_); + if (env_guard_ == nullptr) { + MS_LOG(ERROR) << "Env init failed."; + return kMCDeviceError; } session_impl_ = session::SessionFactory::Get().Create(kDavinciInferenceDevice); if (session_impl_ == nullptr) { MS_LOG(ERROR) << "Session create failed!, please make sure target device:" << kDavinciInferenceDevice << " is available."; - return FAILED; + return kMCFailed; } session_impl_->Init(device_id_); - init_flag_ = true; - return SUCCESS; -} - -Status AscendGraphImpl::FinalizeEnv() { - if (!init_flag_) { - return SUCCESS; - } - - MS_LOG_INFO << "Start finalize env"; - session::ExecutorManager::Instance().Clear(); - device::KernelRuntimeManager::Instance().ClearRuntimeResource(); - - auto ms_context = MsContext::GetInstance(); - if (ms_context == nullptr) { - MS_LOG(ERROR) << "Get Context failed!"; - return FAILED; - } - - { - PythonEnvGuard guard; - if (!context::CloseTsd(ms_context)) { - MS_LOG(ERROR) << "CloseTsd failed!"; - return FAILED; - } - } - - init_flag_ = false; - MS_LOG(INFO) << "End finalize env"; - return SUCCESS; + MS_LOG(INFO) << "InitEnv success."; + return kSuccess; } Status AscendGraphImpl::CompileGraph(const std::shared_ptr &funcGraphPtr) { MS_ASSERT(session_impl_ != nullptr); try { graph_id_ 
= session_impl_->CompileGraph(NOT_NULL(funcGraphPtr)); - return SUCCESS; + return kSuccess; } catch (std::exception &e) { MS_LOG(ERROR) << "CompileGraph failed: " << e.what(); - return FAILED; + return kMCFailed; } } @@ -128,104 +88,104 @@ Status AscendGraphImpl::CheckModelInputs(const std::vector &i MS_ASSERT(session_impl_ != nullptr); std::string error_msg; if (!session_impl_->CheckModelInputs(graph_id_, inputs, &error_msg)) { - return Status(INVALID_INPUTS, error_msg); + return Status(kMCInvalidInput, error_msg); } - return SUCCESS; + return kSuccess; } -Status AscendGraphImpl::ExecuteModel(const std::vector &request, std::vector *reply) { +Status AscendGraphImpl::ExecuteModel(const std::vector &request, std::vector *reply) { MS_EXCEPTION_IF_NULL(reply); if (context_ == nullptr) { MS_LOG(ERROR) << "rtCtx is nullptr"; - return FAILED; + return kMCDeviceError; } rtError_t rt_ret = rtCtxSetCurrent(context_); if (rt_ret != RT_ERROR_NONE) { MS_LOG(ERROR) << "Set Ascend rtCtx failed"; - return FAILED; + return kMCDeviceError; } vector inputs; for (size_t i = 0; i < request.size(); i++) { - auto &item = request[i]; - auto input = inputs_[i]; + auto item = request[i]; + auto input = inputs_info_[i]; if (input->Size() != item.DataSize()) { MS_LOG(ERROR) << "Input " << i << " data size " << item.DataSize() << " not match model input data size " << input->Size(); - return FAILED; + return kMCInvalidInput; } - auto ret = memcpy_s(input->data_c(), input->Size(), item.Data(), item.DataSize()); - if (ret != SUCCESS) { - MS_LOG(ERROR) << "Tensor copy failed"; - return FAILED; + auto ret = memcpy_s(input->data_c(), input->Size(), item.MutableData(), item.DataSize()); + if (ret != kSuccess) { + MS_LOG(ERROR) << "MSTensor copy failed"; + return kMCFailed; } inputs.push_back(input); } - vector outputs = RunGraph(inputs); + last_inputs_ = inputs; + std::vector outputs = RunGraph(inputs); if (outputs.empty()) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCFailed; } + last_outputs_ = outputs; reply->clear(); - std::transform(outputs.begin(), outputs.end(), std::back_inserter(*reply), - [](const tensor::TensorPtr &tensor) { return Buffer(tensor->data_c(), tensor->Size()); }); - return SUCCESS; + *reply = GetOutputs(); + return kSuccess; } -Status AscendGraphImpl::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AscendGraphImpl::GetInputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < inputs_.size(); i++) { - auto &tensor = inputs_[i]; - GraphUtils::PushbackIfNotNull(names, input_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(inputs_info_.size()); + for (size_t i = 0; i < inputs_info_.size(); ++i) { + auto &tensor = inputs_info_[i]; + void *data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_inputs_.size()) { + data = last_inputs_[i]->data_c(); + data_size = last_inputs_[i]->Size(); + } + result[i] = + MSTensor(input_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - 
return SUCCESS; + return result; } -Status AscendGraphImpl::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AscendGraphImpl::GetOutputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < outputs_.size(); i++) { - auto &tensor = outputs_[i]; - GraphUtils::PushbackIfNotNull(names, output_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(outputs_info_.size()); + for (size_t i = 0; i < outputs_info_.size(); ++i) { + auto &tensor = outputs_info_[i]; + void *data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_outputs_.size()) { + data = last_outputs_[i]->data_c(); + data_size = last_outputs_[i]->Size(); + } + result[i] = + MSTensor(output_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - - return SUCCESS; + return result; } Status AscendGraphImpl::Load() { // check graph type if (graph_->ModelType() != ModelType::kMindIR) { MS_LOG(ERROR) << "Unsupported model type " << graph_->ModelType(); - return INVALID_INPUTS; + return kMCInvalidInput; } const auto &graph_data = GraphImpl::MutableGraphData(); @@ -234,34 +194,34 @@ Status AscendGraphImpl::Load() { // init Status ret = InitEnv(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; - return FAILED; + return ret; } // load model if (!load_flag_) { ret = CompileGraph(func_graph); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Compile graph model failed"; - return FAILED; + return ret; } - session_impl_->GetModelInputsInfo(graph_id_, &inputs_, &input_names_); - session_impl_->GetModelOutputsInfo(graph_id_, &outputs_, &output_names_); - if (inputs_.empty() || inputs_.size() != input_names_.size()) { + session_impl_->GetModelInputsInfo(graph_id_, &inputs_info_, &input_names_); + session_impl_->GetModelOutputsInfo(graph_id_, &outputs_info_, &output_names_); + if (inputs_info_.empty() || inputs_info_.size() != input_names_.size()) { MS_LOG_ERROR << "Get model inputs info failed"; - return FAILED; + return kMCInvalidInput; } - if (outputs_.empty() || outputs_.size() != output_names_.size()) { + if (outputs_info_.empty() || outputs_info_.size() != output_names_.size()) { MS_LOG_ERROR << "Get model outputs info failed"; - return FAILED; + return kMCInvalidInput; } // save d context rtError_t rt_ret = rtCtxGetCurrent(&context_); if (rt_ret != RT_ERROR_NONE || context_ == nullptr) { MS_LOG(ERROR) << "the ascend device context is null"; - return FAILED; + return kMCDeviceError; } MS_LOG(INFO) << "Load model success"; @@ -271,44 +231,112 @@ Status AscendGraphImpl::Load() { rtError_t rt_ret = rtCtxSetCurrent(context_); if (rt_ret != RT_ERROR_NONE) { MS_LOG(ERROR) << "Set the ascend device context failed"; - return FAILED; + return kMCDeviceError; } - return SUCCESS; + return kSuccess; } -Status AscendGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { +Status AscendGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (!load_flag_) 
{ Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return ret; } } - if (inputs.size() != inputs_.size()) { - MS_LOG(ERROR) << "inputs count not match, required count " << inputs_.size() << ", given count " << inputs.size(); - return INVALID_INPUTS; + if (inputs.size() != inputs_info_.size()) { + MS_LOG(ERROR) << "inputs count not match, required count " << inputs_info_.size() << ", given count " + << inputs.size(); + return kMCInvalidInput; } - for (size_t i = 0; i < inputs_.size(); ++i) { - if (inputs[i].DataSize() != inputs_[i]->Size()) { - MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_[i]->Size() << ", given count " - << inputs[i].DataSize(); - return INVALID_INPUTS; + for (size_t i = 0; i < inputs_info_.size(); ++i) { + if (inputs[i].DataSize() != inputs_info_[i]->Size()) { + MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_info_[i]->Size() + << ", given count " << inputs[i].DataSize(); + return kMCInvalidInput; } } - if (ExecuteModel(inputs, outputs) != SUCCESS) { + + Status ret = ExecuteModel(inputs, outputs); + if (ret != kSuccess) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return ret; } - if (outputs_.size() != outputs->size()) { + if (outputs_info_.size() != outputs->size()) { MS_LOG(ERROR) << "Predict output size " << outputs->size() << " not match output size got from model info " - << outputs_.size(); - return FAILED; + << outputs_info_.size(); + return kMCFailed; } - return SUCCESS; + return kSuccess; } -} // namespace mindspore::api + +AscendGraphImpl::MsEnvGuard::MsEnvGuard(uint32_t device_id) { + MS_LOG(INFO) << "Start to init env."; + device_id_ = device_id; + RegAllOp(); + auto ms_context = MsContext::GetInstance(); + if (ms_context == nullptr) { + MS_LOG(ERROR) << "Get Context failed!"; + errno_ = kMCFailed; + return; + } + + ms_context->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); + ms_context->set_param(MS_CTX_DEVICE_ID, device_id_); + ms_context->set_param(MS_CTX_DEVICE_TARGET, kAscendDevice); + auto ret = rtSetDevice(device_id_); + if (ret != RT_ERROR_NONE) { + MS_LOG(EXCEPTION) << "Device " << device_id_ << " call rtSetDevice failed, ret[" << static_cast(ret) << "]"; + } + + MS_LOG(INFO) << "InitEnv success."; + errno_ = kSuccess; +} + +AscendGraphImpl::MsEnvGuard::~MsEnvGuard() { + MS_LOG(INFO) << "Start finalize env"; + session::ExecutorManager::Instance().Clear(); + device::KernelRuntimeManager::Instance().ClearRuntimeResource(); + + auto ms_context = MsContext::GetInstance(); + if (ms_context == nullptr) { + MS_LOG(ERROR) << "Get Context failed!"; + errno_ = kMCFailed; + return; + } + + auto ret = rtDeviceReset(device_id_); + if (ret != RT_ERROR_NONE) { + MS_LOG(EXCEPTION) << "Device " << device_id_ << " call rtDeviceReset failed, ret[" << static_cast(ret) << "]"; + } + + errno_ = kSuccess; + MS_LOG(INFO) << "End finalize env"; +} + +std::shared_ptr AscendGraphImpl::MsEnvGuard::GetEnv(uint32_t device_id) { + std::shared_ptr acl_env; + std::lock_guard lock(global_ms_env_mutex_); + acl_env = global_ms_env_.lock(); + if (acl_env != nullptr) { + MS_LOG(INFO) << "Env has been initialized, skip."; + } else { + acl_env = std::make_shared(device_id); + if (acl_env->GetErrno() != kSuccess) { + MS_LOG(ERROR) << "Init MsEnvGuard failed"; + return nullptr; + } + global_ms_env_ = acl_env; + MS_LOG(INFO) << "Env init success"; + } + return acl_env; +} + +std::weak_ptr AscendGraphImpl::MsEnvGuard::global_ms_env_; +std::mutex AscendGraphImpl::MsEnvGuard::global_ms_env_mutex_; } // namespace mindspore
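MsEnvGuard::GetEnv above is a standard acquire-or-create pattern: each graph holds a shared_ptr so the device environment lives exactly as long as someone is using it, while the static weak_ptr lets later callers rejoin the live instance instead of re-initializing the device. The same idiom in miniature, detached from any MindSpore types (a generic sketch, not code from this patch):

#include <memory>
#include <mutex>
#include <utility>

// Liveness-keyed cache: the weak_ptr does not keep the resource alive, so it
// is destroyed with its last user and transparently rebuilt for the next one.
template <typename T, typename... Args>
std::shared_ptr<T> GetOrCreateShared(std::weak_ptr<T> *cache, std::mutex *mu, Args &&... args) {
  std::lock_guard<std::mutex> lock(*mu);
  if (auto existing = cache->lock()) {
    return existing;  // reuse the instance some other holder keeps alive
  }
  auto created = std::make_shared<T>(std::forward<Args>(args)...);
  *cache = created;
  return created;
}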
diff --git a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h index fae683558e3..c4595dab93f 100644 --- a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h @@ -28,40 +28,56 @@ #include "ir/anf.h" #include "cxx_api/model/model_impl.h" #include "runtime/context.h" -#include "cxx_api/graph/graph_utils.h" -namespace mindspore::api { +namespace mindspore { class AscendGraphImpl : public GraphCell::GraphImpl { public: AscendGraphImpl(); ~AscendGraphImpl() override; - Status Run(const std::vector &inputs, std::vector *outputs) override; + Status Run(const std::vector &inputs, std::vector *outputs) override; Status Load() override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: + class MsEnvGuard; + Status InitEnv(); - Status FinalizeEnv(); Status CompileGraph(const std::shared_ptr &funcGraphPtr); Status CheckModelInputs(const std::vector &inputs) const; std::vector RunGraph(const std::vector &inputs); - Status ExecuteModel(const std::vector &inputs, std::vector *outputs); + Status ExecuteModel(const std::vector &inputs, std::vector *outputs); std::shared_ptr session_impl_; uint32_t graph_id_; std::string device_type_; uint32_t device_id_; rtContext_t context_; - std::vector inputs_; - std::vector outputs_; + std::vector inputs_info_; + std::vector outputs_info_; + std::vector last_inputs_; + std::vector last_outputs_; std::vector input_names_; std::vector output_names_; - bool init_flag_; bool load_flag_; + + std::shared_ptr env_guard_; }; -} // namespace mindspore::api + +class AscendGraphImpl::MsEnvGuard { public: + explicit MsEnvGuard(uint32_t device_id); + ~MsEnvGuard(); + Status GetErrno() const { return errno_; } + static std::shared_ptr GetEnv(uint32_t device_id); + + private: + static std::weak_ptr global_ms_env_; + static std::mutex global_ms_env_mutex_; + + Status errno_; + uint32_t device_id_; +}; +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_MS_ASCEND_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc index 6af3a9ab9b6..ff7719a7150 100644 --- a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc @@ -23,15 +23,15 @@ #include "backend/session/executor_manager.h" #include "runtime/device/kernel_runtime_manager.h" -namespace mindspore::api { +namespace mindspore { API_FACTORY_REG(GraphCell::GraphImpl, GPU, GPUGraphImpl); GPUGraphImpl::GPUGraphImpl() : session_impl_(nullptr), graph_id_(0), - device_id_(Context::Instance().GetDeviceID()), - inputs_(), - outputs_(), + device_id_(GlobalContext::GetGlobalDeviceID()), + inputs_info_(), + outputs_info_(), input_names_(), output_names_(), init_flag_(false), @@ -40,13 +40,13 @@ GPUGraphImpl::GPUGraphImpl() Status GPUGraphImpl::InitEnv() { if (init_flag_) { MS_LOG(WARNING) << "Initialized again, return success."; - return SUCCESS; + return kSuccess; } auto ms_context = MsContext::GetInstance(); if (ms_context == nullptr) { MS_LOG(ERROR) << "Get Context failed!"; - return FAILED; + return kMCFailed;
} ms_context->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); ms_context->set_param(MS_CTX_DEVICE_ID, device_id_); @@ -57,18 +57,18 @@ Status GPUGraphImpl::InitEnv() { if (session_impl_ == nullptr) { MS_LOG(ERROR) << "Session create failed!, please make sure target device:" << kGpuInferenceDevice << " is available."; - return FAILED; + return kMCFailed; } session_impl_->Init(device_id_); init_flag_ = true; - return SUCCESS; + return kSuccess; } Status GPUGraphImpl::FinalizeEnv() { if (!init_flag_) { MS_LOG(WARNING) << "Never initialize before, return success"; - return SUCCESS; + return kSuccess; } MS_LOG_INFO << "Start finalize env"; @@ -77,14 +77,14 @@ Status GPUGraphImpl::FinalizeEnv() { init_flag_ = false; MS_LOG(INFO) << "End finalize env"; - return SUCCESS; + return kSuccess; } Status GPUGraphImpl::Load() { // check graph type if (graph_->ModelType() != ModelType::kMindIR) { MS_LOG(ERROR) << "Unsupported model type " << graph_->ModelType(); - return INVALID_INPUTS; + return kMCInvalidInput; } const auto &graph_data = GraphImpl::MutableGraphData(); @@ -93,38 +93,38 @@ Status GPUGraphImpl::Load() { // init Status ret = InitEnv(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; - return FAILED; + return kMCDeviceError; } ret = CompileGraph(func_graph); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Compile graph model failed"; - return FAILED; + return kMCFailed; } - session_impl_->GetModelInputsInfo(graph_id_, &inputs_, &input_names_); - session_impl_->GetModelOutputsInfo(graph_id_, &outputs_, &output_names_); - if (inputs_.empty() || inputs_.size() != input_names_.size()) { + session_impl_->GetModelInputsInfo(graph_id_, &inputs_info_, &input_names_); + session_impl_->GetModelOutputsInfo(graph_id_, &outputs_info_, &output_names_); + if (inputs_info_.empty() || inputs_info_.size() != input_names_.size()) { MS_LOG_ERROR << "Get model inputs info failed"; - return FAILED; + return kMCInvalidInput; } - if (outputs_.empty() || outputs_.size() != output_names_.size()) { + if (outputs_info_.empty() || outputs_info_.size() != output_names_.size()) { MS_LOG_ERROR << "Get model outputs info failed"; - return FAILED; + return kMCInvalidInput; } load_flag_ = true; - return SUCCESS; + return kSuccess; } Status GPUGraphImpl::CompileGraph(const std::shared_ptr &funcGraphPtr) { MS_ASSERT(session_impl_ != nullptr); try { graph_id_ = session_impl_->CompileGraph(NOT_NULL(funcGraphPtr)); - return SUCCESS; + return kSuccess; } catch (std::exception &e) { MS_LOG(ERROR) << "CompileGraph failed: " << e.what(); - return FAILED; + return kMCFailed; } } @@ -139,118 +139,118 @@ std::vector GPUGraphImpl::RunGraph(const std::vector &request, std::vector *reply) { +Status GPUGraphImpl::ExecuteModel(const std::vector &request, std::vector *reply) { MS_EXCEPTION_IF_NULL(reply); vector inputs; for (size_t i = 0; i < request.size(); i++) { auto &item = request[i]; - auto input = inputs_[i]; + auto input = inputs_info_[i]; if (input->Size() != item.DataSize()) { MS_LOG(ERROR) << "Input " << i << " data size " << item.DataSize() << " not match model input data size " << input->Size(); - return FAILED; + return kMCInvalidInput; } - auto ret = memcpy_s(input->data_c(), input->Size(), item.Data(), item.DataSize()); - if (ret != SUCCESS) { + auto ret = memcpy_s(input->data_c(), input->Size(), item.Data().get(), item.DataSize()); + if (ret != kSuccess) { MS_LOG(ERROR) << "Tensor copy failed"; - return FAILED; + return kMCFailed; } inputs.push_back(input); } - vector 
outputs = RunGraph(inputs); + last_inputs_ = inputs; + std::vector outputs = RunGraph(inputs); if (outputs.empty()) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCFailed; } + last_outputs_ = outputs; reply->clear(); - std::transform(outputs.begin(), outputs.end(), std::back_inserter(*reply), - [](const tensor::TensorPtr &tensor) { return Buffer(tensor->data_c(), tensor->Size()); }); - return SUCCESS; + *reply = GetOutputs(); + return kSuccess; } -Status GPUGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { +Status GPUGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return ret; } } - if (inputs.size() != inputs_.size()) { - MS_LOG(ERROR) << "inputs count not match, required count " << inputs_.size() << ", given count " << inputs.size(); - return INVALID_INPUTS; + if (inputs.size() != inputs_info_.size()) { + MS_LOG(ERROR) << "inputs count not match, required count " << inputs_info_.size() << ", given count " + << inputs.size(); + return kMCInvalidInput; } - for (size_t i = 0; i < inputs_.size(); ++i) { - if (inputs[i].DataSize() != inputs_[i]->Size()) { - MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_[i]->Size() << ", given count " - << inputs[i].DataSize(); - return INVALID_INPUTS; + for (size_t i = 0; i < inputs_info_.size(); ++i) { + if (inputs[i].DataSize() != inputs_info_[i]->Size()) { + MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_info_[i]->Size() + << ", given count " << inputs[i].DataSize(); + return kMCInvalidInput; } } - if (ExecuteModel(inputs, outputs) != SUCCESS) { + if (ExecuteModel(inputs, outputs) != kSuccess) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCFailed; } - if (outputs_.size() != outputs->size()) { + if (outputs_info_.size() != outputs->size()) { MS_LOG(ERROR) << "Predict output size " << outputs->size() << " not match output size got from model info " - << outputs_.size(); - return FAILED; + << outputs_info_.size(); + return kMCFailed; } - return SUCCESS; + return kSuccess; } -Status GPUGraphImpl::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector GPUGraphImpl::GetInputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < inputs_.size(); i++) { - auto &tensor = inputs_[i]; - GraphUtils::PushbackIfNotNull(names, input_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(inputs_info_.size()); + for (size_t i = 0; i < inputs_info_.size(); ++i) { + auto &tensor = inputs_info_[i]; + void *data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_inputs_.size()) { + data = last_inputs_[i]->data_c(); + data_size = last_inputs_[i]->Size(); + } + result[i] = + MSTensor(input_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - return SUCCESS; + return result; } 
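The GPU implementation mirrors the Ascend one: the old four-out-parameter GetInputsInfo/GetOutputsInfo queries collapse into vectors of self-describing MSTensor. A sketch of the caller side through the public Model API; the include path and the loop body are illustrative:

#include <iostream>
#include "include/api/model.h"

// Each MSTensor carries its own name, shape and byte size, so callers walk
// one vector instead of four parallel output parameters.
void DumpInputs(mindspore::Model *model) {
  for (auto &tensor : model->GetInputs()) {
    std::cout << "input " << tensor.Name() << ", bytes " << tensor.DataSize() << ", shape";
    for (auto dim : tensor.Shape()) {
      std::cout << " " << dim;
    }
    std::cout << std::endl;
  }
}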
-Status GPUGraphImpl::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector GPUGraphImpl::GetOutputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < outputs_.size(); i++) { - auto &tensor = outputs_[i]; - GraphUtils::PushbackIfNotNull(names, output_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(outputs_info_.size()); + for (size_t i = 0; i < outputs_info_.size(); ++i) { + auto &tensor = outputs_info_[i]; + void *data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_outputs_.size()) { + data = last_outputs_[i]->data_c(); + data_size = last_outputs_[i]->Size(); + } + result[i] = + MSTensor(output_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - - return SUCCESS; + return result; } - -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h index fca0323f82f..0058e7fbcd2 100644 --- a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h @@ -25,20 +25,17 @@ #include "backend/session/session_basic.h" #include "ir/anf.h" #include "cxx_api/model/model_impl.h" -#include "cxx_api/graph/graph_utils.h" -namespace mindspore::api { +namespace mindspore { class GPUGraphImpl : public GraphCell::GraphImpl { public: GPUGraphImpl(); ~GPUGraphImpl() override = default; - Status Run(const std::vector &inputs, std::vector *outputs) override; + Status Run(const std::vector &inputs, std::vector *outputs) override; Status Load() override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: Status InitEnv(); @@ -46,14 +43,16 @@ class GPUGraphImpl : public GraphCell::GraphImpl { Status CompileGraph(const std::shared_ptr &funcGraphPtr); Status CheckModelInputs(const std::vector &inputs) const; std::vector RunGraph(const std::vector &inputs); - Status ExecuteModel(const std::vector &inputs, std::vector *outputs); + Status ExecuteModel(const std::vector &inputs, std::vector *outputs); std::shared_ptr session_impl_; uint32_t graph_id_; std::string device_type_; uint32_t device_id_; - std::vector inputs_; - std::vector outputs_; + std::vector inputs_info_; + std::vector outputs_info_; + std::vector last_inputs_; + std::vector last_outputs_; std::vector input_names_; std::vector output_names_; bool init_flag_; @@ -63,5 +62,5 @@ class GPUGraphImpl : public GraphCell::GraphImpl { uint32_t batch_size_; uint32_t workspace_size_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_MS_GPU_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/graph.cc b/mindspore/ccsrc/cxx_api/graph/graph.cc index 
902bbcabb09..e38266d6248 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph.cc +++ b/mindspore/ccsrc/cxx_api/graph/graph.cc @@ -17,15 +17,19 @@ #include "cxx_api/graph/graph_data.h" #include "utils/log_adapter.h" -namespace mindspore::api { +namespace mindspore { Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} Graph::Graph(std::shared_ptr &&graph_data) : graph_data_(graph_data) {} Graph::~Graph() {} +Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {} + +bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } + ModelType Graph::ModelType() const { MS_EXCEPTION_IF_NULL(graph_data_); return graph_data_->ModelType(); } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/graph_data.cc b/mindspore/ccsrc/cxx_api/graph/graph_data.cc index a1092e21b16..85be64f8ffd 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph_data.cc +++ b/mindspore/ccsrc/cxx_api/graph/graph_data.cc @@ -19,7 +19,7 @@ #include "framework/common/helper/model_helper.h" #endif -namespace mindspore::api { +namespace mindspore { Graph::GraphData::GraphData(const FuncGraphPtr &func_graph, enum ModelType model_type) : func_graph_(nullptr), om_data_(), model_type_(ModelType::kUnknownType) { if (model_type != ModelType::kMindIR) { @@ -72,4 +72,4 @@ Buffer Graph::GraphData::GetOMData() const { return om_data_; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/graph_data.h b/mindspore/ccsrc/cxx_api/graph/graph_data.h index 7e7a2ac9c53..7b84ee5efb3 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph_data.h +++ b/mindspore/ccsrc/cxx_api/graph/graph_data.h @@ -24,7 +24,7 @@ #include "include/api/types.h" #include "ir/func_graph.h" -namespace mindspore::api { +namespace mindspore { class Graph::GraphData { public: GraphData(); @@ -46,5 +46,5 @@ class Graph::GraphData { Buffer om_data_; enum ModelType model_type_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_DATA_H diff --git a/mindspore/ccsrc/cxx_api/graph/graph_impl.h b/mindspore/ccsrc/cxx_api/graph/graph_impl.h index a2c651c4cf4..42c843225d4 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/graph_impl.h @@ -26,7 +26,7 @@ #include "cxx_api/graph/graph_data.h" #include "utils/utils.h" -namespace mindspore::api { +namespace mindspore { class GraphCell::GraphImpl { public: GraphImpl() = default; @@ -35,17 +35,14 @@ class GraphCell::GraphImpl { std::shared_ptr &MutableGraphData() const { return graph_->graph_data_; } void SetGraph(const std::shared_ptr &graph) { graph_ = graph; } - virtual Status Run(const std::vector &inputs, std::vector *outputs) = 0; + virtual Status Run(const std::vector &inputs, std::vector *outputs) = 0; virtual Status Load() = 0; - virtual Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) = 0; - virtual Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) = 0; + virtual std::vector GetInputs() = 0; + virtual std::vector GetOutputs() = 0; protected: std::shared_ptr graph_; }; -} // namespace mindspore::api - +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/graph_utils.h b/mindspore/ccsrc/cxx_api/graph/graph_utils.h deleted file mode 100644 index 6a087e019d6..00000000000 --- a/mindspore/ccsrc/cxx_api/graph/graph_utils.h +++ 
/dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_UTILS_H -#define MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_UTILS_H -#include -#include -#include "include/api/types.h" -#include "ir/dtype/type_id.h" -#include "utils/log_adapter.h" - -namespace mindspore::api { -class GraphUtils { - public: - static DataType TransTypeId2InferDataType(TypeId type_id) { - const std::map id2type_map{ - {TypeId::kNumberTypeBegin, api::kMsUnknown}, {TypeId::kNumberTypeBool, api::kMsBool}, - {TypeId::kNumberTypeFloat64, api::kMsFloat64}, {TypeId::kNumberTypeInt8, api::kMsInt8}, - {TypeId::kNumberTypeUInt8, api::kMsUint8}, {TypeId::kNumberTypeInt16, api::kMsInt16}, - {TypeId::kNumberTypeUInt16, api::kMsUint16}, {TypeId::kNumberTypeInt32, api::kMsInt32}, - {TypeId::kNumberTypeUInt32, api::kMsUint32}, {TypeId::kNumberTypeInt64, api::kMsInt64}, - {TypeId::kNumberTypeUInt64, api::kMsUint64}, {TypeId::kNumberTypeFloat16, api::kMsFloat16}, - {TypeId::kNumberTypeFloat32, api::kMsFloat32}, - }; - - auto it = id2type_map.find(type_id); - if (it != id2type_map.end()) { - return it->second; - } - - MS_LOG(WARNING) << "Unsupported data id " << type_id; - return api::kMsUnknown; - } - - template - inline static void ClearIfNotNull(T *vec) { - if (vec != nullptr) { - vec->clear(); - } - } - - template - inline static void PushbackIfNotNull(U *vec, T &&item) { - if (vec != nullptr) { - vec->emplace_back(item); - } - } -}; -} // namespace mindspore::api - -#endif // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_UTILS_H diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc b/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc index 57dfb03387d..0b299c429f2 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc @@ -16,47 +16,53 @@ #include "cxx_api/model/acl/acl_model.h" #include +#include "include/api/context.h" #include "cxx_api/factory.h" -#include "cxx_api/python_utils.h" -namespace mindspore::api { +namespace mindspore { API_FACTORY_REG(ModelImpl, Ascend310, AclModel); -Status AclModel::Build(const std::map &options_map) { +Status AclModel::Build() { MS_LOG(INFO) << "Start build model."; MS_EXCEPTION_IF_NULL(graph_); - std::unique_ptr options = std::make_unique(options_map); - std::string options_str = GenerateOptionsStr(options_map); - MS_EXCEPTION_IF_NULL(options); - if (graph_cell_ != nullptr && options_str == options_str_) { + + if (graph_cell_ != nullptr) { MS_LOG(INFO) << "This model has been built, skip."; - return SUCCESS; + return kSuccess; } if (graph_cell_ == nullptr && graph_->ModelType() == ModelType::kOM) { + MS_LOG(INFO) << "Note: Load om model and all build options will be ignored."; graph_cell_ = std::make_shared(graph_); MS_EXCEPTION_IF_NULL(graph_cell_); - if (!options_map.empty()) { - MS_LOG(WARNING) << "All build options will be ignored."; + return kSuccess; + } + + std::unique_ptr options = std::make_unique(model_context_); + 
MS_EXCEPTION_IF_NULL(options); + std::string options_key = options->GenAclOptionsKey(); + std::shared_ptr graph; + if (auto iter = dynamic_size_graph_map_.find(options_key); iter != dynamic_size_graph_map_.end()) { + MS_LOG(INFO) << "These options have been built, read cache."; + graph = iter->second; + } else { + auto func_graph = ModelImpl::GetFuncGraph(); + MS_EXCEPTION_IF_NULL(func_graph); + model_converter_.set_options(options.get()); + auto om_data = model_converter_.LoadMindIR(func_graph); + if (om_data.Data() == nullptr || om_data.DataSize() == 0) { + MS_LOG(ERROR) << "Load MindIR failed."; + return kMCFailed; } - return SUCCESS; + graph = std::make_shared(std::make_shared(om_data, ModelType::kOM)); + dynamic_size_graph_map_[options_key] = graph; } - auto func_graph = ModelImpl::GetFuncGraph(); - MS_EXCEPTION_IF_NULL(func_graph); - model_converter_.set_options(options.get()); - auto om_data = model_converter_.LoadMindIR(func_graph); - if (om_data.Data() == nullptr || om_data.DataSize() == 0) { - MS_LOG(ERROR) << "Load MindIR failed."; - return FAILED; - } - - auto graph = std::make_shared(std::make_shared(om_data, ModelType::kOM)); MS_EXCEPTION_IF_NULL(graph); auto graph_cell = std::make_shared(graph); MS_EXCEPTION_IF_NULL(graph_cell); auto ret = ModelImpl::Load(graph_cell); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load failed."; return ret; } @@ -64,64 +70,97 @@ Status AclModel::Build(const std::map &options_map) { // save result graph_cell_ = graph_cell; options_ = std::move(options); - options_str_ = options_str; MS_LOG(INFO) << "Build model success."; - return SUCCESS; + return kSuccess; } -Status AclModel::Train(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; +Status AclModel::Resize(const std::vector &inputs, const std::vector> &dims) { + MS_LOG(INFO) << "Start to resize model."; + MS_EXCEPTION_IF_NULL(graph_); + if (graph_->ModelType() == ModelType::kOM) { + MS_LOG(ERROR) << "Resize is not supported for OM model."; + return kMCFailed; + } + + auto origin_inputs = GetInputs(); + if (inputs.size() != origin_inputs.size()) { + MS_LOG(ERROR) << "Invalid inputs size " << inputs.size() << " not match model inputs size " << origin_inputs.size(); + return kMCInvalidInput; + } + + if (inputs.size() != dims.size()) { + MS_LOG(ERROR) << "Invalid dims size " << dims.size() << " not match inputs size " << inputs.size(); + return kMCInvalidInput; + } + + if (model_context_ == nullptr) { + model_context_ = std::make_shared(); + } + + std::string input_shape_option; + for (size_t i = 0; i < inputs.size(); ++i) { + if (inputs[i].Name() != origin_inputs[i].Name()) { + MS_LOG(ERROR) << "Invalid inputs " << i << " name " << inputs[i].Name() << " not match model input name " + << origin_inputs[i].Name(); + return kMCInvalidInput; + } + input_shape_option += inputs[i].Name() + ":"; + for (size_t j = 0; j < dims[i].size(); ++j) { + input_shape_option += std::to_string(dims[i][j]); + if (j + 1 < dims[i].size()) { + input_shape_option += ","; + } + } + if (i + 1 < inputs.size()) { + input_shape_option += ";"; + } + } + MS_LOG(INFO) << "Set input shape option to " << input_shape_option; + ModelContext::SetInputShape(model_context_, input_shape_option); + auto graph_cell_bak = std::move(graph_cell_); + auto ret = Build(); + if (ret != kSuccess) { + MS_LOG(ERROR) << "Resize build failed."; + graph_cell_ = std::move(graph_cell_bak); + return ret; + } + MS_LOG(INFO) << "Resize success."; + return kSuccess; }
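Resize serializes the requested shapes into ATC's input_shape syntax: one name:dim,dim,... group per input, with groups joined by ';'. For a model with two inputs named "data" and "label" (illustrative names) resized to [1,3,224,224] and [1,10], SetInputShape would receive "data:1,3,224,224;label:1,10". A caller-side sketch through the public Model API, with the dims values assumed:

// Illustrative only: asks the model to rebuild for caller-chosen shapes.
Status ResizeInputs(mindspore::Model *model) {
  std::vector<MSTensor> inputs = model->GetInputs();
  std::vector<std::vector<int64_t>> dims = {{1, 3, 224, 224}, {1, 10}};  // assumed two-input model
  return model->Resize(inputs, dims);  // triggers the cached-options rebuild above
}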
-Status AclModel::Eval(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; -} - -Status AclModel::Predict(const std::vector &inputs, std::vector *outputs) { +Status AclModel::Predict(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (graph_ == nullptr) { MS_LOG(ERROR) << "Invalid data, graph_ is null."; - return FAILED; + return kMCFailed; } if (graph_cell_ == nullptr) { MS_LOG(WARNING) << "Model has not been built, it will be built with default options"; - Status ret = Build({}); - if (ret != SUCCESS) { + Status ret = Build(); + if (ret != kSuccess) { MS_LOG(ERROR) << "Build model failed."; - return FAILED; + return ret; } } MS_EXCEPTION_IF_NULL(graph_cell_); Status ret = graph_cell_->Run(inputs, outputs); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Run graph failed."; - return FAILED; + return ret; } - return SUCCESS; + return kSuccess; } -Status AclModel::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector AclModel::GetInputs() { MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return graph_cell_->GetInputs(); } -Status AclModel::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector AclModel::GetOutputs() { MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetOutputsInfo(names, shapes, data_types, mem_sizes); + return graph_cell_->GetOutputs(); } - -std::string AclModel::GenerateOptionsStr(const std::map &options) { - std::string ret; - for (auto &[key, value] : options) { - ret += key + "^" + value + "^^"; - } - return ret; -} -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model.h b/mindspore/ccsrc/cxx_api/model/acl/acl_model.h index 4455eba7d10..f9097779d74 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model.h +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model.h @@ -31,30 +31,25 @@ #include "ir/tensor.h" #include "ir/anf.h" -namespace mindspore::api { +namespace mindspore { class AclModel : public ModelImpl { public: - AclModel() : model_converter_(), options_(nullptr), options_str_() {} + AclModel() : model_converter_(), options_(nullptr) {} ~AclModel() = default; - Status Build(const std::map &options_map) override; + Status Build() override; + Status Resize(const std::vector &inputs, const std::vector> &dims) override; - Status Train(const DataSet &dataset, std::map *outputs) override; - Status Eval(const DataSet &dataset, std::map *outputs) override; - Status Predict(const std::vector &inputs, std::vector *outputs) override; + Status Predict(const std::vector &inputs, std::vector *outputs) override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: - static std::string GenerateOptionsStr(const std::map &options); - std::shared_ptr graph_cell_; ModelConverter model_converter_; std::unique_ptr options_; - std::string options_str_; + std::map> dynamic_size_graph_map_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_ACL_MODEL_H diff --git
a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc index 3f6cb0d41e6..ffc059f7709 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc @@ -18,23 +18,31 @@ #include "utils/log_adapter.h" #include "external/ge/ge_api_types.h" -namespace mindspore::api { -static std::string ParseOption(const std::map &options, const std::string &key) { - auto iter = options.find(key); - if (iter != options.end()) { - return iter->second; - } - return ""; -} +namespace mindspore { +static const std::map kSupportedDtypeOptionMap = {{DataType::kNumberTypeFloat16, "FP16"}, + {DataType::kNumberTypeFloat32, "FP32"}, + {DataType::kNumberTypeUInt8, "UINT8"}}; -AclModelOptions::AclModelOptions(const std::map &options) { - // to acl - insert_op_cfg_path = ParseOption(options, kModelOptionInsertOpCfgPath); - input_format = ParseOption(options, kModelOptionInputFormat); - input_shape = ParseOption(options, kModelOptionInputShape); - output_type = ParseOption(options, kModelOptionOutputType); - precision_mode = ParseOption(options, kModelOptionPrecisionMode); - op_select_impl_mode = ParseOption(options, kModelOptionOpSelectImplMode); +AclModelOptions::AclModelOptions(const std::shared_ptr &context) { + if (context == nullptr) { + return; + } + insert_op_cfg_path = ModelContext::GetInsertOpConfigPath(context); + input_format = ModelContext::GetInputFormat(context); + input_shape = ModelContext::GetInputShape(context); + + auto out_type = ModelContext::GetOutputType(context); + auto iter = kSupportedDtypeOptionMap.find(out_type); + if (out_type == DataType::kTypeUnknown) { + // do nothing + } else if (iter == kSupportedDtypeOptionMap.end()) { + MS_LOG(WARNING) << "Unsupported output type " << out_type << ", use FP32 as default."; + } else { + output_type = iter->second; + } + + precision_mode = ModelContext::GetPrecisionMode(context); + op_select_impl_mode = ModelContext::GetOpSelectImplMode(context); } std::tuple, std::map> AclModelOptions::GenAclOptions() @@ -69,4 +77,16 @@ std::tuple, std::map #include #include +#include #include "include/api/types.h" #include "include/api/status.h" +#include "include/api/context.h" -namespace mindspore::api { +namespace mindspore { struct AclModelOptions { - std::string output_node; // todo: at convert.cc::BuildGraph(), no atc options // build options std::string insert_op_cfg_path; std::string input_format; @@ -35,12 +36,13 @@ struct AclModelOptions { std::string op_select_impl_mode; std::string soc_version = "Ascend310"; - explicit AclModelOptions(const std::map &options); + explicit AclModelOptions(const std::shared_ptr &context); ~AclModelOptions() = default; // return tuple std::tuple, std::map> GenAclOptions() const; + std::string GenAclOptionsKey() const; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXXAPI_SESSION_ACL_OPTION_PARSER_H diff --git a/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc b/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc index 28ec34188c1..b1dc2e88582 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc @@ -22,9 +22,8 @@ #include "include/api/serialization.h" #include "graph/model.h" #include "cxx_api/model/model_converter_utils/multi_process.h" -#include "cxx_api/python_utils.h" -namespace mindspore::api { +namespace mindspore { namespace { transform::TensorOrderMap GetParams(const FuncGraphPtr 
&anf_graph) { transform::TensorOrderMap res; @@ -86,25 +85,25 @@ transform::DfGraphPtr ModelConverter::ConvertFuncGraphToAIR(const FuncGraphPtr & para->set_name(name); } - transform::DfGraphConvertor convertor(anf_graph); + transform::DfGraphConvertor converter(anf_graph); std::string net_id = "0"; std::string init_graph = "init_subgraph." + net_id; std::string checkpoint_name = "save." + net_id; - convertor.set_training(false); - (void)convertor.ConvertAllNode().InitParam(GetParams(anf_graph)).BuildGraph(); - (void)convertor.GenerateCheckpointGraph(); - if (convertor.ErrCode() != 0) { + converter.set_training(false); + (void)converter.ConvertAllNode().InitParam(GetParams(anf_graph)).BuildGraph(); + (void)converter.GenerateCheckpointGraph(); + if (converter.ErrCode() != 0) { transform::DfGraphManager::GetInstance().ClearGraph(); - MS_LOG(ERROR) << "Convert df graph failed, err:" << convertor.ErrCode(); + MS_LOG(ERROR) << "Convert df graph failed, err:" << converter.ErrCode(); return nullptr; } - (void)transform::DfGraphManager::GetInstance().AddGraph(anf_graph->ToString(), convertor.GetComputeGraph()); - (void)transform::DfGraphManager::GetInstance().AddGraph(init_graph, convertor.GetInitGraph()); - (void)transform::DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, convertor.GetBroadcastGraph()); + (void)transform::DfGraphManager::GetInstance().AddGraph(anf_graph->ToString(), converter.GetComputeGraph()); + (void)transform::DfGraphManager::GetInstance().AddGraph(init_graph, converter.GetInitGraph()); + (void)transform::DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, converter.GetBroadcastGraph()); transform::Status ret = - transform::DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); + transform::DfGraphManager::GetInstance().AddGraph(checkpoint_name, converter.GetSaveCheckpointGraph()); if (ret == transform::Status::SUCCESS) { transform::DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); } @@ -158,7 +157,7 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { auto df_graph = ConvertFuncGraphToAIR(func_graph); if (df_graph == nullptr) { MS_LOG(ERROR) << "Convert FuncGraph to AscendIR failed."; - return FAILED; + return kMCFailed; } ge::Model model; ge::Buffer model_data; @@ -166,14 +165,14 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { auto ge_ret = model.Save(model_data); if (ge_ret != ge::SUCCESS) { MS_LOG(ERROR) << "Save ge model to buffer failed."; - return FAILED; + return kMCFailed; } // send original model to child auto status = multi_process->SendMsg(model_data.data(), model_data.size()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send original model to child process failed"; - return FAILED; + return status; } // receive convert model result from child CreateBufferCall call = [&buffer_ret](size_t msg_len) -> uint8_t * { @@ -181,11 +180,11 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { return reinterpret_cast(buffer_ret.MutableData()); }; status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive result model from child process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto child_process = [this](MultiProcess *multi_process) -> Status { MS_EXCEPTION_IF_NULL(multi_process); @@ -196,25 +195,25 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { return 
reinterpret_cast(model.MutableData()); }; auto status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive original model from parent process failed"; - return FAILED; + return status; } Buffer model_result = LoadAscendIRInner(model); if (model_result.DataSize() == 0) { MS_LOG_ERROR << "Convert model from MindIR to OM failed"; - return FAILED; + return kMCFailed; } // send result model to parent status = multi_process->SendMsg(model_result.Data(), model_result.DataSize()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send result model to parent process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto status = multi_process.MainProcess(parent_process, child_process); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Convert MindIR model to OM model failed"; } else { MS_LOG_INFO << "Convert MindIR model to OM model success"; @@ -229,9 +228,9 @@ Buffer ModelConverter::LoadAscendIR(const Buffer &model_data) { MS_EXCEPTION_IF_NULL(multi_process); // send original model to child auto status = multi_process->SendMsg(model_data.Data(), model_data.DataSize()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send original model to child process failed"; - return FAILED; + return status; } // receive convert model result from child CreateBufferCall call = [&buffer_ret](size_t msg_len) -> uint8_t * { @@ -239,11 +238,11 @@ Buffer ModelConverter::LoadAscendIR(const Buffer &model_data) { return reinterpret_cast(buffer_ret.MutableData()); }; status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive result model from child process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto child_process = [this](MultiProcess *multi_process) -> Status { MS_EXCEPTION_IF_NULL(multi_process); @@ -254,25 +253,25 @@ Buffer ModelConverter::LoadAscendIR(const Buffer &model_data) { return reinterpret_cast(model.MutableData()); }; auto status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive original model from parent process failed"; - return FAILED; + return status; } Buffer model_result = LoadAscendIRInner(model); if (model_result.DataSize() == 0) { MS_LOG_ERROR << "Convert model from AIR to OM failed"; - return FAILED; + return kMCFailed; } // send result model to parent status = multi_process->SendMsg(model_result.Data(), model_result.DataSize()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send result model to parent process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto status = multi_process.MainProcess(parent_process, child_process); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Convert AIR model to OM model failed"; } else { MS_LOG_INFO << "Convert AIR model to OM model success"; @@ -326,4 +325,4 @@ Buffer ModelConverter::LoadAscendIRInner(const Buffer &model_data) { auto om_data = BuildAirModel(df_graph, init_options, build_options); return om_data; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/acl/model_converter.h b/mindspore/ccsrc/cxx_api/model/acl/model_converter.h index eabc4dd1d33..7e46f142ae8 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/model_converter.h +++ 
b/mindspore/ccsrc/cxx_api/model/acl/model_converter.h @@ -27,7 +27,7 @@ #include "external/ge/ge_ir_build.h" #include "cxx_api/model/acl/acl_model_options.h" -namespace mindspore::api { +namespace mindspore { class ModelConverter { public: ModelConverter() : options_(nullptr) {} @@ -46,6 +46,5 @@ class ModelConverter { Buffer LoadMindIRInner(const FuncGraphPtr &func_graph); Buffer LoadAscendIRInner(const Buffer &model_data); }; -} // namespace mindspore::api - +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXXAPI_SESSION_ACL_MODEL_CONVERTER_H diff --git a/mindspore/ccsrc/cxx_api/model/model.cc b/mindspore/ccsrc/cxx_api/model/model.cc index b3fc97ef218..88d364f7f2e 100644 --- a/mindspore/ccsrc/cxx_api/model/model.cc +++ b/mindspore/ccsrc/cxx_api/model/model.cc @@ -19,49 +19,45 @@ #include "cxx_api/factory.h" #include "utils/utils.h" -namespace mindspore::api { -Status Model::Build(const std::map &options) { +namespace mindspore { +Status Model::Build() { MS_EXCEPTION_IF_NULL(impl_); - return impl_->Build(options); + return impl_->Build(); } -Status Model::Train(const DataSet &dataset, bool data_sink, std::map *outputs) { +Status Model::Resize(const std::vector &inputs, const std::vector> &dims) { MS_EXCEPTION_IF_NULL(impl_); - return impl_->Train(dataset, outputs); + return impl_->Resize(inputs, dims); } -Status Model::Eval(const DataSet &dataset, bool data_sink, std::map *outputs) { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->Eval(dataset, outputs); -} - -Status Model::Predict(const std::vector &inputs, std::vector *outputs) { +Status Model::Predict(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(impl_); return impl_->Predict(inputs, outputs); } -Status Model::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector Model::GetInputs() { MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return impl_->GetInputs(); } -Status Model::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector Model::GetOutputs() { MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetOutputsInfo(names, shapes, data_types, mem_sizes); + return impl_->GetOutputs(); } -Model::Model(const GraphCell &graph_cell) - : impl_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { +Model::Model(const GraphCell &graph_cell, const std::shared_ptr &model_context) + : impl_(Factory::Instance().Create(mindspore::GlobalContext::GetGlobalDeviceTarget())) { if (impl_ == nullptr) { - MS_LOG(EXCEPTION) << "Create session type " << Context::Instance().GetDeviceTarget() << " failed"; + MS_LOG(EXCEPTION) << "Create session type " << mindspore::GlobalContext::GetGlobalDeviceTarget() << " failed"; } MS_EXCEPTION_IF_NULL(graph_cell.GetGraph()); impl_->SetGraph(std::make_shared(*graph_cell.GetGraph())); + impl_->SetContext(model_context); } -Model::Model(const std::vector &network) { MS_LOG(EXCEPTION) << "Unsupported feature."; } +Model::Model(const std::vector &network, const std::shared_ptr &model_context) { + MS_LOG(EXCEPTION) << "Unsupported feature."; +} Model::~Model() {} @@ -69,4 +65,4 @@ bool Model::CheckModelSupport(const std::string &device_type, ModelType) { return Factory::Instance().CheckModelSupport(device_type); } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc 
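Editor's note: taken together, the model.cc changes give the refactored Model API this call sequence — configure the device once through GlobalContext, construct the Model from a GraphCell plus an optional ModelContext, then Build/Predict. A hedged end-to-end sketch; the GraphCell(Graph) overload, header paths, and the file name are assumptions not shown in this patch:

```cpp
// Hypothetical caller of the new option-free Model API.
mindspore::GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend910);
mindspore::GlobalContext::SetGlobalDeviceID(0);

auto graph = mindspore::Serialization::LoadModel("net.mindir", mindspore::ModelType::kMindIR);
mindspore::Model model(mindspore::GraphCell(graph), nullptr);  // nullptr: default ModelContext
if (model.Build() != mindspore::kSuccess) {
  // Build() takes no options map anymore; the device comes from GlobalContext
}

std::vector<mindspore::MSTensor> inputs = model.GetInputs();  // fill MutableData() before running
std::vector<mindspore::MSTensor> outputs;
mindspore::Status ret = model.Predict(inputs, &outputs);
```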
b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc index c56ef354d0e..50ca5477a82 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc +++ b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc @@ -24,7 +24,6 @@ #include "cxx_api/model/model_converter_utils/shared_memory.h" namespace mindspore { -namespace api { namespace { uint64_t kSharedMemorySize = 100ull << 20; // 100 MB } @@ -40,7 +39,7 @@ Status MultiProcess::MainProcess(ProcessFuncCall parent_process, ProcessFuncCall memory_size_ = kSharedMemorySize; // 100 MB SharedMemory shared_memory; ret = shared_memory.Create(memory_size_); - if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Create shared memory failed"; return ret; } @@ -48,10 +47,10 @@ Status MultiProcess::MainProcess(ProcessFuncCall parent_process, ProcessFuncCall if (pid < 0) { shared_memory.Destroy(); MS_LOG_ERROR << "Fork process to convert model failed"; - return FAILED; + return kMEFailed; } ret = shared_memory.Attach(); - if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Process attach shared memory failed, pid " << pid; return ret; } @@ -87,12 +86,12 @@ Status MultiProcess::ParentProcess(ProcessFuncCall parent_process) { Status ret; try { ret = parent_process(this); - if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Parent process process failed"; } } catch (const std::runtime_error &ex) { MS_LOG_ERROR << "Catch parent process runtime error: " << ex.what(); - ret = FAILED; + ret = kMEFailed; } stopped_ = true; send_msg_->stop = true; @@ -108,7 +107,7 @@ void MultiProcess::ChildProcess(ProcessFuncCall child_process) { std::thread heartbeat_thread(MultiProcess::HeartbeatThreadFunc, this); try { auto ret = child_process(this); - if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Child process process failed"; } } catch (const std::runtime_error &ex) { @@ -138,14 +137,14 @@ Status MultiProcess::SendMsg(const void *buffer, uint64_t msg_len) { } if (peer_stopped_) { if (!send_msg_->read_finish_flag) { - return FAILED; + return kMEFailed; } break; } MS_LOG_INFO << "Send end " << cur_offset << ", msg len " << sub_msg_len << ", total len " << msg_len; } MS_LOG_INFO << "End to send message to peer process, msg len " << msg_len; - return SUCCESS; + return kSuccess; } Status MultiProcess::ReceiveMsg(CreateBufferCall create_buffer_call) { @@ -158,7 +157,7 @@ Status MultiProcess::ReceiveMsg(CreateBufferCall create_buffer_call) { usleep(1000); // 1ms } if (peer_stopped_) { - return FAILED; + return kMEFailed; } if (msg_buffer == nullptr) { msg_len = receive_msg_->msg_total_len; @@ -170,7 +169,7 @@ Status MultiProcess::ReceiveMsg(CreateBufferCall create_buffer_call) { receive_msg_->read_finish_flag = true; MS_LOG_INFO << "Receive end, current length " << cur_offset << ", total length " << msg_len << std::endl; } while (msg_len > cur_offset); - return SUCCESS; + return kSuccess; } void MultiProcess::HeartbeatThreadFunc(MultiProcess *multi_process) { multi_process->HeartbeatThreadFuncInner(); } @@ -200,6 +199,4 @@ void MultiProcess::HeartbeatThreadFuncInner() { usleep(100000); // sleep 100 ms } } - -} // namespace api } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h index a31d9f0a3bc..8958c13e625 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h +++ 
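Editor's note: MainProcess() forks once, maps a single shared-memory window into both processes, and then runs the two callbacks; SendMsg/ReceiveMsg stream a message through that fixed 100 MB window in chunks, and the heartbeat thread unblocks the survivor if the peer dies. The driver shape, condensed from the two conversion entry points (lambda bodies elided):

```cpp
// Skeleton of the fork/exchange driver; the real lambdas do the model transfer.
MultiProcess multi_process;
auto parent_process = [&](MultiProcess *mp) -> Status {
  // SendMsg(original model), then ReceiveMsg(converted model) as sketched above
  return kSuccess;
};
auto child_process = [&](MultiProcess *mp) -> Status {
  // mirror image: ReceiveMsg, convert in-process, SendMsg the result
  return kSuccess;
};
Status status = multi_process.MainProcess(parent_process, child_process);
if (status != kSuccess) {  // kMEFailed when the fork itself fails
  MS_LOG_ERROR << "Convert model failed";
}
```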
b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h @@ -21,7 +21,6 @@ #include "include/api/status.h" namespace mindspore { -namespace api { struct MessageFlag { uint64_t heartbeat = 0; uint64_t stop = false; @@ -60,7 +59,5 @@ class MultiProcess { Status ParentProcess(ProcessFuncCall parent_process); void ChildProcess(ProcessFuncCall child_process); }; -} // namespace api } // namespace mindspore - #endif // MINDSPORE_CCSRC_CXXAPI_MULTI_PROCESS_H diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc index 09dabe0f1da..24ef852746b 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc +++ b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc @@ -20,26 +20,25 @@ #include "mindspore/core/utils/log_adapter.h" namespace mindspore { -namespace api { Status SharedMemory::Create(uint64_t memory_size) { auto access_mode = S_IRUSR | S_IWUSR | S_IROTH | S_IWOTH | S_IRGRP | S_IWGRP; shm_id_ = shmget(IPC_PRIVATE, memory_size, IPC_CREAT | IPC_EXCL | access_mode); if (shm_id_ == -1) { MS_LOG_ERROR << "Shared memory creation failed. Errno " + std::to_string(errno); - return FAILED; + return kMCFailed; } MS_LOG_INFO << "shmget success, shm id " << shm_id_; - return SUCCESS; + return kSuccess; } Status SharedMemory::Attach() { void *shmat_addr = shmat(shm_id_, nullptr, 0); if (shmat_addr == reinterpret_cast(-1)) { MS_LOG_ERROR << "Shared memory attach failed. Errno " + std::to_string(errno); - return FAILED; + return kMCFailed; } shmat_addr_ = reinterpret_cast(shmat_addr); - return SUCCESS; + return kSuccess; } void SharedMemory::Detach() { @@ -63,5 +62,4 @@ void SharedMemory::Destroy() { MS_LOG_ERROR << errMsg; } } -} // namespace api } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h index 77c9423d586..5200a2d26d6 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h +++ b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h @@ -20,7 +20,6 @@ #include "include/api/status.h" namespace mindspore { -namespace api { class SharedMemory { public: Status Create(uint64_t memory_size); @@ -33,7 +32,5 @@ class SharedMemory { int shm_id_ = -1; uint8_t *shmat_addr_ = nullptr; }; -} // namespace api } // namespace mindspore - #endif // MINDSPORE_CCSRC_CXXAPI_SHARED_MEMORY_H diff --git a/mindspore/ccsrc/cxx_api/model/model_impl.h b/mindspore/ccsrc/cxx_api/model/model_impl.h index 5ada9782b50..97a308eafad 100644 --- a/mindspore/ccsrc/cxx_api/model/model_impl.h +++ b/mindspore/ccsrc/cxx_api/model/model_impl.h @@ -21,28 +21,26 @@ #include #include #include +#include "include/api/context.h" #include "include/api/model.h" #include "include/api/graph.h" #include "cxx_api/graph/graph_data.h" #include "utils/utils.h" #include "ir/func_graph.h" -namespace mindspore::api { +namespace mindspore { class ModelImpl { public: ModelImpl() = default; virtual ~ModelImpl() = default; - virtual Status Build(const std::map &options) = 0; + virtual Status Build() = 0; + virtual Status Resize(const std::vector &inputs, const std::vector> &dims) = 0; - virtual Status Train(const DataSet &dataset, std::map *outputs) = 0; - virtual Status Eval(const DataSet &dataset, std::map *outputs) = 0; - virtual Status Predict(const std::vector &inputs, std::vector *outputs) = 0; + virtual Status Predict(const std::vector &inputs, 
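Editor's note: SharedMemory::Create/Attach/Detach/Destroy wrap the classic System V calls one-to-one, with failures now reported as kMCFailed instead of the old generic FAILED. For reference, the underlying syscall sequence (error handling elided; 0600 stands in for the user/group/other access bits the real code builds):

```cpp
#include <sys/ipc.h>
#include <sys/shm.h>

uint64_t memory_size = 100ull << 20;  // matches kSharedMemorySize above
int shm_id = shmget(IPC_PRIVATE, memory_size, IPC_CREAT | IPC_EXCL | 0600);  // Create()
void *addr = shmat(shm_id, nullptr, 0);               // Attach(), called again after fork()
if (addr == reinterpret_cast<void *>(-1)) { /* the kMCFailed path */ }
// ... parent and child exchange data through addr ...
shmdt(addr);                                          // Detach()
shmctl(shm_id, IPC_RMID, nullptr);                    // Destroy(): removed once all detach
```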
std::vector *outputs) = 0; - virtual Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const = 0; - virtual Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const = 0; + virtual std::vector GetInputs() = 0; + virtual std::vector GetOutputs() = 0; protected: Status Load(const std::shared_ptr &graph_cell) { @@ -61,11 +59,16 @@ class ModelImpl { } std::shared_ptr graph_; + std::shared_ptr model_context_; private: friend class Model; void SetGraph(const std::shared_ptr &graph) { graph_ = graph; } + void SetContext(const std::shared_ptr &model_context) { + if (model_context != nullptr) { + model_context_ = std::make_shared(*model_context); + } + } }; -} // namespace mindspore::api - +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_MODEL_MODEL_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc b/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc index 7349aba9432..5a4366d0b7d 100644 --- a/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc +++ b/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc @@ -16,18 +16,78 @@ #include "cxx_api/model/ms/ms_model.h" #include +#include "include/api/context.h" #include "utils/ms_context.h" #include "cxx_api/factory.h" namespace mindspore { -namespace api { API_FACTORY_REG(ModelImpl, Ascend910, MsModel); API_FACTORY_REG(ModelImpl, GPU, MsModel); -Status MsModel::Build(const std::map &) { +static std::string GenerateShapeKey(const std::vector> &dims) { + std::string shape_key; + for (size_t i = 0; i < dims.size(); ++i) { + shape_key += std::to_string(i) + ":"; + for (size_t j = 0; j < dims[i].size(); ++j) { + shape_key += std::to_string(dims[i][j]); + if (j + 1 < dims[i].size()) { + shape_key += ","; + } + } + if (i + 1 < dims.size()) { + shape_key += ";"; + } + } + return shape_key; +} + +std::shared_ptr MsModel::GenerateGraphCell(const std::vector> &dims) { + std::string shape_key = GenerateShapeKey(dims); + if (auto iter = dynamic_size_graph_map_.find(shape_key); iter != dynamic_size_graph_map_.end()) { + MS_LOG(INFO) << "This options has been built, read cache."; + return iter->second; + } + + auto func_graph = ModelImpl::GetFuncGraph(); + MS_EXCEPTION_IF_NULL(func_graph); + + const auto &inputs = func_graph->parameters(); + if (dims.size() != inputs.size()) { + MS_LOG(ERROR) << "Invalid dims size " << dims.size() << " not match model inputs size " << inputs.size(); + return nullptr; + } + for (size_t i = 0; i < dims.size(); ++i) { + const auto ¶m = inputs[i]; + auto shape_ptr = std::dynamic_pointer_cast(param->Shape()); + if (shape_ptr == nullptr) { + MS_LOG(ERROR) << "Inputs " << i << " is not supported to resize, debug string: " << param->DebugString(); + return nullptr; + } + shape_ptr->shape() = dims[i]; + } + + auto graph = std::make_shared(std::make_shared(func_graph, ModelType::kMindIR)); + MS_EXCEPTION_IF_NULL(graph); + auto graph_cell = std::make_shared(graph); + MS_EXCEPTION_IF_NULL(graph_cell); + auto ret = ModelImpl::Load(graph_cell); + if (ret != kSuccess) { + MS_LOG(ERROR) << "Load failed."; + return nullptr; + } + dynamic_size_graph_map_[shape_key] = graph_cell; + return graph_cell; +} + +Status MsModel::Build() { MS_LOG(INFO) << "Start build model."; MS_EXCEPTION_IF_NULL(graph_); + if (graph_cell_ != nullptr) { + MS_LOG(INFO) << "This model has been built, skip."; + return kSuccess; + } + auto func_graph = ModelImpl::GetFuncGraph(); MS_EXCEPTION_IF_NULL(func_graph); @@ -36,7 +96,7 @@ Status 
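Editor's note: GenerateShapeKey serializes the requested input shapes into a stable string so that GenerateGraphCell can cache one loaded GraphCell per shape combination in dynamic_size_graph_map_. A worked example of the key format the function above produces:

```cpp
// Two inputs: an NCHW image and a length-32 vector.
std::vector<std::vector<int64_t>> dims = {{1, 3, 224, 224}, {32}};
std::string shape_key = GenerateShapeKey(dims);  // yields "0:1,3,224,224;1:32"
```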
MsModel::Build(const std::map &) { auto graph_cell = std::make_shared(graph); MS_EXCEPTION_IF_NULL(graph_cell); auto ret = ModelImpl::Load(graph_cell); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load failed."; return ret; } @@ -44,55 +104,66 @@ Status MsModel::Build(const std::map &) { // save result graph_cell_ = graph_cell; MS_LOG(INFO) << "Build model success."; - return SUCCESS; + return kSuccess; } -Status MsModel::Train(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; +Status MsModel::Resize(const std::vector &inputs, const std::vector> &dims) { + MS_LOG(INFO) << "Start to resize model"; + auto origin_inputs = GetInputs(); + if (inputs.size() != origin_inputs.size()) { + MS_LOG(ERROR) << "Invalid inputs size " << inputs.size() << " not match model inputs size " << origin_inputs.size(); + return kMCInvalidInput; + } + + if (inputs.size() != dims.size()) { + MS_LOG(ERROR) << "Invalid dims size " << dims.size() << " not match inputs size " << inputs.size(); + return kMCInvalidInput; + } + + auto graph_cell = GenerateGraphCell(dims); + if (graph_cell == nullptr) { + MS_LOG(ERROR) << "GenerateGraphCell failed."; + return kMCFailed; + } + + MS_LOG(INFO) << "Resize model success."; + graph_cell_ = std::move(graph_cell); + return kSuccess; } -Status MsModel::Eval(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; -} - -Status MsModel::Predict(const std::vector &inputs, std::vector *outputs) { +Status MsModel::Predict(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (graph_ == nullptr) { MS_LOG(ERROR) << "Invalid data, graph_ is null."; - return FAILED; + return kMCFailed; } if (graph_cell_ == nullptr) { MS_LOG(INFO) << "Model has not been built, it will be built with default options"; - Status ret = Build({}); - if (ret != SUCCESS) { + Status ret = Build(); + if (ret != kSuccess) { MS_LOG(ERROR) << "Build model failed."; - return FAILED; + return ret; } } MS_EXCEPTION_IF_NULL(graph_cell_); Status ret = graph_cell_->Run(inputs, outputs); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Run graph failed."; - return FAILED; + return ret; } - return SUCCESS; + return kSuccess; } -Status MsModel::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector MsModel::GetInputs() { MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return graph_cell_->GetInputs(); } -Status MsModel::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector MsModel::GetOutputs() { MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetOutputsInfo(names, shapes, data_types, mem_sizes); + return graph_cell_->GetOutputs(); } -} // namespace api } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/ms/ms_model.h b/mindspore/ccsrc/cxx_api/model/ms/ms_model.h index 747ff0da8b1..0571b4e4098 100644 --- a/mindspore/ccsrc/cxx_api/model/ms/ms_model.h +++ b/mindspore/ccsrc/cxx_api/model/ms/ms_model.h @@ -33,26 +33,24 @@ #endif namespace mindspore { -namespace api { class MsModel : public ModelImpl { public: MsModel() {} ~MsModel() = default; - Status Build(const std::map &options_map) override; + Status Build() override; + Status Resize(const std::vector &inputs, const std::vector> &dims) override; - Status Train(const DataSet &dataset, std::map *outputs) 
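Editor's note: since Resize is implemented by patching the parameter shapes of the cached FuncGraph and reloading it (rather than a runtime reshape), a caller supplies the current input tensors plus one shape per input. A hedged sketch; batch size 8 is illustrative:

```cpp
std::vector<mindspore::MSTensor> inputs = model.GetInputs();
std::vector<std::vector<int64_t>> dims = {{8, 3, 224, 224}};  // one entry per model input
mindspore::Status ret = model.Resize(inputs, dims);
// kMCInvalidInput when the counts mismatch, kMCFailed when the graph reload fails
```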
override; - Status Eval(const DataSet &dataset, std::map *outputs) override; - Status Predict(const std::vector &inputs, std::vector *outputs) override; + Status Predict(const std::vector &inputs, std::vector *outputs) override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: + std::shared_ptr GenerateGraphCell(const std::vector> &dims); + std::shared_ptr graph_cell_; + std::map> dynamic_size_graph_map_; }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_CCSRC_SESSION_SESSION_BASIC_H diff --git a/mindspore/ccsrc/cxx_api/ops/ops.cc b/mindspore/ccsrc/cxx_api/ops/ops.cc index 1d028a6d8d6..6fe3171af95 100644 --- a/mindspore/ccsrc/cxx_api/ops/ops.cc +++ b/mindspore/ccsrc/cxx_api/ops/ops.cc @@ -15,7 +15,7 @@ */ #include "include/api/ops/ops.h" -namespace mindspore::api { +namespace mindspore { Conv2D::Conv2D(int out_channel, const std::vector &kernel_size, int mode, const std::string &pad_mode, const std::vector &pad, const std::vector &stride, const std::vector &dilation, int group) : OpCell("Conv2D"), @@ -35,4 +35,4 @@ Output Conv2D::operator()(const Input &input1, const Input &input2) const { std::vector Conv2D::Construct(const std::vector &inputs) { return {Output(shared_from_this(), inputs, 1)}; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/python_utils.cc b/mindspore/ccsrc/cxx_api/python_utils.cc index ecf737dcf55..89a0d2793b2 100644 --- a/mindspore/ccsrc/cxx_api/python_utils.cc +++ b/mindspore/ccsrc/cxx_api/python_utils.cc @@ -29,7 +29,7 @@ namespace py = pybind11; static std::mutex init_mutex; static bool Initialized = false; -namespace mindspore::api { +namespace mindspore { static void RegAllOpFromPython() { MsContext::GetInstance()->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); Py_Initialize(); @@ -143,4 +143,4 @@ PythonEnvGuard::~PythonEnvGuard() { FinalizePython(); } } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/python_utils.h b/mindspore/ccsrc/cxx_api/python_utils.h index e7c91a203f9..dbc6dacb9f2 100644 --- a/mindspore/ccsrc/cxx_api/python_utils.h +++ b/mindspore/ccsrc/cxx_api/python_utils.h @@ -16,7 +16,7 @@ #ifndef MINDSPORE_CCSRC_CXXAPI_PYTHON_UTILS_H #define MINDSPORE_CCSRC_CXXAPI_PYTHON_UTILS_H -namespace mindspore::api { +namespace mindspore { void RegAllOp(); bool PythonIsInited(); void InitPython(); @@ -30,5 +30,5 @@ class PythonEnvGuard { private: bool origin_init_status_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXXAPI_PYTHON_UTILS_H diff --git a/mindspore/ccsrc/cxx_api/serialization.cc b/mindspore/ccsrc/cxx_api/serialization.cc index 8a8b26b00b8..5ff271d8f19 100644 --- a/mindspore/ccsrc/cxx_api/serialization.cc +++ b/mindspore/ccsrc/cxx_api/serialization.cc @@ -19,7 +19,7 @@ #include "utils/log_adapter.h" #include "mindspore/core/load_mindir/load_model.h" -namespace mindspore::api { +namespace mindspore { static Buffer ReadFile(const std::string &file) { Buffer buffer; if (file.empty()) { @@ -68,6 +68,22 @@ static Buffer ReadFile(const std::string &file) { return buffer; } +Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) { + if (model_type == kMindIR) { + FuncGraphPtr 
anf_graph = nullptr; + try { + anf_graph = ConvertStreamToFuncGraph(reinterpret_cast(model_data), data_size); + } catch (const std::exception &) { + MS_LOG(EXCEPTION) << "Load MindIR failed."; + } + + return Graph(std::make_shared(anf_graph, kMindIR)); + } else if (model_type == kOM) { + return Graph(std::make_shared(Buffer(model_data, data_size), kOM)); + } + MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type; +} + Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { Buffer data = ReadFile(file); if (data.Data() == nullptr) { @@ -77,7 +93,7 @@ Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { FuncGraphPtr anf_graph = nullptr; try { anf_graph = ConvertStreamToFuncGraph(reinterpret_cast(data.Data()), data.DataSize()); - } catch (std::exception &e) { + } catch (const std::exception &) { MS_LOG(EXCEPTION) << "Load MindIR failed."; } @@ -90,21 +106,21 @@ Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map *parameters) { MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + return kMEFailed; } Status Serialization::SetParameters(const std::map ¶meters, Model *model) { MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + return kMEFailed; } Status Serialization::ExportModel(const Model &model, ModelType model_type, Buffer *model_data) { MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + return kMEFailed; } Status Serialization::ExportModel(const Model &model, ModelType model_type, const std::string &model_file) { MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + return kMEFailed; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/types.cc b/mindspore/ccsrc/cxx_api/types.cc index 98178f108b7..38ecf4dee18 100644 --- a/mindspore/ccsrc/cxx_api/types.cc +++ b/mindspore/ccsrc/cxx_api/types.cc @@ -17,17 +17,20 @@ #include #include "securec/include/securec.h" #include "utils/utils.h" +#include "mindspore/core/ir/api_tensor_impl.h" -namespace mindspore::api { -const char *kDeviceTypeAscend310 = "Ascend310"; -const char *kDeviceTypeAscend910 = "Ascend910"; -const char *kDeviceTypeGpu = "GPU"; - -class DataImpl { +namespace mindspore { +class Buffer::Impl { public: - DataImpl() : data_() {} - ~DataImpl() = default; - DataImpl(const void *data, size_t data_len) { SetData(data, data_len); } + Impl() : data_() {} + ~Impl() = default; + Impl(const void *data, size_t data_len) { + if (data != nullptr) { + (void)SetData(data, data_len); + } else { + ResizeData(data_len); + } + } const void *Data() const { return data_.data(); } void *MutableData() { return data_.data(); } @@ -66,132 +69,162 @@ class DataImpl { std::vector data_; }; -class Buffer::Impl : public DataImpl { +class TensorDefaultImpl : public MSTensor::Impl { public: - Impl() : DataImpl() {} - ~Impl() = default; - Impl(const void *data, size_t data_len) : DataImpl(data, data_len) {} -}; + TensorDefaultImpl() : buffer_(), name_(), type_(DataType::kTypeUnknown), shape_() {} + ~TensorDefaultImpl() override = default; + TensorDefaultImpl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : buffer_(data, data_len), name_(name), type_(type), shape_(shape) {} -class Tensor::Impl : public DataImpl { - public: - Impl() : DataImpl(), name_(), type_(DataType::kMsUnknown), shape_() {} - ~Impl() = default; - Impl(const std::string &name, api::DataType type, const 
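Editor's note: the new in-memory overload of LoadModel lets callers that already hold serialized bytes (converters, serving) skip the filesystem round trip, while the file overload keeps the same semantics. A hedged sketch, where model_buf is assumed to be a Buffer the caller filled with MindIR bytes:

```cpp
mindspore::Graph graph = mindspore::Serialization::LoadModel(
    model_buf.Data(), model_buf.DataSize(), mindspore::ModelType::kMindIR);
```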
std::vector &shape, const void *data, - size_t data_len) - : DataImpl(data, data_len), name_(name), type_(type), shape_(shape) {} + const std::string &Name() const override { return name_; } + enum DataType DataType() const override { return type_; } + const std::vector &Shape() const override { return shape_; } - const std::string &Name() const { return name_; } - void SetName(const std::string &name) { name_ = name; } - - api::DataType DataType() const { return type_; } - void SetDataType(api::DataType type) { type_ = type; } - - void SetShape(const std::vector &shape) { shape_ = shape; } - const std::vector &Shape() const { return shape_; } - - int64_t ElementNum() const { - std::vector shapex = Shape(); - return std::accumulate(shapex.begin(), shapex.end(), 1LL, std::multiplies()); + std::shared_ptr Data() const override { + return std::shared_ptr(buffer_.Data(), [](const void *) {}); } - static int GetTypeSize(api::DataType type) { - static const std::map type_size_map = { - {kMsBool, sizeof(bool)}, {kMsFloat64, sizeof(double)}, {kMsInt8, sizeof(int8_t)}, - {kMsUint8, sizeof(uint8_t)}, {kMsInt16, sizeof(int16_t)}, {kMsUint16, sizeof(uint16_t)}, - {kMsInt32, sizeof(int32_t)}, {kMsUint32, sizeof(uint32_t)}, {kMsInt64, sizeof(int64_t)}, - {kMsUint64, sizeof(uint64_t)}, {kMsFloat16, sizeof(uint16_t)}, {kMsFloat32, sizeof(float)}, - }; - auto it = type_size_map.find(type); - if (it != type_size_map.end()) { - return it->second; - } + void *MutableData() override { return buffer_.MutableData(); } + size_t DataSize() const override { return buffer_.DataSize(); } - MS_LOG(WARNING) << "Cannot find data type " << type; - return 0; + bool IsDevice() const override { return false; } + + std::shared_ptr Clone() const override { + return std::make_shared(name_, type_, shape_, buffer_.Data(), buffer_.DataSize()); } private: + Buffer buffer_; std::string name_; - api::DataType type_; + enum DataType type_; std::vector shape_; }; -Tensor::Tensor() : impl_(std::make_shared()) {} -Tensor::Tensor(const std::string &name, api::DataType type, const std::vector &shape, const void *data, - size_t data_len) - : impl_(std::make_shared(name, type, shape, data, data_len)) {} -Tensor::~Tensor() = default; +class TensorReferenceImpl : public MSTensor::Impl { + public: + TensorReferenceImpl() : data_(nullptr), data_size_(0), name_(), type_(DataType::kTypeUnknown), shape_() {} + ~TensorReferenceImpl() override = default; + TensorReferenceImpl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : data_(data), data_size_(data_len), name_(name), type_(type), shape_(shape) {} -Tensor Tensor::Clone() const { + const std::string &Name() const override { return name_; } + enum DataType DataType() const override { return type_; } + const std::vector &Shape() const override { return shape_; } + + std::shared_ptr Data() const override { + return std::shared_ptr(data_, [](const void *) {}); + } + + void *MutableData() override { return const_cast(data_); } + size_t DataSize() const override { return data_size_; } + + bool IsDevice() const override { return false; } + + std::shared_ptr Clone() const override { + return std::make_shared(name_, type_, shape_, data_, data_size_); + } + + protected: + const void *data_; + size_t data_size_; + std::string name_; + enum DataType type_; + std::vector shape_; +}; + +MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + try { + 
std::shared_ptr impl = std::make_shared(name, type, shape, data, data_len); + return MSTensor(impl); + } catch (const std::bad_alloc &) { + MS_LOG(ERROR) << "Malloc memory failed."; + return MSTensor(nullptr); + } catch (...) { + MS_LOG(ERROR) << "Unknown error occurred."; + return MSTensor(nullptr); + } +} + +MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + try { + std::shared_ptr impl = std::make_shared(name, type, shape, data, data_len); + return MSTensor(impl); + } catch (const std::bad_alloc &) { + MS_LOG(ERROR) << "Malloc memory failed."; + return MSTensor(nullptr); + } catch (...) { + MS_LOG(ERROR) << "Unknown error occurred."; + return MSTensor(nullptr); + } +} + +MSTensor::MSTensor() : impl_(std::make_shared()) {} +MSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {} +MSTensor::MSTensor(const std::shared_ptr &impl) : impl_(impl) { MS_EXCEPTION_IF_NULL(impl); } +MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : impl_(std::make_shared(name, type, shape, data, data_len)) {} +MSTensor::~MSTensor() = default; + +bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; } + +MSTensor MSTensor::Clone() const { MS_EXCEPTION_IF_NULL(impl_); - Tensor ret; - ret.impl_ = std::make_shared(*impl_); + MSTensor ret; + ret.impl_ = impl_->Clone(); return ret; } -const std::string &Tensor::Name() const { +const std::string &MSTensor::Name() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->Name(); } -void Tensor::SetName(const std::string &name) { - MS_EXCEPTION_IF_NULL(impl_); - impl_->SetName(name); -} - -DataType Tensor::DataType() const { +enum DataType MSTensor::DataType() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->DataType(); } -void Tensor::SetDataType(api::DataType type) { - MS_EXCEPTION_IF_NULL(impl_); - impl_->SetDataType(type); -} - -const std::vector &Tensor::Shape() const { +const std::vector &MSTensor::Shape() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->Shape(); } -void Tensor::SetShape(const std::vector &shape) { +int64_t MSTensor::ElementNum() const { MS_EXCEPTION_IF_NULL(impl_); - impl_->SetShape(shape); + const auto &shape = impl_->Shape(); + if (shape.empty()) { + // element number of scalar is 1 + return 1; + } + + return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); } -const void *Tensor::Data() const { +std::shared_ptr MSTensor::Data() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->Data(); } -void *Tensor::MutableData() { +void *MSTensor::MutableData() { MS_EXCEPTION_IF_NULL(impl_); return impl_->MutableData(); } -size_t Tensor::DataSize() const { +size_t MSTensor::DataSize() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->DataSize(); } -bool Tensor::ResizeData(size_t data_len) { +bool MSTensor::IsDevice() const { MS_EXCEPTION_IF_NULL(impl_); - return impl_->ResizeData(data_len); + return impl_->IsDevice(); } -bool Tensor::SetData(const void *data, size_t data_len) { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->SetData(data, data_len); -} - -int64_t Tensor::ElementNum() const { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->ElementNum(); -} - -int Tensor::GetTypeSize(api::DataType type) { return Impl::GetTypeSize(type); } - Buffer::Buffer() : impl_(std::make_shared()) {} Buffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared(data, data_len)) {} Buffer::~Buffer() = default; @@ -227,4 +260,4 @@ bool 
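Editor's note: the two Impl classes above surface as two factories with different ownership — CreateTensor copies the bytes into an owning TensorDefaultImpl, while CreateRefTensor stores only the caller's pointer, so the backing storage must outlive the tensor. A sketch; the kNumberTypeFloat32 enumerator is assumed from the public DataType enum, which this excerpt does not show:

```cpp
float host_data[4] = {1.f, 2.f, 3.f, 4.f};

// Owning copy: safe to use after host_data goes away.
auto owned = mindspore::MSTensor::CreateTensor(
    "x", mindspore::DataType::kNumberTypeFloat32, {2, 2}, host_data, sizeof(host_data));

// Zero-copy view: host_data must outlive `ref`.
auto ref = mindspore::MSTensor::CreateRefTensor(
    "x", mindspore::DataType::kNumberTypeFloat32, {2, 2}, host_data, sizeof(host_data));

// Both factories are noexcept and signal failure via the nullptr comparison.
if (owned == nullptr || ref == nullptr) { /* allocation failed */ }
```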
Buffer::SetData(const void *data, size_t data_len) { MS_EXCEPTION_IF_NULL(impl_); return impl_->SetData(data, data_len); } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/CMakeLists.txt index a772f096f74..ecacf5e4ff6 100644 --- a/mindspore/ccsrc/minddata/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/CMakeLists.txt @@ -284,14 +284,7 @@ else() endif() add_dependencies(_c_dataengine mindspore_shared_lib) -if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - set(MINDSPORE_LINK_OBJECT ${CMAKE_BINARY_DIR}/mindspore/ccsrc/cxx_api/CMakeFiles/mindspore_shared_lib.dir/objects.a) - target_link_libraries(_c_dataengine PRIVATE mindspore_shared_lib ${MINDSPORE_LINK_OBJECT}) -else() - if(ENABLE_ACL) - target_link_libraries(_c_dataengine PRIVATE mindspore_shared_lib) - endif() -endif() +target_link_libraries(_c_dataengine PRIVATE mindspore_shared_lib) if(USE_GLOG) target_link_libraries(_c_dataengine PRIVATE mindspore::glog) diff --git a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt index dda0b2bc525..ac0b8a4a662 100644 --- a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt @@ -26,28 +26,13 @@ if(ENABLE_PYTHON) target_include_directories(APItoPython PRIVATE ${pybind11_INCLUDE_DIRS}) endif() - -if(ENABLE_ACL) - add_library(cpp-API OBJECT - config.cc - datasets.cc - execute.cc - iterator.cc - minddata_eager.cc - transforms.cc - samplers.cc - text.cc - vision.cc - ) -else() - add_library(cpp-API OBJECT - config.cc - datasets.cc - execute.cc - iterator.cc - transforms.cc - samplers.cc - text.cc - vision.cc - ) -endif() +add_library(cpp-API OBJECT + config.cc + datasets.cc + execute.cc + iterator.cc + transforms.cc + samplers.cc + text.cc + vision.cc + ) diff --git a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc deleted file mode 100644 index fc92ba30f32..00000000000 --- a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "minddata/dataset/core/constants.h" -#include "minddata/dataset/core/data_type.h" -#include "minddata/dataset/include/de_tensor.h" -#include "minddata/dataset/include/type_id.h" -#include "mindspore/core/ir/dtype/type_id.h" -#include "mindspore/lite/include/ms_tensor.h" -#include "utils/hashing.h" -#ifndef ENABLE_ANDROID -#include "utils/log_adapter.h" -#else -#include "mindspore/lite/src/common/log_adapter.h" -#endif - -namespace mindspore { -namespace tensor { -MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector &shape) { - return new DETensor(data_type, shape); -} - -MSTensor *DETensor::CreateTensor(const std::string &path) { - std::shared_ptr t; - (void)dataset::Tensor::CreateFromFile(path, &t); - return new DETensor(std::move(t)); -} - -MSTensor *DETensor::CreateFromMemory(TypeId data_type, const std::vector &shape, void *data) { - std::shared_ptr t; - // prepare shape info - std::vector t_shape; - - std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t { return static_cast(s); }); - - (void)dataset::Tensor::CreateFromMemory(dataset::TensorShape(t_shape), dataset::MSTypeToDEType(data_type), - static_cast(data), &t); - return new DETensor(std::move(t)); -} - -DETensor::DETensor(TypeId data_type, const std::vector &shape) { - std::vector t_shape; - t_shape.reserve(shape.size()); - std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t { return static_cast(s); }); - dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), dataset::MSTypeToDEType(data_type), &this->tensor_impl_); -} - -DETensor::DETensor(std::shared_ptr tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); } - -MSTensor *DETensor::ConvertToLiteTensor() { - // static MSTensor::CreateTensor is only for the LiteTensor - MSTensor *tensor = CreateTensor(this->data_type(), this->shape()); - MS_ASSERT(tensor->Size() == this->Size()); - memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size()); - return tensor; -} - -std::shared_ptr DETensor::tensor() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_; -} - -TypeId DETensor::data_type() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return dataset::DETypeToMSType(this->tensor_impl_->type()); -} - -TypeId DETensor::set_data_type(TypeId data_type) { - MS_ASSERT(this->tensor_impl_ != nullptr); - if (data_type != this->data_type()) { - std::shared_ptr temp; - dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), dataset::MSTypeToDEType(data_type), - this->tensor_impl_->GetBuffer(), &temp); - this->tensor_impl_ = temp; - } - return data_type; -} - -std::vector DETensor::shape() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - std::vector t_shape = this->tensor_impl_->shape().AsVector(); - std::vector shape; - shape.reserve(t_shape.size()); - std::transform(t_shape.begin(), t_shape.end(), std::back_inserter(shape), - [](dataset::dsize_t s) -> int { return static_cast(s); }); - return shape; -} - -size_t DETensor::set_shape(const std::vector &shape) { - MS_ASSERT(this->tensor_impl_ != nullptr); - std::vector t_shape; - t_shape.reserve(shape.size()); - std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t { return static_cast(s); }); - dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape)); - return shape.size(); -} - -int DETensor::DimensionSize(size_t index) const { - MS_ASSERT(this->tensor_impl_ != 
nullptr); - int dim_size = -1; - auto shape = this->shape(); - if (index < shape.size()) { - dim_size = shape[index]; - } else { - MS_LOG(ERROR) << "Dimension index is wrong: " << index; - } - return dim_size; -} - -int DETensor::ElementsNum() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->Size(); -} - -size_t DETensor::Size() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->SizeInBytes(); -} - -void *DETensor::MutableData() { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->GetMutableBuffer(); -} - -} // namespace tensor -} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc index bfa05669565..b72bcbf7416 100644 --- a/mindspore/ccsrc/minddata/dataset/api/execute.cc +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -14,12 +14,11 @@ * limitations under the License. */ -#include "minddata/dataset/core/tensor_row.h" -#ifdef ENABLE_ANDROID -#include "minddata/dataset/include/de_tensor.h" -#endif #include "minddata/dataset/include/execute.h" +#include "minddata/dataset/core/de_tensor.h" +#include "minddata/dataset/core/tensor_row.h" #include "minddata/dataset/include/tensor.h" +#include "minddata/dataset/include/type_id.h" #include "minddata/dataset/kernels/tensor_op.h" #ifndef ENABLE_ANDROID #include "utils/log_adapter.h" @@ -30,78 +29,85 @@ namespace mindspore { namespace dataset { -Execute::Execute(std::shared_ptr op) : op_(std::move(op)) {} +Execute::Execute(std::shared_ptr op) { ops_.emplace_back(std::move(op)); } -/// \brief Destructor -Execute::~Execute() = default; +Execute::Execute(std::vector> ops) : ops_(std::move(ops)) {} -#ifdef ENABLE_ANDROID -std::shared_ptr Execute::operator()(std::shared_ptr input) { - // Build the op - if (op_ == nullptr) { - MS_LOG(ERROR) << "Input TensorOperation is not valid"; - return nullptr; +Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output) { + // Validate input tensor + CHECK_FAIL_RETURN_UNEXPECTED(input.DataSize() > 0, "Input Tensor has no data"); + CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided"); + + // Validate and build runtime ops + std::vector> transforms; + for (int32_t i = 0; i < ops_.size(); i++) { + CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null"); + RETURN_IF_NOT_OK(ops_[i]->ValidateParams()); + transforms.emplace_back(ops_[i]->Build()); } - std::shared_ptr de_input = std::dynamic_pointer_cast(input)->tensor(); - if (de_input == nullptr) { - MS_LOG(ERROR) << "Input Tensor is not valid"; - return nullptr; - } - std::shared_ptr transform = op_->Build(); - std::shared_ptr de_output; - Status rc = transform->Compute(de_input, &de_output); + // Convert mindspore::Tensor to dataset::Tensor + std::shared_ptr de_tensor; + Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(input.Shape()), + MSTypeToDEType(static_cast(input.DataType())), + (const uchar *)(input.Data().get()), input.DataSize(), &de_tensor); + RETURN_IF_NOT_OK(rc); - if (rc.IsError()) { - // execution failed - MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); - return nullptr; - } - return std::make_shared(std::move(de_output)); -} -#endif + // Apply transforms on tensor + for (auto &t : transforms) { + std::shared_ptr de_output; + RETURN_IF_NOT_OK(t->Compute(de_tensor, &de_output)); -std::shared_ptr Execute::operator()(std::shared_ptr 
input) { - // Build the op - if (op_ == nullptr) { - MS_LOG(ERROR) << "Input TensorOperation is not valid"; - return nullptr; + // For next transform + de_tensor = std::move(de_output); } - if (input == nullptr) { - MS_LOG(ERROR) << "Input Tensor is not valid"; - return nullptr; - } - // will add validate params once API is set - std::shared_ptr transform = op_->Build(); - std::shared_ptr de_output; - Status rc = transform->Compute(input, &de_output); - - if (rc.IsError()) { - // execution failed - MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); - return nullptr; - } - return de_output; + // Convert dataset::Tensor to mindspore::Tensor + CHECK_FAIL_RETURN_UNEXPECTED(de_tensor->HasData(), "Apply transform failed, output tensor has no data"); + *output = mindspore::MSTensor(std::make_shared(de_tensor)); + return Status::OK(); } -Status Execute::operator()(const std::vector> &input_tensor_list, - std::vector> *output_tensor_list) { - CHECK_FAIL_RETURN_UNEXPECTED(op_ != nullptr, "Input TensorOperation is not valid"); +Status Execute::operator()(const std::vector &input_tensor_list, std::vector *output_tensor_list) { + // Validate input tensor CHECK_FAIL_RETURN_UNEXPECTED(!input_tensor_list.empty(), "Input Tensor is not valid"); + for (auto &tensor : input_tensor_list) { + CHECK_FAIL_RETURN_UNEXPECTED(tensor.DataSize() > 0, "Input Tensor has no data"); + } + CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided"); - TensorRow input, output; - std::copy(input_tensor_list.begin(), input_tensor_list.end(), std::back_inserter(input)); - CHECK_FAIL_RETURN_UNEXPECTED(!input.empty(), "Input Tensor is not valid"); - - std::shared_ptr transform = op_->Build(); - Status rc = transform->Compute(input, &output); - if (rc.IsError()) { - // execution failed - RETURN_STATUS_UNEXPECTED("Operation execution failed : " + rc.ToString()); + // Validate and build runtime ops + std::vector> transforms; + for (int32_t i = 0; i < ops_.size(); i++) { + CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null"); + RETURN_IF_NOT_OK(ops_[i]->ValidateParams()); + transforms.emplace_back(ops_[i]->Build()); } - std::copy(output.begin(), output.end(), std::back_inserter(*output_tensor_list)); + TensorRow de_tensor_list; + for (auto &tensor : input_tensor_list) { + std::shared_ptr de_tensor; + Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(tensor.Shape()), + MSTypeToDEType(static_cast(tensor.DataType())), + (const uchar *)(tensor.Data().get()), tensor.DataSize(), &de_tensor); + RETURN_IF_NOT_OK(rc); + de_tensor_list.emplace_back(std::move(de_tensor)); + } + + // Apply transforms on tensor + for (auto &t : transforms) { + TensorRow de_output_list; + RETURN_IF_NOT_OK(t->Compute(de_tensor_list, &de_output_list)); + // For next transform + de_tensor_list = std::move(de_output_list); + } + + for (auto &tensor : de_tensor_list) { + CHECK_FAIL_RETURN_UNEXPECTED(tensor->HasData(), "Apply transform failed, output tensor has no data"); + auto ms_tensor = mindspore::MSTensor(std::make_shared(tensor)); + output_tensor_list->emplace_back(ms_tensor); + } + CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor is not valid"); return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/api/minddata_eager.cc b/mindspore/ccsrc/minddata/dataset/api/minddata_eager.cc deleted file mode 100644 index 154ec4ab035..00000000000 --- a/mindspore/ccsrc/minddata/dataset/api/minddata_eager.cc +++ /dev/null @@ 
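Editor's note: Execute now holds a vector of IR-level TensorOperations, null-checks, validates, and builds all of them before the first Compute, and converts between mindspore::MSTensor and dataset::Tensor at the boundary; this is the eager path that replaces the MindDataEager class deleted below. A hypothetical chain, with the vision factories assumed from minddata/dataset/include/vision.h (not part of this excerpt):

```cpp
using namespace mindspore::dataset;

std::vector<std::shared_ptr<TensorOperation>> ops = {vision::Decode(), vision::Resize({224, 224})};
Execute transform(ops);

mindspore::MSTensor image;  // caller-provided input, e.g. CreateTensor over raw JPEG bytes
mindspore::MSTensor processed;
Status rc = transform(image, &processed);
if (rc.IsError()) {
  // invalid ops and empty inputs are rejected before any transform runs
}
```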
-1,154 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include "minddata/dataset/include/minddata_eager.h" -#include "minddata/dataset/include/vision.h" -#include "minddata/dataset/core/tensor.h" -#include "minddata/dataset/kernels/tensor_op.h" -#include "minddata/dataset/util/path.h" - -namespace mindspore { -namespace api { - -MindDataEager::MindDataEager(std::vector> ops) : ops_(ops) {} - -// Helper function to convert Type from DE to MS -DataType ToMSType(dataset::DataType type) { - switch (dataset::DataType::Type(type)) { - case dataset::DataType::DE_BOOL: - return DataType::kMsBool; - case dataset::DataType::DE_UINT8: - return DataType::kMsUint8; - case dataset::DataType::DE_INT32: - return DataType::kMsInt32; - case dataset::DataType::DE_INT64: - return DataType::kMsInt64; - case dataset::DataType::DE_FLOAT32: - return DataType::kMsFloat32; - default: - return DataType::kMsUnknown; - } -} - -// Helper function to convert Type from MS to DE -dataset::DataType ToDEType(DataType type) { - switch (type) { - case DataType::kMsBool: - return dataset::DataType(dataset::DataType::DE_BOOL); - case DataType::kMsUint8: - return dataset::DataType(dataset::DataType::DE_UINT8); - case DataType::kMsInt32: - return dataset::DataType(dataset::DataType::DE_INT32); - case DataType::kMsInt64: - return dataset::DataType(dataset::DataType::DE_INT64); - case DataType::kMsFloat32: - return dataset::DataType(dataset::DataType::DE_FLOAT32); - default: - return dataset::DataType(dataset::DataType::DE_UNKNOWN); - } -} - -Status MindDataEager::LoadImageFromDir(const std::string &image_dir, std::vector> *images) { - // Check target directory - dataset::Path image_dir_(image_dir); - if (!image_dir_.Exists() || !image_dir_.IsDirectory()) { - std::string err_msg = "Target directory: " + image_dir + " does not exist or not a directory."; - MS_LOG(ERROR) << err_msg; - return Status(StatusCode::FAILED, err_msg); - } - if (access(image_dir_.toString().c_str(), R_OK) == -1) { - std::string err_msg = "No access to target directory: " + image_dir; - MS_LOG(ERROR) << err_msg; - return Status(StatusCode::FAILED, err_msg); - } - - // Start reading images and constructing tensors - auto path_itr = dataset::Path::DirIterator::OpenDirectory(&image_dir_); - while (path_itr->hasNext()) { - dataset::Path file = path_itr->next(); - std::shared_ptr image; - dataset::Tensor::CreateFromFile(file.toString(), &image); - - std::shared_ptr ms_image = std::make_shared("image", DataType(kMsUint8), image->shape().AsVector(), - image->GetBuffer(), image->SizeInBytes()); - images->push_back(ms_image); - } - - // Check if read images or not - if (images->empty()) { - std::string err_msg = "No images found in target directory: " + image_dir; - MS_LOG(ERROR) << err_msg; - return Status(StatusCode::FAILED, err_msg); - } - - return Status(StatusCode::SUCCESS); -} - -std::shared_ptr MindDataEager::operator()(std::shared_ptr input) { - // Validate ops 
- if (ops_.empty()) { - MS_LOG(ERROR) << "Input TensorOperation should be provided"; - return nullptr; - } - for (int32_t i = 0; i < ops_.size(); i++) { - if (ops_[i] == nullptr) { - MS_LOG(ERROR) << "Input TensorOperation[" << i << "] is invalid or null"; - return nullptr; - } - } - // Validate input tensor - if (input == nullptr) { - MS_LOG(ERROR) << "Input Tensor should not be null"; - return nullptr; - } - - // Start applying transforms in ops - std::shared_ptr de_input; - dataset::Tensor::CreateFromMemory(dataset::TensorShape(input->Shape()), ToDEType(input->DataType()), - (const uchar *)(input->Data()), &de_input); - - for (int32_t i = 0; i < ops_.size(); i++) { - // Build runtime op and run - std::shared_ptr de_output; - std::shared_ptr transform = ops_[i]->Build(); - dataset::Status rc = transform->Compute(de_input, &de_output); - - // check execution failed - if (rc.IsError()) { - MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); - return nullptr; - } - - // For next transform - de_input = std::move(de_output); - } - - // Convert DETensor to Tensor - if (!de_input->HasData()) { - MS_LOG(ERROR) << "Apply transform failed, output tensor has no data"; - return nullptr; - } - std::shared_ptr output = - std::make_shared("transfomed", ToMSType(de_input->type()), de_input->shape().AsVector(), - de_input->GetBuffer(), de_input->SizeInBytes()); - return output; -} - -} // namespace api -} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc index 56dc7d793e3..094a070e695 100644 --- a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc +++ b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc @@ -29,25 +29,42 @@ PYBIND_REGISTER(Execute, 0, ([](const py::module *m) { return execute; })) .def("__call__", - [](Execute &self, std::shared_ptr in) { - std::shared_ptr out = self(in); - if (out == nullptr) { - THROW_IF_ERROR([]() { - RETURN_STATUS_UNEXPECTED( - "Failed to execute op in eager mode, please check ERROR log above."); + [](Execute &self, const std::shared_ptr &de_tensor) { + auto ms_tensor = mindspore::MSTensor(std::make_shared(de_tensor)); + Status rc = self(ms_tensor, &ms_tensor); + if (rc.IsError()) { + THROW_IF_ERROR([&rc]() { + RETURN_STATUS_UNEXPECTED("Failed to execute transform op, " + rc.ToString()); }()); } - return out; + std::shared_ptr de_output_tensor; + dataset::Tensor::CreateFromMemory(dataset::TensorShape(ms_tensor.Shape()), + MSTypeToDEType(static_cast(ms_tensor.DataType())), + (const uchar *)(ms_tensor.Data().get()), + ms_tensor.DataSize(), &de_output_tensor); + return de_output_tensor; }) .def("__call__", [](Execute &self, const std::vector> &input_tensor_list) { - std::vector> output_tensor_list; - THROW_IF_ERROR(self(input_tensor_list, &output_tensor_list)); - if (output_tensor_list.empty()) { - THROW_IF_ERROR([]() { - RETURN_STATUS_UNEXPECTED("Failed to execute op in eager mode, please check ERROR log above."); - }()); + std::vector ms_input_tensor_list; + std::vector ms_output_tensor_list; + for (auto &tensor : input_tensor_list) { + auto ms_tensor = mindspore::MSTensor(std::make_shared(tensor)); + ms_input_tensor_list.emplace_back(std::move(ms_tensor)); } - return output_tensor_list; + Status rc = self(ms_input_tensor_list, &ms_output_tensor_list); + if (rc.IsError()) { + THROW_IF_ERROR( + [&rc]() { 
RETURN_STATUS_UNEXPECTED("Failed to execute transform op, " + rc.ToString()); }()); + } + std::vector> de_output_tensor_list; + for (auto &tensor : ms_output_tensor_list) { + std::shared_ptr de_output_tensor; + dataset::Tensor::CreateFromMemory( + dataset::TensorShape(tensor.Shape()), MSTypeToDEType(static_cast(tensor.DataType())), + (const uchar *)(tensor.Data().get()), tensor.DataSize(), &de_output_tensor); + de_output_tensor_list.emplace_back(std::move(de_output_tensor)); + } + return de_output_tensor_list; }); })); } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc index e58557a5112..03044106ee9 100644 --- a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc +++ b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc @@ -84,7 +84,8 @@ PYBIND_REGISTER(SliceOption, 0, ([](const py::module *m) { } if (!c_slice.valid()) { - THROW_IF_ERROR(Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Wrong slice object")); + THROW_IF_ERROR( + Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Wrong slice object")); } return SliceOption(c_slice); })) diff --git a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc index f0d73ca2bb5..830696337d9 100644 --- a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc +++ b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc @@ -354,7 +354,7 @@ PYBIND_REGISTER( for (auto handle : py_sub.cast()) { py::tuple tp = handle.cast(); if (tp.is_none() || tp.size() != 2) { - THROW_IF_ERROR(Status(StatusCode::kUnexpectedError, "Each tuple in subpolicy should be (op, prob).")); + THROW_IF_ERROR(Status(StatusCode::kMDUnexpectedError, "Each tuple in subpolicy should be (op, prob).")); } std::shared_ptr t_op; if (py::isinstance(tp[0])) { @@ -366,11 +366,11 @@ PYBIND_REGISTER( std::make_shared((tp[0]).cast())); } else { THROW_IF_ERROR( - Status(StatusCode::kUnexpectedError, "op is neither a tensorOp, tensorOperation nor a pyfunc.")); + Status(StatusCode::kMDUnexpectedError, "op is neither a tensorOp, tensorOperation nor a pyfunc.")); } double prob = (tp[1]).cast(); if (prob < 0 || prob > 1) { - THROW_IF_ERROR(Status(StatusCode::kUnexpectedError, "prob needs to be with [0,1].")); + THROW_IF_ERROR(Status(StatusCode::kMDUnexpectedError, "prob needs to be with [0,1].")); } cpp_policy.back().emplace_back(std::make_pair(t_op, prob)); } diff --git a/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc b/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc index 6763dada429..85e1177906a 100644 --- a/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc +++ b/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc @@ -51,12 +51,12 @@ Status PyDSCallback::ExecutePyfunc(py::function f, const CallbackParam &cb_param // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { f(cb_param); } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return 
Status(StatusCode::kMDPyFuncException, e.what()); } } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt index a991323174c..9a204f6deb7 100644 --- a/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt @@ -5,6 +5,7 @@ set(DATASET_CORE_SRC_FILES config_manager.cc cv_tensor.cc data_type.cc + de_tensor.cc global_context.cc tensor.cc tensor_helpers.cc diff --git a/mindspore/ccsrc/minddata/dataset/core/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/core/de_tensor.cc new file mode 100644 index 00000000000..041533fd917 --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/core/de_tensor.cc @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "minddata/dataset/core/de_tensor.h" +#include "minddata/dataset/core/constants.h" +#include "minddata/dataset/core/data_type.h" +#include "minddata/dataset/include/type_id.h" +#include "mindspore/core/ir/dtype/type_id.h" +#include "utils/hashing.h" +#ifndef ENABLE_ANDROID +#include "utils/log_adapter.h" +#define ASSERT_NULL(ptr) MS_EXCEPTION_IF_NULL(ptr) +#else +#include "mindspore/lite/src/common/log_adapter.h" +#define ASSERT_NULL(ptr) MS_ASSERT((ptr) != nullptr) +#endif + +namespace mindspore { +namespace dataset { + +DETensor::DETensor(std::shared_ptr tensor_impl) + : tensor_impl_(tensor_impl), + name_("MindDataTensor"), + type_(static_cast(DETypeToMSType(tensor_impl_->type()))), + shape_(tensor_impl_->shape().AsVector()) {} + +const std::string &DETensor::Name() const { return name_; } + +enum mindspore::DataType DETensor::DataType() const { + ASSERT_NULL(tensor_impl_); + return static_cast(DETypeToMSType(tensor_impl_->type())); +} + +size_t DETensor::DataSize() const { + ASSERT_NULL(tensor_impl_); + return tensor_impl_->SizeInBytes(); +} + +const std::vector &DETensor::Shape() const { return shape_; } + +std::shared_ptr DETensor::Data() const { + return std::shared_ptr(tensor_impl_->GetBuffer(), [](const void *) {}); +} + +void *DETensor::MutableData() { + ASSERT_NULL(tensor_impl_); + return tensor_impl_->GetMutableBuffer(); +} + +bool DETensor::IsDevice() const { return false; } + +std::shared_ptr DETensor::Clone() const { return std::make_shared(tensor_impl_); } +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/core/de_tensor.h b/mindspore/ccsrc/minddata/dataset/core/de_tensor.h new file mode 100644 index 00000000000..be3eb68e2f8 --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/core/de_tensor.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DETENSOR_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DETENSOR_H_ +#include <memory> +#include <string> +#include <vector> +#include "include/api/types.h" +#include "mindspore/core/ir/api_tensor_impl.h" +#include "minddata/dataset/include/status.h" +#include "minddata/dataset/include/tensor.h" + +namespace mindspore { +namespace dataset { +class DETensor : public mindspore::MSTensor::Impl { + public: + DETensor() = default; + ~DETensor() override = default; + explicit DETensor(std::shared_ptr<dataset::Tensor> tensor_impl); + + const std::string &Name() const override; + + enum mindspore::DataType DataType() const override; + + size_t DataSize() const override; + + const std::vector<int64_t> &Shape() const override; + + std::shared_ptr<const void> Data() const override; + + void *MutableData() override; + + bool IsDevice() const override; + + std::shared_ptr<MSTensor::Impl> Clone() const override; + + private: + std::shared_ptr<dataset::Tensor> tensor_impl_; + std::string name_; + enum mindspore::DataType type_; + std::vector<int64_t> shape_; +}; } // namespace dataset } // namespace mindspore +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DETENSOR_H_ diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.h b/mindspore/ccsrc/minddata/dataset/core/tensor.h index 12bebf8d8a4..ac603240989 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.h @@ -41,23 +41,17 @@ #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_helpers.h" #include "minddata/dataset/core/tensor_shape.h" +#include "minddata/dataset/core/de_tensor.h" #include "minddata/dataset/util/status.h" #include "utils/ms_utils.h" #ifndef ENABLE_ANDROID #include "proto/example.pb.h" -#else -#include "minddata/dataset/include/de_tensor.h" #endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { -#ifdef ENABLE_ANDROID -namespace tensor { -class DETensor; -} // namespace tensor -#endif namespace dataset { class Tensor; template @@ -85,7 +79,7 @@ class Tensor { /// \param other Tensor to be moved Tensor(Tensor &&other) noexcept; - /// Move assigment operator + /// Move assignment operator /// \param other Tensor to be moved Tensor &operator=(Tensor &&other) noexcept; @@ -134,7 +128,7 @@ class Tensor { #ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. /// \param[in] bytes_list protobuf's Bytelist - /// \param[in] shape shape of the outout tensor + /// \param[in] shape shape of the output tensor /// \param[out] out created Tensor /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out); @@ -292,7 +286,7 @@ class Tensor { std::string err; err += (data_ == nullptr) ? "data_ is nullptr \t" : ""; err += type_.IsCompatible() ? "data type not compatible\t" : ""; - return Status(StatusCode::kUnexpectedError, err); + return Status(StatusCode::kMDUnexpectedError, err); } } @@ -343,7 +337,7 @@ class Tensor { void Invalidate(); /// Copy input tensor into self at the location index. 
- /// Index is a vector of axises which can be incomplete: + /// Index is a vector of axes which can be incomplete: /// Ex: shape <2,3>, inserting into index {0} will replace the first row. index {1,2} will replace the last cell. /// \param index /// \param input @@ -686,9 +680,7 @@ class Tensor { unsigned char *data_end_ = nullptr; private: -#ifdef ENABLE_ANDROID - friend class tensor::DETensor; -#endif + friend class DETensor; /// Slice numeric tensors. Status SliceNumeric(TensorPtr *out, const std::vector<std::vector<dsize_t>> &indices, const TensorShape &shape); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt index 99b8a481498..7ee95fc5e8a 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt @@ -73,6 +73,7 @@ if(ENABLE_CACHE) engine-cache-server _c_dataengine _c_mindrecord + mindspore mindspore::protobuf mindspore::grpc++ mindspore_gvar @@ -85,6 +86,7 @@ if(ENABLE_CACHE) engine-cache-server _c_dataengine _c_mindrecord + mindspore mindspore::protobuf mindspore::grpc++ mindspore_gvar @@ -103,6 +105,7 @@ if(ENABLE_CACHE) add_executable(cache_admin cache_admin.cc cache_admin_arg.cc) target_link_libraries(cache_admin _c_dataengine _c_mindrecord mindspore::protobuf ${PYTHON_LIBRARIES} pthread) + target_link_libraries(cache_admin mindspore mindspore_shared_lib) if(USE_GLOG) target_link_libraries(cache_admin mindspore::glog) diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc index 0995c4ea485..79ec9c71195 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc @@ -22,10 +22,11 @@ #include "minddata/dataset/engine/cache/cache_common.h" #include "minddata/dataset/util/path.h" +namespace ms = mindspore; namespace ds = mindspore::dataset; int main(int argc, char **argv) { - ds::Status rc; + ms::Status rc; ds::CacheAdminArgHandler args; std::stringstream arg_stream; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc index ab30ceaa449..5774c80e410 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc @@ -89,7 +89,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std ArgValue selected_arg = arg_map_[option]; if (used_args_[selected_arg]) { std::string err_msg = "The " + option + " argument was given more than once."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Flag that this arg is used now @@ -101,7 +101,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std if (command_id != CommandId::kCmdUnknown) { if (command_id_ != CommandId::kCmdUnknown) { std::string err_msg = "Only one command at a time is allowed. Invalid command: " + option; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } else { command_id_ = command_id; } @@ -113,7 +113,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std *arg_stream >> value_as_string; if (value_as_string.empty()) { std::string err_msg = option + " option requires an argument field. 
Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Now, attempt to convert the value into it's numeric format for output @@ -121,7 +121,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std *out_arg = std::stoul(value_as_string); } catch (const std::exception &e) { std::string err_msg = "Invalid numeric value: " + value_as_string; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } return Status::OK(); @@ -133,7 +133,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, std::string *out_arg, ArgValue selected_arg = arg_map_[option]; if (used_args_[selected_arg]) { std::string err_msg = "The " + option + " argument was given more than once."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Flag that this arg is used now @@ -145,7 +145,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, std::string *out_arg, if (command_id != CommandId::kCmdUnknown) { if (command_id_ != CommandId::kCmdUnknown) { std::string err_msg = "Only one command at a time is allowed. Invalid command: " + option; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } else { command_id_ = command_id; } @@ -158,12 +158,12 @@ Status CacheAdminArgHandler::AssignArg(std::string option, std::string *out_arg, *arg_stream >> *out_arg; } else { std::string err_msg = option + " option requires an argument field. Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } if (out_arg->empty()) { std::string err_msg = option + " option requires an argument field. Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } } @@ -176,7 +176,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: ArgValue selected_arg = arg_map_[option]; if (used_args_[selected_arg]) { std::string err_msg = "The " + option + " argument was given more than once."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Flag that this arg is used now @@ -188,7 +188,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: if (command_id != CommandId::kCmdUnknown) { if (command_id_ != CommandId::kCmdUnknown) { std::string err_msg = "Only one command at a time is allowed. Invalid command: " + option; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } else { command_id_ = command_id; } @@ -200,7 +200,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: *arg_stream >> value_as_string; if (value_as_string.empty()) { std::string err_msg = option + " option requires an argument field. 
Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Now, attempt to convert the value into it's string format for output @@ -208,7 +208,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: *out_arg = std::stof(value_as_string, nullptr); } catch (const std::exception &e) { std::string err_msg = "Invalid numeric value: " + value_as_string; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } return Status::OK(); @@ -224,7 +224,7 @@ Status CacheAdminArgHandler::ParseArgStream(std::stringstream *arg_stream) { if (hostname_ != std::string(kCfgDefaultCacheHost)) { std::string err_msg = "Invalid host interface: " + hostname_ + ". Current limitation, only 127.0.0.1 can be used."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } break; } @@ -304,7 +304,7 @@ Status CacheAdminArgHandler::Validate() { if (!trailing_args_.empty()) { std::string err_msg = "Invalid arguments provided: " + trailing_args_; err_msg += "\nPlease try `cache_admin --help` for more information"; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // The user must pick at least one command. i.e. it's meaningless to just give a hostname or port but no command to @@ -312,18 +312,18 @@ Status CacheAdminArgHandler::Validate() { if (command_id_ == CommandId::kCmdUnknown) { std::string err_msg = "No command provided"; err_msg += "\nPlease try `cache_admin --help` for more information"; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Additional checks here auto max_num_workers = std::max(std::thread::hardware_concurrency(), 100); if (num_workers_ < 1 || num_workers_ > max_num_workers) - return Status(StatusCode::kSyntaxError, + return Status(StatusCode::kMDSyntaxError, "Number of workers must be in range of 1 and " + std::to_string(max_num_workers) + "."); - if (log_level_ < 0 || log_level_ > 3) return Status(StatusCode::kSyntaxError, "Log level must be in range (0..3)."); + if (log_level_ < 0 || log_level_ > 3) return Status(StatusCode::kMDSyntaxError, "Log level must be in range (0..3)."); if (memory_cap_ratio_ <= 0 || memory_cap_ratio_ > 1) - return Status(StatusCode::kSyntaxError, "Memory cap ratio should be positive and no greater than 1"); - if (port_ < 1025 || port_ > 65535) return Status(StatusCode::kSyntaxError, "Port must be in range (1025..65535)."); + return Status(StatusCode::kMDSyntaxError, "Memory cap ratio should be positive and no greater than 1"); + if (port_ < 1025 || port_ > 65535) return Status(StatusCode::kMDSyntaxError, "Port must be in range (1025..65535)."); return Status::OK(); } @@ -467,9 +467,9 @@ Status CacheAdminArgHandler::StopServer(CommandId command_id) { Status rc = rq->Wait(); if (rc.IsError()) { msg.RemoveResourcesOnExit(); - if (rc.IsNetWorkError()) { + if (rc == StatusCode::kMDNetWorkError) { std::string errMsg = "Server on port " + std::to_string(port_) + " is not up or has been shutdown already."; - return Status(StatusCode::kNetWorkError, errMsg); + return Status(StatusCode::kMDNetWorkError, errMsg); } return rc; } @@ -544,7 +544,7 @@ Status CacheAdminArgHandler::StartServer(CommandId command_id) { if (WIFEXITED(status)) { auto exit_status = WEXITSTATUS(status); if (exit_status) { - return Status(StatusCode::kUnexpectedError, msg); + return 
Status(StatusCode::kMDUnexpectedError, msg); } else { // Not an error, some info message goes to stdout std::cout << msg << std::endl; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc index 5542f611899..27cb0de8d97 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc @@ -75,7 +75,7 @@ Status CachedSharedMemory::AllocateSharedMemory(int32_t client_id, size_t sz, vo do { std::unique_lock lock(mux_[slot]); rc = shm_pool_[slot]->Allocate(sz, p); - if (rc.IsOutofMemory()) { + if (rc == StatusCode::kMDOutOfMemory) { slot = (slot + 1) % shm_pool_.size(); } } while (rc.IsError() && slot != begin_slot); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc index b54927b115d..b2ae11a13e0 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc @@ -137,7 +137,7 @@ Status CacheClient::WriteBuffer(std::unique_ptr &&in) const { Status CacheClient::AsyncWriteRow(const TensorRow &row) { if (async_buffer_stream_ == nullptr) { - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } RETURN_IF_NOT_OK(async_buffer_stream_->AsyncWrite(row)); return Status::OK(); @@ -145,7 +145,7 @@ Status CacheClient::AsyncWriteRow(const TensorRow &row) { Status CacheClient::AsyncWriteBuffer(std::unique_ptr &&in) { if (async_buffer_stream_ == nullptr) { - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } else { Status rc; std::unique_ptr tensor_table = std::make_unique(); @@ -155,7 +155,7 @@ Status CacheClient::AsyncWriteBuffer(std::unique_ptr &&in) { TensorRow row; RETURN_IF_NOT_OK(in->PopRow(&row)); rc = AsyncWriteRow(row); - if (rc.get_code() == StatusCode::kNotImplementedYet) { + if (rc.StatusCode() == StatusCode::kMDNotImplementedYet) { tensor_table->push_back(row); } else if (rc.IsError()) { return rc; @@ -165,7 +165,7 @@ Status CacheClient::AsyncWriteBuffer(std::unique_ptr &&in) { // If not all of them can be sent async, return what's left back to the caller. 
if (!tensor_table->empty()) { in->set_tensor_table(std::move(tensor_table)); - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } } return Status::OK(); @@ -225,7 +225,8 @@ Status CacheClient::CreateCache(uint32_t tree_crc, bool generate_id) { auto cache_state = static_cast(out); if (cache_state == CacheServiceState::kFetchPhase || (cache_state == CacheServiceState::kBuildPhase && cookie_.empty())) { - return Status(StatusCode::kDuplicateKey, __LINE__, __FILE__, "Not an error and we should bypass the build phase"); + return Status(StatusCode::kMDDuplicateKey, __LINE__, __FILE__, + "Not an error and we should bypass the build phase"); } } else { cinfo_.set_crc(tree_crc); // It's really a new cache we're creating so save our crc in the client @@ -243,10 +244,10 @@ Status CacheClient::CreateCache(uint32_t tree_crc, bool generate_id) { auto rq = std::make_shared(this, cinfo_, cache_mem_sz_, createFlag); RETURN_IF_NOT_OK(PushRequest(rq)); Status rc = rq->Wait(); - bool success = (rc.IsOk() || rc.get_code() == StatusCode::kDuplicateKey); + bool success = (rc.IsOk() || rc.StatusCode() == StatusCode::kMDDuplicateKey); // If we get kDuplicateKey, it just means we aren't the first one to create the cache, // and we will continue to parse the result. - if (rc.get_code() == StatusCode::kDuplicateKey) { + if (rc.StatusCode() == StatusCode::kMDDuplicateKey) { RETURN_IF_NOT_OK(rq->PostReply()); } if (success) { @@ -443,7 +444,7 @@ Status CacheClient::AsyncBufferStream::AsyncWrite(const TensorRow &row) { } // If the size is too big, tell the user to send it directly. if (sz > kAsyncBufferSize) { - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } std::unique_lock lock(mux_); // Check error from the server side while we have the lock; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h index 637bbe38c84..40922fea23a 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h @@ -66,7 +66,7 @@ enum class CacheServiceState : int8_t { /// \param rc[in] Status object /// \param reply[in/out] pointer to pre-allocated protobuf object inline void Status2CacheReply(const Status &rc, CacheReply *reply) { - reply->set_rc(static_cast(rc.get_code())); + reply->set_rc(static_cast(rc.StatusCode())); reply->set_msg(rc.ToString()); } /// \brief Generate the unix socket file we use on both client/server side given a tcp/ip port number diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc index 7a49dfc2370..5b95068f128 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc @@ -98,7 +98,7 @@ Status SerializeTensorRowHeader(const TensorRow &row, std::shared_ptr rq) { std::unique_lock lck(mux_); auto r = req_.emplace(seqNo, std::move(tag)); if (!r.second) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__); } } // Last step is to tag the request. @@ -124,7 +124,7 @@ Status CacheClientGreeter::WorkerEntry() { } else { err_msg = rq->rc_.error_message() + ". 
GRPC Code " + std::to_string(error_code); } - Status remote_rc = Status(StatusCode::kNetWorkError, __LINE__, __FILE__, err_msg); + Status remote_rc = Status(StatusCode::kMDNetWorkError, __LINE__, __FILE__, err_msg); Status2CacheReply(remote_rc, &rq->base_rq_->reply_); } // Notify the waiting thread. diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc index 1b822e684b2..ae75d064d18 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc @@ -25,7 +25,7 @@ Status PortToFtok(int port, SharedMemory::shm_key_t *out) { shmkey = ftok(unix_path.data(), 'a'); if (shmkey == (key_t)-1) { std::string errMsg = "Unable to create a ftok token. Errno = " + std::to_string(errno); - return Status(errno == ENOENT ? StatusCode::kFileNotExist : StatusCode::kUnexpectedError, errMsg); + return Status(errno == ENOENT ? StatusCode::kMDFileNotExist : StatusCode::kMDUnexpectedError, errMsg); } *out = shmkey; return Status::OK(); @@ -56,7 +56,7 @@ Status SharedMessage::SendStatus(const Status &rc) { CacheMsgBuf msg{ 1, }; - msg.body.status.err_code = static_cast(rc.get_code()); + msg.body.status.err_code = static_cast(rc.StatusCode()); auto err = memcpy_s(msg.body.status.err_msg, kSharedMessageSize, rc.ToString().data(), rc.ToString().size()); CHECK_FAIL_RETURN_UNEXPECTED(err == EOK, "memcpy_s failed. err = " + std::to_string(err)); msg.body.status.err_msg[rc.ToString().size()] = '\0'; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc index cdb36cbdf82..a118a5e6091 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc @@ -25,16 +25,17 @@ #include #include "minddata/dataset/engine/cache/cache_common.h" #include "minddata/dataset/engine/cache/cache_ipc.h" +namespace ms = mindspore; namespace ds = mindspore::dataset; /// Start the server /// \param argv /// \return Status object -ds::Status StartServer(int argc, char **argv) { - ds::Status rc; +ms::Status StartServer(int argc, char **argv) { + ms::Status rc; ds::CacheServer::Builder builder; if (argc != 8) { - return ds::Status(ds::StatusCode::kSyntaxError); + return ms::Status(ms::StatusCode::kMDSyntaxError); } int32_t port = strtol(argv[3], nullptr, 10); @@ -53,7 +54,7 @@ ds::Status StartServer(int argc, char **argv) { // is called. This is a standard procedure for daemonize a process on unix. if (chdir("/") == -1) { std::string errMsg = "Unable to change directory to /. Errno = " + std::to_string(errno); - return ds::Status(ds::StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return ms::Status(ms::StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } // A message queue for communication between parent and child (if we fork). @@ -80,13 +81,13 @@ ds::Status StartServer(int argc, char **argv) { // failed to fork if (pid < 0) { std::string errMsg = "Failed to fork process for cache server. Errno = " + std::to_string(errno); - return ds::Status(ds::StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return ms::Status(ms::StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else if (pid > 0) { // Parent and will be responsible for remove the queue on exit. 
msg.RemoveResourcesOnExit(); // Sleep one second and we attach to the msg que std::this_thread::sleep_for(std::chrono::seconds(1)); - ds::Status child_rc; + ms::Status child_rc; rc = msg.ReceiveStatus(&child_rc); if (rc.IsError()) { return rc; @@ -101,7 +102,7 @@ ds::Status StartServer(int argc, char **argv) { "logs (under " << ds::DefaultLogDir() << ") for any issues that may happen after startup\n"; signal(SIGCHLD, SIG_IGN); // ignore sig child signal. - return ds::Status::OK(); + return ms::Status::OK(); } else { // Child process will continue from here if daemonize and parent has already exited. // If we are running in the foreground, none of the code in block below will be run. @@ -110,7 +111,7 @@ ds::Status StartServer(int argc, char **argv) { sid = setsid(); if (sid < 0) { std::string errMsg = "Failed to setsid(). Errno = " + std::to_string(errno); - return ds::Status(ds::StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return ms::Status(ms::StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } close(0); close(1); @@ -137,10 +138,10 @@ ds::Status StartServer(int argc, char **argv) { int main(int argc, char **argv) { // This executable is not to be called directly, and should be invoked by cache_admin executable. - ds::Status rc = StartServer(argc, argv); + ms::Status rc = StartServer(argc, argv); // Check result if (rc.IsError()) { - auto errCode = rc.get_code(); + auto errCode = rc.StatusCode(); auto errMsg = rc.ToString(); std::cerr << errMsg << std::endl; return static_cast(errCode); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc index 35ddc1df9db..7e75fb0b3e4 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc @@ -136,7 +136,7 @@ Status NumaMemoryPool::Allocate(size_t n, void **p) { if (rc.IsOk()) { *p = ptr; break; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { inx = (inx + 1) % num_slots; } else { return rc; @@ -162,7 +162,7 @@ Status NumaMemoryPool::Allocate(size_t n, void **p) { if (rc.IsOk()) { *p = ptr; break; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { // Make the next arena and continue. slot = (slot + 1) % num_segments; } else { @@ -172,7 +172,7 @@ Status NumaMemoryPool::Allocate(size_t n, void **p) { } // Handle the case we have done one round robin search. if (ptr == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } return rc; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc index e677c58f067..47611836d3d 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc @@ -108,7 +108,7 @@ Status CachePool::Insert(CachePool::key_type key, const std::vectorDoInsert(key, bl); } catch (const std::bad_alloc &e) { - rc = Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + rc = Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } // Duplicate key is treated as error and we will also free the memory. 
if (rc.IsError() && bl.ptr != nullptr) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc index df68f406a1b..14803bcf28a 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc @@ -223,7 +223,7 @@ Status CreateCacheRequest::Prepare() { rq_.add_buf_data(fbb.GetBufferPointer(), fbb.GetSize()); return Status::OK(); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } } @@ -277,7 +277,7 @@ Status CacheSchemaRequest::SerializeCacheSchemaRequest(const std::unordered_map< rq_.add_buf_data(fbb.GetBufferPointer(), fbb.GetSize()); return Status::OK(); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc index 4f40fe0a778..14c38302092 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc @@ -169,7 +169,7 @@ Status CacheServer::GlobalMemoryCheck(uint64_t cache_mem_sz) { int64_t mem_consumed = stat.stat_.num_mem_cached * stat.stat_.average_cache_sz; max_avail -= mem_consumed; if (max_avail <= 0) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); } ++it; } @@ -179,12 +179,12 @@ Status CacheServer::GlobalMemoryCheck(uint64_t cache_mem_sz) { if (max_avail < avail_mem) { int64_t req_mem = cache_mem_sz * 1048576L; // It is in MB unit. if (req_mem > max_avail) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); } else if (req_mem == 0) { // This cache request is specifying unlimited memory up to the memory cap. If we have consumed more than // 85% of our limit, fail this request. if (static_cast(max_avail) / static_cast(avail_mem) <= 0.15) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); } } } @@ -249,7 +249,7 @@ Status CacheServer::CreateService(CacheRequest *rq, CacheReply *reply) { client_id = cs->num_clients_.fetch_add(1); all_caches_.emplace(connection_id, std::move(cs)); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } } @@ -276,7 +276,7 @@ Status CacheServer::CreateService(CacheRequest *rq, CacheReply *reply) { reply->set_result(fbb.GetBufferPointer(), fbb.GetSize()); // We can return OK but we will return a duplicate key so user can act accordingly to either ignore it // treat it as OK. - return duplicate ? Status(StatusCode::kDuplicateKey) : Status::OK(); + return duplicate ? 
Status(StatusCode::kMDDuplicateKey) : Status::OK(); } Status CacheServer::DestroyCache(CacheRequest *rq) { @@ -306,7 +306,7 @@ Status CacheServer::CacheRow(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Cache id " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { auto sz = rq->buf_data_size(); std::vector buffers; @@ -326,7 +326,7 @@ Status CacheServer::CacheRow(CacheRequest *rq, CacheReply *reply) { RETURN_IF_NOT_OK(cs->CacheRow(buffers, &id)); reply->set_result(std::to_string(id)); } else { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); } } return Status::OK(); @@ -353,7 +353,7 @@ Status CacheServer::FastCacheRow(CacheRequest *rq, CacheReply *reply) { Status rc; if (cs == nullptr) { std::string errMsg = "Cache id " + std::to_string(connection_id) + " not found"; - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // Only if the cookie matches, we can accept insert into this cache that has a build phase if (!cs->HasBuildPhase() || cookie == cs->cookie()) { @@ -365,11 +365,11 @@ Status CacheServer::FastCacheRow(CacheRequest *rq, CacheReply *reply) { } else { auto state = cs->GetState(); if (state != CacheServiceState::kFetchPhase) { - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cache service is not in fetch phase. The current phase is " + std::to_string(static_cast(state)) + ". Client id: " + std::to_string(client_id)); } else { - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cookie mismatch. Client id: " + std::to_string(client_id)); } } @@ -413,7 +413,7 @@ Status CacheServer::InternalFetchRow(CacheRequest *rq) { Status rc; if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } rc = cs->InternalFetchRow(flatbuffers::GetRoot(rq->buf_data(0).data())); // This is an internal request and is not tied to rpc. 
But need to post because there @@ -494,7 +494,7 @@ Status CacheServer::BatchFetchRows(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Cache id " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing row id"); auto &row_id_buf = rq->buf_data(0); @@ -551,7 +551,7 @@ Status CacheServer::BatchFetchRows(CacheRequest *rq, CacheReply *reply) { mem.resize(mem_sz); CHECK_FAIL_RETURN_UNEXPECTED(mem.capacity() >= mem_sz, "Programming error"); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } WritableSlice dest(mem.data(), mem_sz); RETURN_IF_NOT_OK(BatchFetch(fbb, &dest)); @@ -568,7 +568,7 @@ Status CacheServer::GetStat(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { CacheService::ServiceStat svc_stat; RETURN_IF_NOT_OK(cs->GetStat(&svc_stat)); @@ -595,7 +595,7 @@ Status CacheServer::CacheSchema(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing schema information"); auto &create_schema_buf = rq->buf_data(0); @@ -611,7 +611,7 @@ Status CacheServer::FetchSchema(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // We are going to use std::string to allocate and hold the result which will be eventually // 'moved' to the protobuf message (which underneath is also a std::string) for the purpose @@ -630,7 +630,7 @@ Status CacheServer::BuildPhaseDone(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // First piece of data is the cookie CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing cookie"); @@ -639,7 +639,7 @@ Status CacheServer::BuildPhaseDone(CacheRequest *rq) { if (cookie == cs->cookie()) { RETURN_IF_NOT_OK(cs->BuildPhaseDone()); } else { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); } } return Status::OK(); @@ -652,7 +652,7 @@ Status CacheServer::GetCacheMissKeys(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = 
"Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { std::vector gap; RETURN_IF_NOT_OK(cs->FindKeysMiss(&gap)); @@ -680,7 +680,7 @@ Status CacheServer::ToggleWriteMode(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // First piece of data is the on/off flag CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing action flag"); @@ -747,7 +747,7 @@ Status CacheServer::ConnectReset(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { auto client_id = rq->client_id(); MS_LOG(WARNING) << "Client id " << client_id << " with connection id " << connection_id << " disconnects"; @@ -836,7 +836,7 @@ Status CacheServer::ProcessRowRequest(CacheServerRequest *cache_req, bool *inter default: std::string errMsg("Internal error, request type is not row request: "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } return Status::OK(); } @@ -860,7 +860,7 @@ Status CacheServer::ProcessSessionRequest(CacheServerRequest *cache_req) { default: std::string errMsg("Internal error, request type is not session request: "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } return Status::OK(); } @@ -931,7 +931,7 @@ Status CacheServer::ProcessAdminRequest(CacheServerRequest *cache_req) { default: std::string errMsg("Internal error, request type is not admin request: "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } return Status::OK(); } @@ -949,7 +949,7 @@ Status CacheServer::ProcessRequest(CacheServerRequest *cache_req) { } else { std::string errMsg("Unknown request type : "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } // Notify it is done, and move on to the next request. 
@@ -1045,7 +1045,7 @@ Status CacheServer::GetFreeRequestTag(CacheServerRequest **q) { RETURN_UNEXPECTED_IF_NULL(q); auto *p = new (std::nothrow) CacheServerRequest(); if (p == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } *q = p; return Status::OK(); @@ -1091,7 +1091,7 @@ Status CacheServer::DestroySession(CacheRequest *rq) { } else { std::string errMsg = "Session id " + std::to_string(drop_session_id) + " not found in server on port " + std::to_string(port_) + "."; - return Status(StatusCode::kFileNotExist, errMsg); + return Status(StatusCode::kMDFileNotExist, errMsg); } } } @@ -1148,7 +1148,7 @@ Status CacheServer::GetCacheState(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { auto state = cs->GetState(); reply->set_result(std::to_string(static_cast(state))); @@ -1247,7 +1247,7 @@ Status CacheServer::Builder::IpcResourceCleanup() { std::string errMsg = "Cache server is already up and running"; // We return a duplicate error. The main() will intercept // and output a proper message - return Status(StatusCode::kDuplicateKey, errMsg); + return Status(StatusCode::kMDDuplicateKey, errMsg); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h index 1297e1018af..c8f2b95a980 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h @@ -419,7 +419,7 @@ class CacheServer : public Service { Status GetRc() { Status rc; for (auto &cache_rc : rc_lists_) { - if (cache_rc.IsError() && !cache_rc.IsInterrupted() && rc.IsOk()) { + if (cache_rc.IsError() && cache_rc != StatusCode::kMDInterrupted && rc.IsOk()) { rc = cache_rc; } } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc index 4679ac214ec..790832e36ff 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc @@ -42,7 +42,7 @@ Status CacheService::DoServiceStart() { // Return an error if we use more than recommended memory. std::string errMsg = "Requesting cache size " + std::to_string(cache_mem_sz_) + " while available system memory " + std::to_string(avail_mem); - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, errMsg); } memory_cap_ratio = static_cast(cache_mem_sz_) / avail_mem; } @@ -79,7 +79,7 @@ Status CacheService::CacheRow(const std::vector &buf, row_id_type if (st_ == CacheServiceState::kNoLocking) { // We ignore write this request once we turn off locking on the B+ tree. So we will just // return out of memory from now on. - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } try { // The first buffer is a flatbuffer which describes the rest of the buffers follow @@ -119,16 +119,16 @@ Status CacheService::CacheRow(const std::vector &buf, row_id_type } // Now we cache the buffer. 
Status rc = cp_->Insert(*row_id_generated, all_data); - if (rc == Status(StatusCode::kDuplicateKey)) { + if (rc == Status(StatusCode::kMDDuplicateKey)) { MS_LOG(DEBUG) << "Ignoring duplicate key."; } else { if (HasBuildPhase()) { // For cache service that has a build phase, record the error in the state // so other clients can be aware of the new state. There is nothing one can // do to resume other than to drop the cache. - if (rc.IsNoSpace()) { + if (rc == StatusCode::kMDNoSpace) { st_ = CacheServiceState::kNoSpace; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { st_ = CacheServiceState::kOutOfMemory; } } @@ -152,7 +152,7 @@ Status CacheService::FastCacheRow(const ReadableSlice &src, row_id_type *row_id_ if (st_ == CacheServiceState::kNoLocking) { // We ignore write this request once we turn off locking on the B+ tree. So we will just // return out of memory from now on. - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } try { // If we don't need to generate id, we need to find it from the buffer. @@ -172,16 +172,16 @@ Status CacheService::FastCacheRow(const ReadableSlice &src, row_id_type *row_id_ } // Now we cache the buffer. Status rc = cp_->Insert(*row_id_generated, {src}); - if (rc == Status(StatusCode::kDuplicateKey)) { + if (rc == Status(StatusCode::kMDDuplicateKey)) { MS_LOG(DEBUG) << "Ignoring duplicate key."; } else { if (HasBuildPhase()) { // For cache service that has a build phase, record the error in the state // so other clients can be aware of the new state. There is nothing one can // do to resume other than to drop the cache. - if (rc.IsNoSpace()) { + if (rc == StatusCode::kMDNoSpace) { st_ = CacheServiceState::kNoSpace; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { st_ = CacheServiceState::kOutOfMemory; } } @@ -307,7 +307,7 @@ Status CacheService::FetchSchema(std::string *out) const { if (!mem.empty()) { *out = std::move(mem); } else { - return Status(StatusCode::kFileNotExist, __LINE__, __FILE__, "No schema has been cached"); + return Status(StatusCode::kMDFileNotExist, __LINE__, __FILE__, "No schema has been cached"); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc index 9a63788b9d4..82782d7900b 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc @@ -36,7 +36,7 @@ Status CachePerfMsg::Receive(int32_t qID) { auto err = msgrcv(qID, reinterpret_cast(&small_msg_), sizeof(small_msg_.body.msg), 0, MSG_NOERROR); if (err == -1) { if (errno == EIDRM) { - return Status(StatusCode::kInterrupted); + return Status(StatusCode::kMDInterrupted); } else { std::string errMsg = "Failed to call msgrcv. 
Errno = " + std::to_string(errno); RETURN_STATUS_UNEXPECTED(errMsg); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc index 990f1f518d2..92a36a48654 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc @@ -33,7 +33,7 @@ int main(int argc, char **argv) { if (rc.IsError()) { std::cerr << rc.ToString() << std::endl; } - return static_cast(rc.get_code()); + return static_cast(rc.StatusCode()); } return 0; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h index dac9c8012e3..3324e460d24 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h @@ -100,5 +100,7 @@ class CachePerfRun { }; } // namespace dataset } // namespace mindspore - +// todo: waiting for the master of the codes to refactor +#define get_code StatusCode +#define kDuplicateKey kMDDuplicateKey #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CACHE_PERF_RUN_H_ diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc index 130bc102e66..bf03749fd92 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc @@ -33,12 +33,12 @@ int main(int argc, char **argv) { // If we hit any error, send the rc back to the parent. if (rc.IsError()) { ds::ErrorMsg proto; - proto.set_rc(static_cast(rc.get_code())); + proto.set_rc(static_cast(rc.StatusCode())); proto.set_msg(rc.ToString()); ds::CachePerfMsg msg; (void)cachePipelineRun.SendMessage(&msg, ds::CachePerfMsg::MessageType::kError, &proto); } - return static_cast(rc.get_code()); + return static_cast(rc.StatusCode()); } return 0; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h index d1d617133a6..c0f8d8e738f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h @@ -115,5 +115,9 @@ class CachePipelineRun { }; } // namespace dataset } // namespace mindspore - +// todo: waiting for the master of the codes to refactor +#define get_code StatusCode +#define kDuplicateKey kMDDuplicateKey +#define IsOutofMemory() StatusCode() == StatusCode::kMDOutOfMemory +#define IsNoSpace() StatusCode() == StatusCode::kMDNoSpace #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CACHE_PIPELINE_RUN_H_ diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc index ea6cb44f550..bc1ebce07ce 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc @@ -104,7 +104,7 @@ Status StorageContainer::Write(const ReadableSlice &dest, off64_t offset) const if (r_sz != sz) { errno_t err = (r_sz == 0) ? 
EOF : errno; if (errno == ENOSPC) { - return Status(StatusCode::kNoSpace, __LINE__, __FILE__); + return Status(StatusCode::kMDNoSpace, __LINE__, __FILE__); } else { RETURN_STATUS_UNEXPECTED(strerror(err)); } @@ -157,7 +157,7 @@ Status StorageContainer::CreateStorageContainer(std::shared_ptrCreate(); if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc index 2e16e843f50..26b12ea5f25 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc @@ -96,9 +96,9 @@ Status StorageManager::Write(key_type *key, const std::vector &bu cont = containers_.at(num_containers - 1); off64_t offset; Status rc = cont->Insert(buf, &offset); - if (rc.get_code() == StatusCode::kBuddySpaceFull) { + if (rc.StatusCode() == StatusCode::kMDBuddySpaceFull) { create_new_container = true; - // Remember how many containers we saw. In the next iteration we will do a comparision to see + // Remember how many containers we saw. In the next iteration we will do a comparison to see // if someone has already created it. last_num_container = num_containers; } else if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc b/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc index 2273a5d1a2e..6968566a88f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc @@ -140,7 +140,7 @@ Status ColDescriptor::MaterializeTensorShape(int32_t num_elements, TensorShape * // If we already had an unknown dimension, then we cannot have a second unknown dimension. // We only support the compute of a single unknown dim. if (requested_shape[i] == TensorShape::kDimUnknown && unknown_dim_position != TensorShape::kDimUnknown) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Requested shape has more than one unknown dimension!"); } @@ -312,12 +312,12 @@ Status DataSchema::ColumnLoad(nlohmann::json column_child_tree, const std::strin } // data type is mandatory field if (type_str.empty()) - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "json schema file for column " + col_name + " has invalid or missing column type."); // rank number is mandatory field if (rank_value <= -1) - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "json schema file for column " + col_name + " must define a positive rank value."); // Create the column descriptor for this column from the data we pulled from the json file @@ -425,7 +425,7 @@ Status DataSchema::AddColumn(const ColDescriptor &cd) { Status DataSchema::PreLoadExceptionCheck(const nlohmann::json &js) { // Check if columns node exists. It is required for building schema from file. if (js.find("columns") == js.end()) - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "\"columns\" node is required in the schema json file."); return Status::OK(); } @@ -434,12 +434,12 @@ Status DataSchema::PreLoadExceptionCheck(const nlohmann::json &js) { // name to column index number. 
Status DataSchema::GetColumnNameMap(std::unordered_map *out_column_name_map) { if (out_column_name_map == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "unexpected null output column name map."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "unexpected null output column name map."); } for (int32_t i = 0; i < col_descs_.size(); ++i) { if (col_descs_[i].name().empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Constructing column name map from schema, but found empty column name."); } (*out_column_name_map)[col_descs_[i].name()] = i; diff --git a/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc b/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc index 714b9e5647c..3c654ea9c99 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc @@ -290,7 +290,7 @@ Status ChildIterator::Drain() { RETURN_IF_NOT_OK(current_op_->GetNextInput(&curr_buffer_, worker_id_, child_idx_)); } if (curr_buffer_->eof()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Child iterator picked up EOF in drain."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Child iterator picked up EOF in drain."); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc index f240c9bc4c7..12f4f1595b5 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc @@ -122,7 +122,8 @@ Status BarrierOp::prepare(TensorQTable *const table) { clean_up_ = false; buffer_id_ = 0; if (table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "BarrierOp prepare phase requires a tensor table."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, + "BarrierOp prepare phase requires a tensor table."); } // fill initial row TensorRow new_row = {}; @@ -150,7 +151,7 @@ Status BarrierOp::prepare(TensorQTable *const table) { // fillBuffer always expects a new table to fill Status BarrierOp::fillBuffer(TensorQTable *const table) { if (table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "BarrierOp fillBuffer null table pointer."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "BarrierOp fillBuffer null table pointer."); } TensorRow new_row = {}; while (table->size() < static_cast(rows_per_buffer_)) { @@ -172,7 +173,7 @@ Status BarrierOp::blockCond() { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } // we have condition name, however the flexibility is in python today try { @@ -180,11 +181,11 @@ Status BarrierOp::blockCond() { py::object ret_py_obj = condition_function_(); // Process the return value if (!py::isinstance(ret_py_obj)) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, condition wait function should return true/false."); } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } } return 
Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc index 4c64c47057e..5590ae71d80 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc @@ -61,7 +61,7 @@ Status BatchOp::Builder::SanityCheck() { err += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); + return err.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); } #ifdef ENABLE_PYTHON @@ -261,7 +261,7 @@ Status BatchOp::MakeBatchedBuffer(std::pair, CBatc Status BatchOp::LaunchThreadsAndInitOp() { if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } RETURN_IF_NOT_OK(worker_queues_.Register(tree_->AllTasks())); RETURN_IF_NOT_OK( @@ -338,23 +338,23 @@ Status BatchOp::InvokeBatchSizeFunc(int32_t *batch_size, CBatchInfo info) { // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized."); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized."); } try { py::object size = batch_size_func_(info); *batch_size = size.cast(); if (*batch_size <= 0) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, batch size function should return an integer greater than 0."); } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } catch (const py::cast_error &e) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, batch size function should return an integer greater than 0."); } } - return Status(StatusCode::kOK, "Batch size func call succeed."); + return Status(StatusCode::kSuccess, "Batch size func call succeed."); } Status BatchOp::InvokeBatchMapFunc(TensorTable *input, TensorTable *output, CBatchInfo info) { @@ -362,7 +362,7 @@ Status BatchOp::InvokeBatchMapFunc(TensorTable *input, TensorTable *output, CBat // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized."); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized."); } try { // Prepare batch map call back parameters @@ -407,9 +407,9 @@ Status BatchOp::InvokeBatchMapFunc(TensorTable *input, TensorTable *output, CBat output->push_back(std::move(output_batch)); } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } catch (const py::cast_error &e) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, batch map function should return a tuple of list of numpy array."); } } diff --git 
a/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc index d3057353f3a..03f9cf4fbff 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc @@ -191,7 +191,7 @@ Status BucketBatchByLengthOp::PadAndBatchBucket(int32_t bucket_index, int32_t ba if (bucket_index + 1 >= bucket_boundaries_.size()) { std::string error_message = "Invalid data, requested to pad to bucket boundary, element falls in last bucket."; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, error_message); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, error_message); } pad_shape[i] = bucket_boundaries_[bucket_index + 1] - 1; diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc index a633784fc5c..8053baa5676 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc @@ -42,7 +42,7 @@ BuildSentencePieceVocabOp::BuildSentencePieceVocabOp(std::shared_ptrRegister(tree_->AllTasks())); RETURN_IF_NOT_OK(tree_->AllTasks()->CreateAsyncTask( @@ -84,10 +84,10 @@ Status BuildSentencePieceVocabOp::SentenceThread() { sentencepiece::util::Status s_status = sentencepiece::SentencePieceTrainer::Train(BuildParams(), sentence_iter.get(), &model_proto); if (!s_status.ok()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, s_status.message()); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, s_status.message()); } else { if (vocab_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, sentencepiece vocab not set."); } vocab_->set_model_proto(model_proto); @@ -145,7 +145,7 @@ void BuildSentencePieceVocabOp::Next(std::string *sentence) { if (new_row[col_id_]->type().IsNumeric() || new_row[col_id_]->Rank() > 1) { ret_status_ = - Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid data, build_sentence_piece_vocab only works on string data with rank equal to 1, got type: " + new_row[col_id_]->type().ToString() + "and rank: " + std::to_string(new_row[col_id_]->Rank())); read_done_ = true; diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc index c2145e0e421..6d915abe889 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc @@ -80,7 +80,7 @@ Status BuildVocabOp::WorkerEntry(int32_t worker_id) { Status BuildVocabOp::operator()() { // launch the collector thread if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } RETURN_IF_NOT_OK(distributor_queue_->Register(tree_->AllTasks())); RETURN_IF_NOT_OK(collector_queue_->Register(tree_->AllTasks())); diff --git 
a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc index 8b914a780d7..5accad7c76e 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc @@ -233,7 +233,7 @@ Status CacheBase::UpdateColumnMapFromCache() { // Get the schema from the server. It may not be there yet. So tolerate the error. if (column_name_id_map_.empty()) { rc = cache_client_->FetchSchema(&column_name_id_map_); - if (rc == Status(StatusCode::kFileNotExist)) { + if (rc == Status(StatusCode::kMDFileNotExist)) { MS_LOG(DEBUG) << "Schema not in the server yet."; rc = Status::OK(); } @@ -304,14 +304,14 @@ Status CacheBase::Prefetcher(int32_t worker_id) { int32_t retry_count = 0; do { rc = PrefetchRows(prefetch_keys, &cache_miss); - if (rc.IsNetWorkError() && retry_count < max_retries) { + if (rc == StatusCode::kMDNetWorkError && retry_count < max_retries) { // If we get some network error, we will attempt some retries retry_count++; } else if (rc.IsError()) { MS_LOG(WARNING) << rc.ToString(); return rc; } - } while (rc.IsNetWorkError()); + } while (rc == StatusCode::kMDNetWorkError); // In case any thread is waiting for the rows to come back and blocked on a semaphore, // we will put an empty row in the local cache. if (rc.IsError() && AllowCacheMiss()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc index 8549caf82cb..7beb6c15b07 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc @@ -39,12 +39,12 @@ CacheLookupOp::Builder::Builder() : build_cache_client_(nullptr), build_sampler_ // Check if the required parameters are set by the builder. 
Status CacheLookupOp::Builder::SanityCheck() const { if (build_cache_client_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheLookupOp requires a CacheClient, but got nullptr."); } // Make sure the cache client has a valid session if (!build_cache_client_->session_id()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, cache client for CacheLookupOp requires a session id which is not equal to 0."); } return Status::OK(); @@ -59,7 +59,7 @@ Status CacheLookupOp::Builder::Build(std::shared_ptr *ptr) { } Status CacheLookupOp::operator()() { if (!sampler_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheLookupOp requires a sampler before it can be executed, but got nullptr."); } RETURN_IF_NOT_OK(RegisterResources()); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc index 6037897fad2..e02eecbd7c9 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc @@ -129,7 +129,7 @@ Status CacheMergeOp::CacheMissWorkerEntry(int32_t workerId) { Status rc; if ((rc = cache_client_->FlushAsyncWriteBuffer()).IsError()) { cache_missing_rows_ = false; - if (rc.IsOutofMemory() || rc.IsNoSpace()) { + if (rc == StatusCode::kMDOutOfMemory || rc == StatusCode::kMDNoSpace) { cache_client_->ServerRunningOutOfResources(); } else { MS_LOG(INFO) << "Async row flushing not successful: " << rc.ToString(); @@ -156,7 +156,7 @@ Status CacheMergeOp::CacheMissWorkerEntry(int32_t workerId) { rc = rq->AsyncSendCacheRequest(cache_client_, row); if (rc.IsOk()) { RETURN_IF_NOT_OK(io_que_->EmplaceBack(row_id)); - } else if (rc.IsOutofMemory() || rc.IsNoSpace()) { + } else if (rc == StatusCode::kMDOutOfMemory || rc == StatusCode::kMDNoSpace) { cache_missing_rows_ = false; cache_client_->ServerRunningOutOfResources(); } @@ -188,9 +188,9 @@ Status CacheMergeOp::Cleaner() { Status rc = rq->CheckCacheResult(); if (rc.IsError()) { // If interrupt, time to quit. - if (rc.IsInterrupted()) { + if (rc == StatusCode::kMDInterrupted) { return Status::OK(); - } else if (rc.IsOutofMemory() || rc.IsNoSpace()) { + } else if (rc == StatusCode::kMDOutOfMemory || rc == StatusCode::kMDNoSpace) { // The server is hitting some limit and we will turn off caching from now on. cache_missing_rows_ = false; cache_client_->ServerRunningOutOfResources(); @@ -215,7 +215,7 @@ Status CacheMergeOp::PrepareNodePostAction() { // Run any common code from supe // Construct the cache const bool generate_ids = false; Status rc = cache_client_->CreateCache(cache_crc, generate_ids); - if (rc.get_code() == StatusCode::kDuplicateKey) { + if (rc.StatusCode() == StatusCode::kMDDuplicateKey) { // We are told the cache has been created already. MS_LOG(INFO) << "Cache created already"; rc = Status::OK(); @@ -244,12 +244,12 @@ CacheMergeOp::Builder::Builder() : build_cache_client_(nullptr), build_sampler_( // Check if the required parameters are set by the builder. 
Status CacheMergeOp::Builder::SanityCheck() const { if (build_cache_client_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheMergeOp requires a CacheClient, but got nullptr."); } // Make sure the cache client has a valid session if (!build_cache_client_->session_id()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, cache client for CacheMergeOp requires a session id which is not equal to 0."); } return Status::OK(); @@ -316,7 +316,7 @@ Status CacheMergeOp::TensorRowCacheRequest::AsyncSendCacheRequest(const std::sha // We will do a deep copy but write directly into CacheRequest protobuf or shared memory Status rc; rc = cc->AsyncWriteRow(row); - if (rc.get_code() == StatusCode::kNotImplementedYet) { + if (rc.StatusCode() == StatusCode::kMDNotImplementedYet) { cleaner_copy_ = std::make_shared(cc.get()); rc = cleaner_copy_->SerializeCacheRowRequest(cc.get(), row); if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc index ae73cc1bdf9..5c80a5523aa 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc @@ -41,12 +41,12 @@ CacheOp::Builder::Builder() : build_cache_client_(nullptr), build_sampler_(nullp // Check if the required parameters are set by the builder. Status CacheOp::Builder::SanityCheck() const { if (build_cache_client_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheOp requires a CacheClient, but got nullptr."); } // Make sure the cache client has a valid session if (!build_cache_client_->session_id()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, cache client for CacheOp requires a session id which is not equal to 0."); } return Status::OK(); @@ -78,7 +78,7 @@ Status CacheOp::InitCache() { return Status::OK(); } // This class functor will provide the master loop that drives the logic for performing the work Status CacheOp::operator()() { if (!sampler_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheOp requires a sampler before it can be executed, but got nullptr."); } RETURN_IF_NOT_OK(RegisterResources()); @@ -113,7 +113,7 @@ Status CacheOp::CacheAllRows(int32_t worker_id) { Status rc; // Do the Async write if we attach to the shared memory. 
rc = cache_client_->AsyncWriteBuffer(std::move(db_ptr)); - if (rc.get_code() == StatusCode::kNotImplementedYet) { + if (rc.StatusCode() == StatusCode::kMDNotImplementedYet) { RETURN_IF_NOT_OK(cache_client_->WriteBuffer(std::move(db_ptr))); } else if (rc.IsError()) { return rc; @@ -169,9 +169,9 @@ Status CacheOp::WaitForCachingAllRows() { BuildPhaseDone = true; break; case CacheServiceState::kOutOfMemory: - return Status(StatusCode::kOutOfMemory, "Cache server is running out of memory"); + return Status(StatusCode::kMDOutOfMemory, "Cache server is running out of memory"); case CacheServiceState::kNoSpace: - return Status(StatusCode::kNoSpace, "Cache server is running of out spill storage"); + return Status(StatusCode::kMDNoSpace, "Cache server is running out of spill storage"); case CacheServiceState::kNone: case CacheServiceState::kError: default: @@ -246,7 +246,7 @@ Status CacheOp::PrepareNodePostAction() { // Construct the cache const bool generate_ids = true; Status rc = cache_client_->CreateCache(cache_crc, generate_ids); - if (rc.get_code() == StatusCode::kDuplicateKey) { + if (rc.StatusCode() == StatusCode::kMDDuplicateKey) { // We are told the cache has been created already. So we skip the build phase. phase_ = Phase::kFetchPhase; rc = Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc index e1876e232d5..05714d88659 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc @@ -157,18 +157,14 @@ Status DeviceQueueOp::SendDataToAscend() { TensorRow currRow; for (int row_id = 0; row_id < current_buffer->NumRows(); row_id++) { RETURN_IF_NOT_OK(current_buffer->GetRow(row_id, &currRow)); - while (stop_send_ && ascend_keep_waiting_) { - MS_LOG(DEBUG) << "stop_send flag is set, waiting for continue signal..."; - std::this_thread::sleep_for(std::chrono::microseconds(100)); - } + WaitContinueSignal(); auto status = tdtInstancePtr->hostPush(currRow, true, channel_name_, isProfilingEnable, tdt_cost); if (status == TdtStatus::FAILED) { if (stop_send_) { MS_LOG(INFO) << "stop_send received"; return Status::OK(); - } else { - return Status(StatusCode::kTDTPushFailure, "TDT Push Failed"); } + return Status(StatusCode::kMDTDTPushFailure, "TDT Push Failed"); } if (create_data_info_queue_) { DATA_INFO data_info; @@ -200,9 +196,8 @@ Status DeviceQueueOp::SendDataToAscend() { if (stop_send_) { MS_LOG(INFO) << "stop_send received"; return Status::OK(); - } else { - return Status(StatusCode::kTDTPushFailure, "TDT Push Failed"); } + return Status(StatusCode::kMDTDTPushFailure, "TDT Push Failed"); } MS_LOG(INFO) << "an epoch has already sent, now stop send data."; stop_send_ = true; @@ -219,13 +214,19 @@ Status DeviceQueueOp::SendDataToAscend() { return Status::OK(); } +void DeviceQueueOp::WaitContinueSignal() const { + while (stop_send_ && ascend_keep_waiting_) { + MS_LOG(DEBUG) << "stop_send flag is set, waiting for continue signal..."; + std::this_thread::sleep_for(std::chrono::microseconds(100)); + } +} #endif #ifdef ENABLE_TDTQUE Status DeviceQueueOp::GetDataInfo(DATA_INFO *data_info) { if (!create_data_info_queue_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "DataInfo queue is not created."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "DataInfo queue is not created."); } // This place has a race condition with operator(), so the first one 
// arrive here will do the initialize work. @@ -241,7 +242,7 @@ Status DeviceQueueOp::GetDataInfo(DATA_INFO *data_info) { } #else Status DeviceQueueOp::GetDataInfo(DATA_INFO *data_info) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "GetDataInfo is not supported yet."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "GetDataInfo is not supported yet."); } #endif @@ -301,7 +302,7 @@ Status DeviceQueueOp::PushDataToGPU() { } handle = GpuBufferMgr::GetInstance().Open(0, channel_name_, data_size, release_function); if (handle == INVALID_HANDLE) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Failed to open channel for sending data."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Failed to open channel for sending data."); } is_open = true; } @@ -309,14 +310,14 @@ Status DeviceQueueOp::PushDataToGPU() { // Data prefetch only when PS mode enables cache. if (items.size() > 0) { if (!ps::PsDataPrefetch::GetInstance().PrefetchData(channel_name_, items[0].data_ptr_, items[0].data_len_)) { - return Status(StatusCode::kTimeOut, __LINE__, __FILE__, "Failed to prefetch data."); + return Status(StatusCode::kMDTimeOut, __LINE__, __FILE__, "Failed to prefetch data."); } } while (!GpuBufferMgr::GetInstance().IsClosed() && !TaskManager::FindMe()->Interrupted()) { BlockQueueStatus_T ret = GpuBufferMgr::GetInstance().Push(handle, items, WAIT_TIME); if (ret) { if (ret == BlockQueueStatus_T::ERROR_INPUT) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Invalid input data, please check it."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid input data, please check it."); } else { if (!stop_send_) { MS_LOG(DEBUG) << "Retry pushing data..."; @@ -438,13 +439,13 @@ Status DeviceQueueOp::MallocForGPUData(std::vector *items, for (auto &sub_item : *items) { RETURN_IF_NOT_OK(pool_[worker_id]->Allocate(sub_item.data_len_, &sub_item.data_ptr_)); if (sub_item.data_ptr_ == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Memory malloc failed."); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Memory malloc failed."); } const unsigned char *column_data = curr_row[i]->GetBuffer(); if (memcpy_s(sub_item.data_ptr_, sub_item.data_len_, column_data, static_cast(curr_row[i++]->SizeInBytes())) != 0) { MS_LOG(ERROR) << "memcpy_s failed!"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "memcpy_s failed."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "memcpy_s failed."); } } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h index 16b45b5511d..dcf77cd262f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h @@ -190,6 +190,7 @@ class DeviceQueueOp : public PipelineOp { private: #ifdef ENABLE_TDTQUE + void WaitContinueSignal() const; Status SendDataToAscend(); bool ascend_keep_waiting_; #endif diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc index 487501ba739..f674582c265 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc @@ -43,7 +43,7 @@ Status FilterOp::Builder::SanityCheck() { err += builder_num_workers_ <= 0 ? 
"Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); + return err.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); } FilterOp::Builder::Builder() { @@ -66,7 +66,7 @@ FilterOp::FilterOp(const std::vector &in_col_names, int32_t num_wor Status FilterOp::operator()() { // The operator class just starts off threads by calling the tree_ function. if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } filter_queues_.Init(num_workers_, oc_queue_size_); RETURN_IF_NOT_OK(filter_queues_.Register(tree_->AllTasks())); @@ -244,7 +244,7 @@ Status FilterOp::InvokePredicateFunc(const TensorRow &input, bool *out_predicate RETURN_IF_NOT_OK(predicate_func_->Compute(input, &output)); RETURN_IF_NOT_OK(output.at(0)->GetItemAt(out_predicate, {})); - return Status(StatusCode::kOK, "FilterOp predicate func call succeed"); + return Status(StatusCode::kSuccess, "FilterOp predicate func call succeed"); } // Visitor accept method for NodePass diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc index 795381fbba6..c951a61b80b 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc @@ -43,7 +43,7 @@ MapOp::Builder::Builder() { // Check if the required parameters are set by the builder. Status MapOp::Builder::sanityCheck() const { if (build_tensor_funcs_.empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Building a MapOp without providing any function/operation to apply"); } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc index b4637faf5e8..0eaa31d2a07 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc @@ -126,7 +126,7 @@ Status ShuffleOp::AddRowToShuffleBuffer(TensorRow new_shuffle_row) { shuffle_last_row_idx_ = (shuffle_buffer_->size()) - 1; } else { if (!(*shuffle_buffer_)[shuffle_last_row_idx_].empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Last row of shuffle buffer should not be occupied!"); } (*shuffle_buffer_)[shuffle_last_row_idx_] = std::move(new_shuffle_row); @@ -245,7 +245,7 @@ Status ShuffleOp::InitShuffleBuffer() { // shuffle buffer to it's max size, or the dataset below us is not providing any more // rows. 
if (shuffle_buffer_state_ != kShuffleStateInit) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid shuffle buffer state (SHUFFLE_STATE_INIT expected)"); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc index 97e7104a986..4e0482588fd 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc @@ -67,7 +67,7 @@ Status AlbumOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } AlbumOp::AlbumOp(int32_t num_wkrs, int32_t rows_per_buffer, std::string file_dir, int32_t queue_size, bool do_decode, @@ -577,7 +577,7 @@ Status AlbumOp::InitSampler() { Status AlbumOp::LaunchThreadsAndInitOp() { if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } // registers QueueList and individual Queues for interrupt services RETURN_IF_NOT_OK(io_block_queues_.Register(tree_->AllTasks())); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc index 3dae849e659..54c8cf6da96 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc @@ -59,7 +59,7 @@ Status CelebAOp::Builder::Build(std::shared_ptr *op) { builder_op_connector_size_, builder_decode_, builder_usage_, builder_extensions_, std::move(builder_schema_), std::move(builder_sampler_)); if (*op == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "CelebAOp init failed."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "CelebAOp init failed."); } return Status::OK(); @@ -74,7 +74,7 @@ Status CelebAOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } CelebAOp::CelebAOp(int32_t num_workers, int32_t rows_per_buffer, const std::string &dir, int32_t queue_size, @@ -95,7 +95,7 @@ CelebAOp::CelebAOp(int32_t num_workers, int32_t rows_per_buffer, const std::stri Status CelebAOp::LaunchThreadsAndInitOp() { if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } RETURN_IF_NOT_OK(io_block_queues_.Register(tree_->AllTasks())); @@ -119,7 +119,7 @@ Status CelebAOp::ParseAttrFile() { std::ifstream attr_file((folder_path / "list_attr_celeba.txt").toString()); if (!attr_file.is_open()) { std::string attr_file_name = (folder_path / "list_attr_celeba.txt").toString(); - return Status(StatusCode::kFileNotExist, __LINE__, __FILE__, + return Status(StatusCode::kMDFileNotExist, __LINE__, __FILE__, "Invalid file, failed to open Celeba attr file: " + attr_file_name); } @@ -368,7 +368,7 @@ Status CelebAOp::WorkerEntry(int32_t worker_id) { } RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block)); } - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Unexpected nullptr received in worker."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Unexpected nullptr received in worker."); } Status CelebAOp::LoadBuffer(const std::vector &keys, std::unique_ptr *db) { @@ -396,7 +396,7 @@ Status CelebAOp::LoadTensorRow(row_id_type row_id, const std::pair *op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc index 223b12499e5..01a167f006f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc @@ -118,7 +118,7 @@ Status CocoOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } CocoOp::CocoOp(const TaskType &task_type, const std::string &image_folder_path, const std::string &annotation_path, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc index 32118e7e78c..8575d06006e 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc @@ -46,7 +46,7 @@ Status CsvOp::Builder::ValidateInputs() const { ? "Invalid parameter, num_shard must be greater than shard_id and greater than 0, got num_shard: " + std::to_string(builder_num_devices_) + ", shard_id: " + std::to_string(builder_device_id_) + ".\n" : ""; - return err.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err); + return err.empty() ? 
Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err); } Status CsvOp::Builder::Build(std::shared_ptr *op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc index 90b3a5939e9..4bbecff4311 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc @@ -89,7 +89,7 @@ Status GeneratorOp::CreateGeneratorObject() { // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py::array sample_ids; @@ -103,7 +103,7 @@ Status GeneratorOp::CreateGeneratorObject() { generator_ = generator_function_(); } } catch (const py::error_already_set &e) { - ret = Status(StatusCode::kPyFuncException, e.what()); + ret = Status(StatusCode::kMDPyFuncException, e.what()); } } return ret; @@ -118,33 +118,33 @@ Status GeneratorOp::Init() { Status GeneratorOp::PyRowToTensorRow(py::object py_data, TensorRow *tensor_row) { if (!py::isinstance(py_data)) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, Generator should return a tuple of numpy arrays."); } py::tuple py_row = py_data.cast(); // Check if returned number of columns matches with column names if (py_row.size() != column_names_.size()) { return Status( - StatusCode::kPyFuncException, __LINE__, __FILE__, + StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, Generator should return same number of numpy arrays as specified in column names."); } // Iterate over two containers simultaneously for memory copy for (int i = 0; i < py_row.size(); ++i) { py::object ret_py_ele = py_row[i]; if (!py::isinstance(ret_py_ele)) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, Generator should return a tuple of numpy arrays."); } std::shared_ptr tensor; RETURN_IF_NOT_OK(Tensor::CreateFromNpArray(ret_py_ele.cast(), &tensor)); if ((!column_types_.empty()) && (column_types_[i] != DataType::DE_UNKNOWN) && (column_types_[i] != tensor->type())) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, input column type is not same with output tensor type."); } tensor_row->push_back(tensor); } - return Status(StatusCode::kOK, ""); + return Status(StatusCode::kSuccess, ""); } Status GeneratorOp::FillBuffer(TensorQTable *tt) { @@ -207,7 +207,7 @@ Status GeneratorOp::operator()() { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { RETURN_IF_NOT_OK(FillBuffer(fetched_table.get())); @@ -217,14 +217,14 @@ Status GeneratorOp::operator()() { e.restore(); // Pop up non StopIteration Python Exception if (!eoe) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, e.what()); + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, e.what()); } if (num_rows_sampled != -1 && 
num_rows_sampled != generator_counter_) { std::stringstream ss; ss << "The actual amount of data read from generator " << generator_counter_ << " is different from generator.len " << num_rows_sampled << ", you should adjust generator.len to make them match."; - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, ss.str()); + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, ss.str()); } } } @@ -275,7 +275,7 @@ Status GeneratorOp::Reset() { wp_.Set(); } generator_counter_ = 0; - return Status(StatusCode::kOK, "GeneratorOp Reset Succeed"); + return Status(StatusCode::kSuccess, "GeneratorOp Reset Succeed"); } // Visitor accept method for NodePass diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc index dbece1371d3..1e806fc45c6 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc @@ -62,7 +62,7 @@ Status ImageFolderOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } ImageFolderOp::ImageFolderOp(int32_t num_wkrs, int32_t rows_per_buffer, std::string file_dir, int32_t queue_size, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc index a7e83d48f3f..eff09d53c8a 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc @@ -62,7 +62,7 @@ Status ManifestOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } ManifestOp::ManifestOp(int32_t num_works, int32_t rows_per_buffer, std::string file, int32_t queue_size, bool decode, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc index d3d68b25215..80f2a09eee1 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc @@ -63,7 +63,7 @@ Status MindRecordOp::Builder::Build(std::shared_ptr *ptr) { std::shared_ptr new_mind_record_op; if (build_dataset_file_.empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid file, MindRecord path is invalid or not set."); } mindrecord::json sample_json; diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc index 363747b10c0..bdf70066582 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc @@ -71,7 +71,7 @@ Status MnistOp::Builder::SanityCheck() { err_msg += valid.find(builder_usage_) == valid.end() ? "Invalid parameter, usage must be 'train','test' or 'all', but got " + builder_usage_ + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } MnistOp::MnistOp(const std::string &usage, int32_t num_workers, int32_t rows_per_buffer, std::string folder_path, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc index 0beb69e1be6..eccedcecd3a 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc @@ -327,7 +327,7 @@ Status RandomDataOp::PackAndSend(int32_t worker_id, std::unique_ptr(size_in_bytes); int ret_code = memset_s(buf.get(), size_in_bytes, random_byte, size_in_bytes); if (ret_code != 0) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Failed to set random bytes for a tensor."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Failed to set random bytes for a tensor."); } RETURN_IF_NOT_OK(Tensor::CreateFromMemory(*new_shape, current_col.type(), buf.get(), &new_tensor)); @@ -377,7 +377,7 @@ Status RandomDataOp::Reset() { // Ensure all guys are in the waitpost if (guys_in_ != num_workers_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Issuing a reset, but some workers are missing from epochSync!"); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc index c4107e7bcd9..4c283ca2726 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc @@ -36,7 +36,7 @@ Status PythonSamplerRT::GetNextSample(std::unique_ptr *out_buffer) { 
py::gil_scoped_acquire gil_acquire; (*out_buffer) = std::make_unique(0, DataBuffer::kDeBFlagNone); if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py::object py_ret = py_sampler_instance.attr("_get_indices")(); @@ -51,9 +51,9 @@ Status PythonSamplerRT::GetNextSample(std::unique_ptr *out_buffer) { } } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } catch (const py::cast_error &e) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid data, python sampler iterator should return an integer index."); } } @@ -78,12 +78,12 @@ Status PythonSamplerRT::InitSampler() { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py_sampler_instance.attr("_handshake")(num_rows_, num_samples_); } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } } @@ -96,12 +96,12 @@ Status PythonSamplerRT::ResetSampler() { need_to_reset_ = false; py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py_sampler_instance.attr("reset")(); } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } if (HasChildSampler()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc index 9c1e15aa6ad..485255e277a 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc @@ -116,12 +116,12 @@ Status SamplerRT::GetAllIdsThenReset(py::array *data) { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { RETURN_IF_NOT_OK(sample_ids->GetDataAsNumpy(data)); } catch (const std::runtime_error &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc index 9ca5aefc6cb..3168645fa0c 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc @@ -54,7 +54,7 @@ Status WeightedRandomSamplerRT::InitSampler() { std::to_string(samples_per_buffer_) + ".\n"); if (weights_.size() > static_cast(num_rows_)) { - return Status(StatusCode::kUnexpectedError, 
__LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, size of sample weights must be less than or equal to num of data, " "otherwise might cause generated id out of bound or other errors, but got weight size: " + std::to_string(weights_.size()) + ", num of data: " + std::to_string(num_rows_)); @@ -119,7 +119,7 @@ Status WeightedRandomSamplerRT::ResetSampler() { // Get the sample ids. Status WeightedRandomSamplerRT::GetNextSample(std::unique_ptr *out_buffer) { if (weights_.size() > static_cast(num_rows_)) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, size of sample weights must be less than or equal to num of data, " "otherwise might cause generated id out of bound or other errors, but got weight size: " + std::to_string(weights_.size()) + ", num of data: " + std::to_string(num_rows_)); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc index 0680f9aca85..97eaf216045 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc @@ -51,7 +51,7 @@ Status TextFileOp::Builder::ValidateInputs() const { ? "Invalid parameter, num_shard must be greater than shard_id and greater than 0, got num_shard: " + std::to_string(builder_num_devices_) + ", shard_id: " + std::to_string(builder_device_id_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } Status TextFileOp::Builder::Build(std::shared_ptr *op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc index 562545b3134..0f85128ebf9 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc @@ -103,7 +103,7 @@ Status TFReaderOp::Builder::ValidateInputs() const { err_msg += accumulated_filenames; } - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } Status TFReaderOp::Builder::Build(std::shared_ptr *out_tf_reader_op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc index bb4946d9103..5346dc69372 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc @@ -93,7 +93,7 @@ Status VOCOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } VOCOp::VOCOp(const TaskType &task_type, const std::string &task_mode, const std::string &folder_path, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc index 2177e3886ce..2ce1d895456 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc @@ -123,7 +123,7 @@ Status ZipOp::prepare(TensorQTable *const table) { draining_ = false; buffer_id_ = 0; if (table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid data, ZipOp prepare phase requires a tensor table, but got nullptr."); } // fill initial row @@ -148,7 +148,7 @@ Status ZipOp::prepare(TensorQTable *const table) { // fillBuffer always expects a new table to fill Status ZipOp::fillBuffer(TensorQTable *const table) { if (table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid data, ZipOp fillBuffer null table pointer."); } TensorRow new_row; @@ -199,7 +199,7 @@ Status ZipOp::getNextTensorRow(TensorRow *const new_zip_row) { Status ZipOp::drainPipeline() { // we don't need to drain if we reached eof if (eof_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "ZipOp draining should not be done if already at eof!"); } for (int32_t con = 0; con < children_num_; ++con) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/db_connector.h b/mindspore/ccsrc/minddata/dataset/engine/db_connector.h index 2d2cf6d226b..c6647a798f7 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/db_connector.h +++ b/mindspore/ccsrc/minddata/dataset/engine/db_connector.h @@ -58,7 +58,7 @@ class DbConnector : public Connector> { // @param retry_if_eoe A flag to allow the same thread invoke pop() again if the current pop returns eoe buffer. Status PopWithRetry(int32_t worker_id, std::unique_ptr *result, bool retry_if_eoe = false) noexcept { if (result == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "[ERROR] nullptr detected when getting data from db connector"); } else { std::unique_lock lk(m_); @@ -69,7 +69,7 @@ class DbConnector : public Connector> { } else { RETURN_IF_NOT_OK(queues_[pop_from_]->PopFront(result)); if (*result == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "[ERROR] nullptr detected when getting data from db connector"); } // Setting the internal flag once the first EOF is encountered. 
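Note on the recurring pattern: every hunk in this patch applies the same mechanical migration to minddata error handling. Dataset-specific StatusCode enumerators gain a kMD prefix (kUnexpectedError becomes kMDUnexpectedError, kOK becomes kSuccess), the accessor get_code() is renamed to StatusCode(), and the per-code predicates (IsOutofMemory(), IsNoSpace(), IsInterrupted(), IsNetWorkError()) are replaced by comparing a Status directly against an enumerator. The sketch below illustrates the call-site change against a simplified stand-in for the unified Status class; the class body, constructors, and enumerator values are assumptions for illustration only (the real definitions live in include/api/status.h), while the names kSuccess, kMDOutOfMemory, kMDNoSpace, StatusCode(), and comparison of a Status against a StatusCode are taken from the hunks themselves.

// status_migration_sketch.cc -- hypothetical, minimal model of the new Status API.
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>

// Unscoped enum, so both the qualified (StatusCode::kMDNoSpace) and bare
// (kMDNoSpace) enumerator spellings seen in the patch would compile.
enum StatusCode : uint32_t {
  kSuccess = 0,  // replaces the old kOK
  kMDOutOfMemory,
  kMDNoSpace,
  kMDUnexpectedError,
};

class Status {
 public:
  Status() = default;
  Status(enum StatusCode code, std::string msg) : code_(code), msg_(std::move(msg)) {}
  // Mirrors the Status(code, __LINE__, __FILE__, extra) overload used throughout.
  Status(enum StatusCode code, int line, const char *file, const std::string &extra)
      : code_(code), msg_(std::string(file) + ":" + std::to_string(line) + " " + extra) {}
  static Status OK() { return Status(); }
  enum StatusCode StatusCode() const { return code_; }  // renamed from get_code()
  bool IsOk() const { return code_ == kSuccess; }
  bool IsError() const { return !IsOk(); }
  // Direct comparison replaces predicates such as IsOutofMemory()/IsNoSpace().
  bool operator==(enum StatusCode other) const { return code_ == other; }
  const std::string &ToString() const { return msg_; }

 private:
  enum StatusCode code_ = kSuccess;
  std::string msg_;
};

int main() {
  Status rc(kMDOutOfMemory, __LINE__, __FILE__, "Memory malloc failed.");
  // Before: if (rc.get_code() == StatusCode::kOutOfMemory || rc.IsNoSpace()) ...
  // After, matching the rewritten call sites:
  if (rc.StatusCode() == StatusCode::kMDOutOfMemory || rc == StatusCode::kMDNoSpace) {
    std::cout << rc.ToString() << std::endl;
  }
  return rc.IsOk() ? 0 : 1;
}

Dropping one predicate per code keeps the Status surface flat now that the kMD-prefixed dataset codes apparently share a single enum with other components, which is presumably why the patch prefers direct comparison over adding kMD-specific helper methods.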
diff --git a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc index bd1e3e61767..de9fdbfc0e2 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc @@ -53,7 +53,7 @@ Status TFRecordNode::ValidateParams() { if (dataset_files_.empty()) { std::string err_msg = "TFRecordNode: dataset_files is not specified."; MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } for (const auto &f : dataset_files_) { @@ -62,7 +62,7 @@ Status TFRecordNode::ValidateParams() { std::string err_msg = "TFRecordNode: dataset file: [" + f + "] is invalid or does not exist."; MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } } @@ -70,14 +70,14 @@ Status TFRecordNode::ValidateParams() { std::string err_msg = "TFRecordNode: Invalid number of samples: " + std::to_string(num_samples_); MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } if (num_shards_ <= 0) { std::string err_msg = "TFRecordNode: Invalid num_shards: " + std::to_string(num_shards_); MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } if (shard_id_ < 0 || shard_id_ >= num_shards_) { @@ -85,7 +85,7 @@ Status TFRecordNode::ValidateParams() { ", num_shards: " + std::to_string(num_shards_); MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } std::vector invalid_files(dataset_files_.size()); @@ -101,7 +101,7 @@ Status TFRecordNode::ValidateParams() { [](const std::string &accumulated, const std::string &next) { return accumulated + " " + next + "\n"; }); err_msg += accumulated_filenames; } - return err_msg.empty() ? Status::OK() : Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } // Function to build TFRecordNode diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc index b5981e6442c..e3e82b3baa1 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc @@ -102,7 +102,7 @@ namespace dataset { // Driver method for TreePass Status IRTreePass::Run(std::shared_ptr root_ir, bool *const modified) { if (root_ir == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to TreePass"); + return Status(StatusCode::kMDUnexpectedError, "Null pointer passed to TreePass"); } // Initialize modified flag *modified = false; @@ -112,7 +112,7 @@ Status IRTreePass::Run(std::shared_ptr root_ir, bool *const modifie // Driver method for NodePass Status IRNodePass::Run(std::shared_ptr root_ir, bool *const modified) { if (root_ir == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to NodePass"); + return Status(StatusCode::kMDUnexpectedError, "Null pointer passed to NodePass"); } // Initialize modified flag *modified = false; @@ -337,7 +337,7 @@ Status IRNodePass::Visit(std::shared_ptr node, bool *cons // Driver method for TreePass Status TreePass::Run(ExecutionTree *tree, bool *const modified) { if (tree == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to TreePass"); + return Status(StatusCode::kMDUnexpectedError, "Null pointer passed to TreePass"); } // Initialize modified flag *modified = false; @@ -347,7 +347,7 @@ Status TreePass::Run(ExecutionTree *tree, bool *const modified) { // Driver method for NodePass Status NodePass::Run(ExecutionTree *tree, bool *const modified) { if (tree == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to NodePass"); + return Status(StatusCode::kMDUnexpectedError, "Null pointer passed to NodePass"); } // Initialize modified flag *modified = false; diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc index feaaec15149..7e4510ef96f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc @@ -80,7 +80,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, bo MS_LOG(DEBUG) << "Cache transform pass: Non mappable leaf in a cache descendant tree detected"; // If a leaf has already been assigned, then we have more than one leaf inside this cache descendant tree. if (leaf_node_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for multiple leaf nodes under cache."); } // Set up a sampler here to be used by cache if we are a non-mappable leaf in a caching tree. @@ -127,7 +127,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr MS_LOG(DEBUG) << "Cache transform pass: Mappable leaf in a cache descendant tree detected"; // If a leaf has already been assigned, then we have more than one leaf inside this cache descendant tree. 
if (leaf_node_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for multiple leaf nodes under cache."); } // If we are a leaf in the caching path, then save this leaf @@ -140,7 +140,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr // Perform leaf node cache transform identification Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, bool *const modified) { if (node->IsCached() || is_caching_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for MindRecordOp under cache."); } return Status::OK(); @@ -151,7 +151,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, // Perform leaf node cache transform identification Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, bool *const modified) { if (node->IsCached() || is_caching_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for GeneratorOp under cache."); } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc index b206f43ee06..5cc15750384 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc @@ -146,7 +146,7 @@ Status ConnectorThroughput::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; - return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc index 5b1ef29f2d5..d2d788cd9e4 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc @@ -53,7 +53,7 @@ Status DeviceCpu::ParseCpuInfo(const std::string &str) { uint64_t softirq = 0; if (std::sscanf(str.c_str(), "%*s %lu %lu %lu %lu %lu %lu %lu", &cpu_stat.user_stat_, &nice, &cpu_stat.sys_stat_, &cpu_stat.idle_stat_, &cpu_stat.io_stat_, &irq, &softirq) == EOF) { - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } cpu_stat.total_stat_ = @@ -87,7 +87,7 @@ Status DeviceCpu::ParseCpuInfo(const std::string &str) { Status DeviceCpu::ParseCtxt(const std::string &str) { uint64_t ctxt; if (std::sscanf(str.c_str(), "%*s %lu", &ctxt) == EOF) { - return Status(StatusCode::kUnexpectedError, "Get context switch count failed."); + return Status(StatusCode::kMDUnexpectedError, "Get context switch count failed."); } // Calculate the utilization from the second sampling if (!first_collect_) { @@ -100,7 +100,7 @@ Status DeviceCpu::ParseCtxt(const std::string &str) { Status DeviceCpu::ParseRunningProcess(const std::string &str) { uint32_t running_process; if (std::sscanf(str.c_str(), "%*s %ud", &running_process) == EOF) { - return Status(StatusCode::kUnexpectedError, "Get context switch count failed."); + return Status(StatusCode::kMDUnexpectedError, "Get context switch count failed."); 
} // Drop the first value in order to collect same amount of CPU utilization if (!first_collect_) { @@ -188,7 +188,7 @@ Status OperatorCpu::ParseCpuInfo(int32_t op_id, int64_t thread_id, if (!temp_path.Exists()) { (*op_stat)[op_id][thread_id].user_stat_ = 0; (*op_stat)[op_id][thread_id].sys_stat_ = 0; - return Status(StatusCode::kFileNotExist); + return Status(StatusCode::kMDFileNotExist); } std::ifstream file(stat_path); @@ -203,7 +203,7 @@ Status OperatorCpu::ParseCpuInfo(int32_t op_id, int64_t thread_id, if (std::sscanf(str.c_str(), "%*d %*s %*s %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %lu %lu", &utime, &stime) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); (*op_stat)[op_id][thread_id].user_stat_ = utime; @@ -224,7 +224,7 @@ Status OperatorCpu::GetTotalCpuTime(uint64_t *total_stat) { if (std::sscanf(str.c_str(), "%*s %lu %lu %lu %lu %lu %lu %lu", &user, &nice, &sys, &idle, &iowait, &irq, &softirq) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); *total_stat = user + nice + sys + idle + iowait + irq + softirq; @@ -398,7 +398,7 @@ Status ProcessCpu::ParseCpuInfo() { if (std::sscanf(str.c_str(), "%*d %*s %*s %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %lu %lu", &user, &sys) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); @@ -434,7 +434,7 @@ Status ProcessCpu::GetTotalCpuTime(uint64_t *total_stat) { if (std::sscanf(str.c_str(), "%*s %lu %lu %lu %lu %lu %lu %lu", &user, &nice, &sys, &idle, &iowait, &irq, &softirq) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); *total_stat = user + nice + sys + idle + iowait + irq + softirq; @@ -559,7 +559,7 @@ Status CpuSampling::Init(const std::string &dir_path, const std::string &device_ Status CpuSampling::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; - return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc index b2188ed0cd9..ee6e3dae884 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc @@ -70,7 +70,7 @@ Status DatasetIteratorTracing::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; - return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc index 8255ed677bf..eaf36070311 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc +++ 
b/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc @@ -71,7 +71,7 @@ Status DeviceQueueTracing::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; - return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc index 49fc54188d2..907f4612c15 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc @@ -97,7 +97,7 @@ Status ProfilingManager::RegisterTracingNode(std::shared_ptr node) { // Check if node with the same name has already been registered. auto exist = tracing_nodes_.find(node->Name()); if (exist != tracing_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node already exist: " + node->Name()); + return Status(StatusCode::kMDProfilingError, "Profiling node already exists: " + node->Name()); } // Register the node with its name as key. RETURN_IF_NOT_OK(node->Init(dir_path_, device_id_)); @@ -110,7 +110,7 @@ Status ProfilingManager::GetTracingNode(const std::string &name, std::shared_ptr // Check if node with the same name has already been registered. auto exist = tracing_nodes_.find(name); if (exist == tracing_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node does not exist: " + name); + return Status(StatusCode::kMDProfilingError, "Profiling node does not exist: " + name); } // Fetch node. *node = tracing_nodes_[name]; @@ -122,7 +122,7 @@ Status ProfilingManager::RegisterSamplingNode(std::shared_ptr node) { // Check if node with the same name has already been registered. auto exist = sampling_nodes_.find(node->Name()); if (exist != sampling_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node already exist: " + node->Name()); + return Status(StatusCode::kMDProfilingError, "Profiling node already exists: " + node->Name()); } // Register the node with its name as key. RETURN_IF_NOT_OK(node->Init(dir_path_, device_id_)); @@ -135,7 +135,7 @@ Status ProfilingManager::GetSamplingNode(const std::string &name, std::shared_pt // Check if node with the same name has already been registered. auto exist = sampling_nodes_.find(name); if (exist == sampling_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node does not exist: " + name); + return Status(StatusCode::kMDProfilingError, "Profiling node does not exist: " + name); } // Fetch node. *node = sampling_nodes_[name]; diff --git a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h deleted file mode 100644 index 2e20ca12b9d..00000000000 --- a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DETENSOR_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DETENSOR_H_ -#include -#include -#include -#include "include/ms_tensor.h" -#include "minddata/dataset/include/status.h" -#include "minddata/dataset/include/tensor.h" -namespace mindspore { -namespace tensor { -class DETensor : public mindspore::tensor::MSTensor { - public: - /// \brief Create a MSTensor pointer. - /// \param[in] data_type DataTypeId of tensor to be created - /// \param[in] shape Shape of tensor to be created - /// \return MSTensor pointer - static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); - - /// \brief Create a MSTensor pointer. - /// \param[in] path Path to file to read - /// \return MSTensor pointer - static MSTensor *CreateTensor(const std::string &path); - - /// \brief Create a MSTensor pointer. - /// \param[in] data_type Data TypeId of tensor to be created - /// \param[in] shape Shape of tensor to be created - /// \param[in] data Data pointer - /// \return MSTensor pointer - static MSTensor *CreateFromMemory(TypeId data_type, const std::vector &shape, void *data); - - DETensor(TypeId data_type, const std::vector &shape); - - explicit DETensor(std::shared_ptr tensor_ptr); - - ~DETensor() = default; - - /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor. - /// \return MSTensor pointer - MSTensor *ConvertToLiteTensor(); - - std::shared_ptr tensor() const; - - TypeId data_type() const override; - - TypeId set_data_type(const TypeId data_type); - - std::vector shape() const override; - - size_t set_shape(const std::vector &shape); - - int DimensionSize(size_t index) const override; - - int ElementsNum() const override; - - std::size_t hash() const; - - size_t Size() const override; - - void *MutableData() override; - - protected: - std::shared_ptr tensor_impl_; -}; -} // namespace tensor -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DETENSOR_H_ diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h index d4320e43e3e..c2c4ace8c79 100644 --- a/mindspore/ccsrc/minddata/dataset/include/execute.h +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -19,49 +19,38 @@ #include #include - +#include "include/api/types.h" #include "minddata/dataset/include/constants.h" -#ifdef ENABLE_ANDROID -#include "minddata/dataset/include/de_tensor.h" -#endif -#include "minddata/dataset/include/tensor.h" #include "minddata/dataset/include/transforms.h" namespace mindspore { namespace dataset { -class TensorOp; - // class to run tensor operations in eager mode class Execute { public: /// \brief Constructor explicit Execute(std::shared_ptr op); + explicit Execute(std::vector> ops); + /// \brief Destructor - ~Execute(); - -#ifdef ENABLE_ANDROID - /// \brief callable function to execute the TensorOperation in eager mode - /// \param[in] input - the tensor to be transformed - /// \return - the output tensor, nullptr if Compute fails - std::shared_ptr operator()(std::shared_ptr input); -#endif + ~Execute() = default; /// \brief callable function to execute the TensorOperation in eager mode - /// \param[in] input - the tensor to be transformed - /// \return - the output tensor, nullptr if Compute fails - std::shared_ptr operator()(std::shared_ptr input); + /// \param[in] input Tensor to be transformed + /// \param[out] output 
Transformed tensor + /// \return Status code + Status operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output); /// \brief callable function to execute the TensorOperation in eager mode - /// \param[in] input_tensor_list - the tensor to be transformed - /// \param[out] out - the result tensor after transform + /// \param[in] input_tensor_list List of Tensor to be transformed + /// \param[out] out Result tensor after transform /// \return - Status - Status operator()(const std::vector> &input_tensor_list, - std::vector> *out); + Status operator()(const std::vector &input_tensor_list, std::vector *out); private: - std::shared_ptr op_; + std::vector> ops_; }; } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h b/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h deleted file mode 100644 index 563ef66e855..00000000000 --- a/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_MINDDATA_EAGER_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_MINDDATA_EAGER_H_ - -#include -#include -#include - -#include "include/api/status.h" -#include "include/api/types.h" -#include "minddata/dataset/include/transforms.h" -#include "minddata/dataset/include/vision.h" - -namespace mindspore { -namespace api { - -// class to run tensor operations in eager mode -class MindDataEager { - public: - /// \brief Constructor - MindDataEager() = default; - - /// \brief Constructor - /// \param[inout] ops Transforms to be applied - explicit MindDataEager(std::vector> ops); - - /// \brief Destructor - ~MindDataEager() = default; - - /// \brief Function to read images from local directory - /// \param[inout] image_dir Target directory which contains images - /// \param[output] images Vector of image Tensor - /// \return Status The status code returned - static Status LoadImageFromDir(const std::string &image_dir, std::vector> *images); - - /// \brief Callable function to execute the TensorOperation in eager mode - /// \param[inout] input Tensor to be transformed - /// \return Output tensor, nullptr if Compute fails - std::shared_ptr operator()(std::shared_ptr input); - - private: - std::vector> ops_; -}; - -} // namespace api -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_MINDDATA_EAGER_H_ diff --git a/mindspore/ccsrc/minddata/dataset/include/status.h b/mindspore/ccsrc/minddata/dataset/include/status.h index 7002b05f10e..e5896687d22 100644 --- a/mindspore/ccsrc/minddata/dataset/include/status.h +++ b/mindspore/ccsrc/minddata/dataset/include/status.h @@ -29,6 +29,8 @@ #include #include +#include "include/api/status.h" + namespace mindspore { namespace dataset { #define RETURN_IF_NOT_OK(_s) \ @@ -39,23 +41,30 @@ namespace dataset { } \ } while (false) -#define RETURN_STATUS_UNEXPECTED(_e) \ - do { \ - return 
Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ - } while (false) - -#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ +#define RETURN_STATUS_UNEXPECTED(_e) \ do { \ - if (!(_condition)) { \ - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ - } \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ } while (false) -#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ - do { \ - if (!(_condition)) { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ - } \ +#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ + } \ + } while (false) + +#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ + } \ } while (false) #define RETURN_UNEXPECTED_IF_NULL(_ptr) \ @@ -73,9 +82,9 @@ namespace dataset { } \ } while (false) -#define RETURN_STATUS_SYNTAX_ERROR(_e) \ - do { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ +#define RETURN_STATUS_SYNTAX_ERROR(_e) \ + do { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ } while (false) #define RETURN_SECOND_IF_ERROR(_s, _r) \ @@ -87,99 +96,8 @@ namespace dataset { } \ } while (false) -enum class StatusCode : char { - kOK = 0, - kOutOfMemory = 1, - kShapeMisMatch = 2, - kInterrupted = 3, - kNoSpace = 4, - kPyFuncException = 5, - kDuplicateKey = 6, - kPythonInterpreterFailure = 7, - kTDTPushFailure = 8, - kFileNotExist = 9, - kProfilingError = 10, - kBoundingBoxOutOfBounds = 11, - kBoundingBoxInvalidShape = 12, - kSyntaxError = 13, - kTimeOut = 14, - kBuddySpaceFull = 15, - kNetWorkError = 16, - kNotImplementedYet = 17, - // Make this error code the last one. Add new error code above it.
- kUnexpectedError = 127 -}; - -std::string CodeAsString(const StatusCode c); - -class Status { - public: - Status() noexcept; - - explicit Status(StatusCode c) noexcept; - - ~Status() noexcept; - - // Copy constructor - Status(const Status &s); - - Status &operator=(const Status &s); - - // Move constructor - Status(Status &&) noexcept; - - Status &operator=(Status &&) noexcept; - - Status(const StatusCode code, const std::string &msg); - - Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = ""); - - // Return a success status - static Status OK() { return Status(StatusCode::kOK); } - - std::string ToString() const; - - StatusCode get_code() const; - - int GetLineOfCode() const { return line_of_code_; } - - std::string SetErrDescription(const std::string &err_description); - - std::string GetErrDescription() const { return err_description_; } - - friend std::ostream &operator<<(std::ostream &os, const Status &s); - - explicit operator bool() const { return (get_code() == StatusCode::kOK); } - - bool operator==(const Status &other) const { return (this->get_code() == other.get_code()); } - - bool operator!=(const Status &other) const { return !(*this == other); } - - bool IsOk() const { return (get_code() == StatusCode::kOK); } - - bool IsError() const { return !IsOk(); } - - bool IsOutofMemory() const { return (get_code() == StatusCode::kOutOfMemory); } - - bool IsInterrupted() const { return (get_code() == StatusCode::kInterrupted); } - - bool IsShapeIncorrect() const { return (get_code() == StatusCode::kShapeMisMatch); } - - bool IsNoSpace() const { return (get_code() == StatusCode::kNoSpace); } - - bool IsNetWorkError() const { return (get_code() == StatusCode::kNetWorkError); } - - private: - StatusCode code_; - int line_of_code_; - std::string file_name_; - std::string err_description_; - std::string err_msg_; -}; - #if !defined(_WIN32) && !defined(_WIN64) const float MAX_MEMORY_USAGE_THRESHOLD = 0.95; - float GetMemoryUsage(); #endif } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/include/tensor.h b/mindspore/ccsrc/minddata/dataset/include/tensor.h index e149f872024..2113036ae5d 100644 --- a/mindspore/ccsrc/minddata/dataset/include/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/tensor.h @@ -41,22 +41,16 @@ #include "minddata/dataset/core/constants.h" #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" +#include "minddata/dataset/core/de_tensor.h" #include "minddata/dataset/util/status.h" #ifndef ENABLE_ANDROID #include "proto/example.pb.h" -#else -#include "minddata/dataset/include/de_tensor.h" #endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { -#ifdef ENABLE_ANDROID -namespace tensor { -class DETensor; -} // namespace tensor -#endif namespace dataset { class Tensor; template @@ -84,7 +78,7 @@ class Tensor { /// \param other Tensor to be moved Tensor(Tensor &&other) noexcept; - /// Move assigment operator + /// Move assignment operator /// \param other Tensor to be moved Tensor &operator=(Tensor &&other) noexcept; @@ -133,7 +127,7 @@ class Tensor { #ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. 
/// \param[in] bytes_list protobuf's Bytelist - /// \param[in] shape shape of the outout tensor + /// \param[in] shape shape of the output tensor /// \param[out] out created Tensor /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out); @@ -279,7 +273,7 @@ class Tensor { std::string err; err += (data_ == nullptr) ? "data_ is nullptr \t" : ""; err += type_.IsCompatible() ? "data type not compatible\t" : ""; - return Status(StatusCode::kUnexpectedError, err); + return Status(StatusCode::kMDUnexpectedError, err); } } @@ -330,7 +324,7 @@ class Tensor { void Invalidate(); /// Copy input tensor into self at the location index. - /// Index is a vector of axises which can be incomplete: + /// Index is a vector of axes which can be incomplete: /// Ex: shape <2,3>, inserting into index {0} will replace the first row. index {1,2} will replace the last cell. /// \param index /// \param input @@ -375,7 +369,7 @@ class Tensor { /// Handle negative indices. static inline dsize_t HandleNeg(dsize_t index, dsize_t length) { return (index < 0) ? (index + length) : index; } - /// Slice tensor bases on the given indicies. Copy the sliced data into out tensor. Only rank1 tensors are supported. + /// Slice tensor bases on the given indices. Copy the sliced data into out tensor. Only rank1 tensors are supported. /// Based on the type of tensor, SliceNumeric or SliceString will be called /// \param[out] out Tensor /// \param[in] indices vector of indices @@ -663,9 +657,8 @@ class Tensor { unsigned char *data_end_ = nullptr; private: -#ifdef ENABLE_ANDROID - friend class tensor::DETensor; -#endif + friend class DETensor; + /// Copy raw data of a array based on shape and strides to the destination pointer /// \param dst [out] Pointer to the destination array where the content is to be copied /// \param[in] src Pointer to the source of strided array to be copied diff --git a/mindspore/ccsrc/minddata/dataset/include/type_id.h b/mindspore/ccsrc/minddata/dataset/include/type_id.h index d4c05609315..ef077a832d2 100644 --- a/mindspore/ccsrc/minddata/dataset/include/type_id.h +++ b/mindspore/ccsrc/minddata/dataset/include/type_id.h @@ -17,6 +17,7 @@ #define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_TYPEID_H_ #include "mindspore/core/ir/dtype/type_id.h" +#include "minddata/dataset/core/data_type.h" namespace mindspore { namespace dataset { @@ -46,6 +47,8 @@ inline dataset::DataType MSTypeToDEType(TypeId data_type) { return dataset::DataType(dataset::DataType::DE_FLOAT32); case kNumberTypeFloat64: return dataset::DataType(dataset::DataType::DE_FLOAT64); + case kObjectTypeString: + return dataset::DataType(dataset::DataType::DE_STRING); default: return dataset::DataType(dataset::DataType::DE_UNKNOWN); } @@ -77,6 +80,8 @@ inline TypeId DETypeToMSType(dataset::DataType data_type) { return mindspore::TypeId::kNumberTypeFloat32; case dataset::DataType::DE_FLOAT64: return mindspore::TypeId::kNumberTypeFloat64; + case dataset::DataType::DE_STRING: + return mindspore::TypeId::kObjectTypeString; default: return kTypeUnknown; } diff --git a/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc index b28a2930c79..983ab898e91 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc @@ -35,7 +35,7 @@ Status OneHotOp::OutputShape(const std::vector &inputs, std::vector if (inputs_copy[0].Rank() == 0) 
outputs.emplace_back(std::vector{num_classes_}); if (inputs_copy[0].Rank() == 1) outputs.emplace_back(std::vector{inputs_copy[0][0], num_classes_}); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "OneHot: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "OneHot: invalid input shape."); } Status OneHotOp::to_json(nlohmann::json *out_json) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc index 4a6a55df38a..791f189c431 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc @@ -43,16 +43,16 @@ Status BoundingBox::ReadFromTensor(const TensorPtr &bbox_tensor, dsize_t index_o Status BoundingBox::ValidateBoundingBoxes(const TensorRow &image_and_bbox) { if (image_and_bbox.size() != 2) { - return Status(StatusCode::kBoundingBoxInvalidShape, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__, "BoundingBox: invalid input, likely missed bounding boxes."); } if (image_and_bbox[1]->shape().Size() < 2) { - return Status(StatusCode::kBoundingBoxInvalidShape, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__, "BoundingBox: bounding boxes should have to be two-dimensional matrix at least."); } uint32_t num_of_features = image_and_bbox[1]->shape()[1]; if (num_of_features < 4) { - return Status(StatusCode::kBoundingBoxInvalidShape, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__, "BoundingBox: bounding boxes should be have at least 4 features."); } std::vector> bbox_list; @@ -61,11 +61,11 @@ Status BoundingBox::ValidateBoundingBoxes(const TensorRow &image_and_bbox) { uint32_t img_w = image_and_bbox[0]->shape()[1]; for (auto &bbox : bbox_list) { if ((bbox->x() + bbox->width() > img_w) || (bbox->y() + bbox->height() > img_h)) { - return Status(StatusCode::kBoundingBoxOutOfBounds, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxOutOfBounds, __LINE__, __FILE__, "BoundingBox: bounding boxes is out of bounds of the image"); } if (static_cast(bbox->x()) < 0 || static_cast(bbox->y()) < 0) { - return Status(StatusCode::kBoundingBoxOutOfBounds, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxOutOfBounds, __LINE__, __FILE__, "BoundingBox: the coordinates of the bounding boxes has negative value."); } } diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc index 674acb6e53a..70529817dab 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc @@ -72,7 +72,7 @@ Status CenterCropOp::OutputShape(const std::vector &inputs, std::ve if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "CenterCrop: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "CenterCrop: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc index 8d287c7b323..426052e1913 100644 --- 
a/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc @@ -44,7 +44,7 @@ Status CropOp::OutputShape(const std::vector &inputs, std::vector &inputs, std::vector TensorShape out({-1, -1, 3}); // we don't know what is output image size, but we know it should be 3 channels if (inputs[0].Rank() == 1) outputs.emplace_back(out); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Decode: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "Decode: invalid input shape."); } Status DecodeOp::OutputType(const std::vector &inputs, std::vector &outputs) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc index cb1be1d76b1..080c6fbd3d9 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc @@ -1,106 +1,106 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include "minddata/dataset/kernels/image/dvpp/utils/AclProcess.h" -#include "minddata/dataset/core/cv_tensor.h" -#include "minddata/dataset/kernels/image/image_utils.h" -#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" -#include "minddata/dataset/core/data_type.h" -#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h" -#include "include/api/context.h" - -namespace mindspore { -namespace dataset { -Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { - IO_CHECK(input, output); - if (!IsNonEmptyJPEG(input)) { - RETURN_STATUS_UNEXPECTED("SoftDvppDecodeReiszeJpegOp only support process jpeg image."); - } - try { - CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty."); - unsigned char *buffer = const_cast(input->GetBuffer()); - RawData imageInfo; - uint32_t filesize = input->SizeInBytes(); - imageInfo.lenOfByte = filesize; - imageInfo.data = std::make_shared(); - imageInfo.data.reset(new uint8_t[filesize], std::default_delete()); - memcpy_s(imageInfo.data.get(), filesize, buffer, filesize); - // First part end, whose function is to transform data from a Tensor to imageinfo data structure which can be - // applied on device - ResourceInfo resource; - resource.aclConfigPath = ""; - resource.deviceIds.insert(api::Context::Instance().GetDeviceID()); - std::shared_ptr instance = ResourceManager::GetInstance(); - APP_ERROR ret = instance->InitResource(resource); - if (ret != APP_ERR_OK) { - instance->Release(); - std::string error = "Error in Init D-chip:" + std::to_string(ret); - RETURN_STATUS_UNEXPECTED(error); - } - int deviceId = *(resource.deviceIds.begin()); - aclrtContext context = instance->GetContext(deviceId); - // Second 
part end where we initialize the resource of D chip and set up all configures - AclProcess process(resized_width_, resized_height_, crop_width_, crop_height_, context); - process.set_mode(true); - ret = process.InitResource(); - if (ret != APP_ERR_OK) { - instance->Release(); - std::string error = "Error in Init resource:" + std::to_string(ret); - RETURN_STATUS_UNEXPECTED(error); - } - ret = process.Process(imageInfo); - if (ret != APP_ERR_OK) { - instance->Release(); - std::string error = "Error in dvpp processing:" + std::to_string(ret); - RETURN_STATUS_UNEXPECTED(error); - } - // Third part end where we execute the core function of dvpp - auto data = std::static_pointer_cast(process.Get_Memory_Data()); - unsigned char *ret_ptr = data.get(); - std::shared_ptr CropOut = process.Get_Device_Memory_Data(); - dsize_t dvpp_length = CropOut->dataSize; - const TensorShape dvpp_shape({dvpp_length, 1, 1}); - const DataType dvpp_data_type(DataType::DE_UINT8); - mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output); - if (!((*output)->HasData())) { - std::string error = "[ERROR] Fail to get the Output result from memory!"; - RETURN_STATUS_UNEXPECTED(error); - } - process.device_memory_release(); - process.Release(); - // Last part end where we transform the processed data into a tensor which can be applied in later units. - } catch (const cv::Exception &e) { - std::string error = "[ERROR] Fail in DvppDecodeResizeCropJpegOp:" + std::string(e.what()); - RETURN_STATUS_UNEXPECTED(error); - } - return Status::OK(); -} - -Status DvppDecodeResizeCropJpegOp::OutputShape(const std::vector &inputs, - std::vector &outputs) { - RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs)); - outputs.clear(); - TensorShape out({-1, 1, 1}); // we don't know what is output image size, but we know it should be 3 channels - if (inputs[0].Rank() == 1) outputs.emplace_back(out); - if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Input has a wrong shape"); -} - -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "minddata/dataset/kernels/image/dvpp/utils/AclProcess.h" +#include "minddata/dataset/core/cv_tensor.h" +#include "minddata/dataset/kernels/image/image_utils.h" +#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" +#include "minddata/dataset/core/data_type.h" +#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h" +#include "include/api/context.h" + +namespace mindspore { +namespace dataset { +Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { + IO_CHECK(input, output); + if (!IsNonEmptyJPEG(input)) { + RETURN_STATUS_UNEXPECTED("SoftDvppDecodeReiszeJpegOp only support process jpeg image."); + } + try { + CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty."); + unsigned char *buffer = const_cast(input->GetBuffer()); + RawData imageInfo; + uint32_t filesize = input->SizeInBytes(); + imageInfo.lenOfByte = filesize; + imageInfo.data = std::make_shared(); + imageInfo.data.reset(new uint8_t[filesize], std::default_delete()); + memcpy_s(imageInfo.data.get(), filesize, buffer, filesize); + // First part end, whose function is to transform data from a Tensor to imageinfo data structure which can be + // applied on device + ResourceInfo resource; + resource.aclConfigPath = ""; + resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + std::shared_ptr instance = ResourceManager::GetInstance(); + APP_ERROR ret = instance->InitResource(resource); + if (ret != APP_ERR_OK) { + instance->Release(); + std::string error = "Error in Init D-chip:" + std::to_string(ret); + RETURN_STATUS_UNEXPECTED(error); + } + int deviceId = *(resource.deviceIds.begin()); + aclrtContext context = instance->GetContext(deviceId); + // Second part end where we initialize the resource of D chip and set up all configures + AclProcess process(resized_width_, resized_height_, crop_width_, crop_height_, context); + process.set_mode(true); + ret = process.InitResource(); + if (ret != APP_ERR_OK) { + instance->Release(); + std::string error = "Error in Init resource:" + std::to_string(ret); + RETURN_STATUS_UNEXPECTED(error); + } + ret = process.Process(imageInfo); + if (ret != APP_ERR_OK) { + instance->Release(); + std::string error = "Error in dvpp processing:" + std::to_string(ret); + RETURN_STATUS_UNEXPECTED(error); + } + // Third part end where we execute the core function of dvpp + auto data = std::static_pointer_cast(process.Get_Memory_Data()); + unsigned char *ret_ptr = data.get(); + std::shared_ptr CropOut = process.Get_Device_Memory_Data(); + dsize_t dvpp_length = CropOut->dataSize; + const TensorShape dvpp_shape({dvpp_length, 1, 1}); + const DataType dvpp_data_type(DataType::DE_UINT8); + mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output); + if (!((*output)->HasData())) { + std::string error = "[ERROR] Fail to get the Output result from memory!"; + RETURN_STATUS_UNEXPECTED(error); + } + process.device_memory_release(); + process.Release(); + // Last part end where we transform the processed data into a tensor which can be applied in later units. 
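// A behavioural change hides in this otherwise near-verbatim re-add of the
// dvpp operator: the device id is now read through the static GlobalContext
// accessors rather than the removed api::Context singleton (see the
// resource.deviceIds.insert(...) line above). A minimal sketch of configuring
// the device before such ops run, assuming only the GlobalContext interface
// declared in include/api/context.h; the wrapper function is hypothetical.
#include "include/api/context.h"

void ConfigureAscend310(uint32_t device_id) {  // hypothetical helper
  mindspore::GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
  mindspore::GlobalContext::SetGlobalDeviceID(device_id);
  // DvppDecodeResizeCropJpegOp::Compute() then picks the id up via
  // mindspore::GlobalContext::GetGlobalDeviceID().
}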
+ } catch (const cv::Exception &e) { + std::string error = "[ERROR] Fail in DvppDecodeResizeCropJpegOp:" + std::string(e.what()); + RETURN_STATUS_UNEXPECTED(error); + } + return Status::OK(); +} + +Status DvppDecodeResizeCropJpegOp::OutputShape(const std::vector &inputs, + std::vector &outputs) { + RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs)); + outputs.clear(); + TensorShape out({-1, 1, 1}); // we don't know what is output image size, but we know it should be 3 channels + if (inputs[0].Rank() == 1) outputs.emplace_back(out); + if (!outputs.empty()) return Status::OK(); + return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape"); +} + +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h index aae9c77f6d9..33df9bf4999 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h @@ -1,60 +1,60 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H -#define MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H - -#include -#include -#include - -#include "minddata/dataset/core/tensor.h" -#include "minddata/dataset/kernels/tensor_op.h" -#include "minddata/dataset/util/status.h" -#include "minddata/dataset/core/data_type.h" -#include "mindspore/core/utils/log_adapter.h" -#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h" -#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h" -#include "acl/acl.h" - -namespace mindspore { -namespace dataset { -class DvppDecodeResizeCropJpegOp : public TensorOp { - public: - DvppDecodeResizeCropJpegOp(int32_t crop_height, int32_t crop_width, int32_t resized_height, int32_t resized_width) - : crop_height_(crop_height), - crop_width_(crop_width), - resized_height_(resized_height), - resized_width_(resized_width) {} - - /// \brief Destructor - ~DvppDecodeResizeCropJpegOp() = default; - - Status Compute(const std::shared_ptr &input, std::shared_ptr *output) override; - Status OutputShape(const std::vector &inputs, std::vector &outputs) override; - - std::string Name() const override { return kDvppDecodeResizeCropJpegOp; } - - private: - int32_t crop_height_; - int32_t crop_width_; - int32_t resized_height_; - int32_t resized_width_; -}; -} // namespace dataset -} // namespace mindspore - -#endif // MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H +#define MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H + +#include +#include +#include + +#include "minddata/dataset/core/tensor.h" +#include "minddata/dataset/kernels/tensor_op.h" +#include "minddata/dataset/util/status.h" +#include "minddata/dataset/core/data_type.h" +#include "mindspore/core/utils/log_adapter.h" +#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h" +#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h" +#include "acl/acl.h" + +namespace mindspore { +namespace dataset { +class DvppDecodeResizeCropJpegOp : public TensorOp { + public: + DvppDecodeResizeCropJpegOp(int32_t crop_height, int32_t crop_width, int32_t resized_height, int32_t resized_width) + : crop_height_(crop_height), + crop_width_(crop_width), + resized_height_(resized_height), + resized_width_(resized_width) {} + + /// \brief Destructor + ~DvppDecodeResizeCropJpegOp() = default; + + Status Compute(const std::shared_ptr &input, std::shared_ptr *output) override; + Status OutputShape(const std::vector &inputs, std::vector &outputs) override; + + std::string Name() const override { return kDvppDecodeResizeCropJpegOp; } + + private: + int32_t crop_height_; + int32_t crop_width_; + int32_t resized_height_; + int32_t resized_width_; +}; +} // namespace dataset +} // namespace mindspore + +#endif // MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc index e27d1d08b44..4ac49c47283 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc @@ -90,14 +90,14 @@ APP_ERROR ResourceManager::InitResource(ResourceInfo &resourceInfo) { APP_ERROR ret; if (aclConfigPath.length() == 0) { // Init acl without aclconfig - acl_env_ = mindspore::api::AclEnvGuard::GetAclEnv(""); + acl_env_ = mindspore::AclEnvGuard::GetAclEnv(""); } else { ret = ExistFile(aclConfigPath); if (ret != APP_ERR_OK) { MS_LOG(ERROR) << "Acl config file not exist, ret = " << ret << "."; return ret; } - acl_env_ = mindspore::api::AclEnvGuard::GetAclEnv(aclConfigPath); + acl_env_ = mindspore::AclEnvGuard::GetAclEnv(aclConfigPath); } if (acl_env_ == nullptr) { MS_LOG(ERROR) << "Failed to init acl."; diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h index 88b5eda4f4d..d27b9611d28 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h @@ -86,7 +86,7 @@ class ResourceManager { std::vector deviceIds_; std::vector contexts_; std::unordered_map deviceIdMap_; // Map of device to index - std::shared_ptr acl_env_; + std::shared_ptr acl_env_; }; #endif diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc 
b/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc index fe1215d5b45..311b65020bd 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc @@ -33,7 +33,7 @@ Status HwcToChwOp::OutputShape(const std::vector &inputs, std::vect TensorShape out = TensorShape{in[2], in[0], in[1]}; if (inputs[0].Rank() == 3) outputs.emplace_back(out); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "HWC2CHW: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "HWC2CHW: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc index c70fff9c8e3..85f34f71342 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc @@ -110,11 +110,11 @@ Status Resize(const std::shared_ptr &input, std::shared_ptr *out if (output_height > in_image.rows * 1000 || output_width > in_image.cols * 1000) { std::string err_msg = "Resize: the resizing width or height is too big, it's 1000 times bigger than the original image."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } if (output_height == 0 || output_width == 0) { std::string err_msg = "Resize: the resizing width or height is invalid, width or height is zero."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { TensorShape shape{output_height, output_width}; @@ -632,12 +632,12 @@ Status Normalize(const std::shared_ptr &input, std::shared_ptr * mean->Squeeze(); if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != 3) { std::string err_msg = "Normalize: mean should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } std->Squeeze(); if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != 3) { std::string err_msg = "Normalize: std tensor should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { // NOTE: We are assuming the input image is in RGB and the mean @@ -682,12 +682,12 @@ Status NormalizePad(const std::shared_ptr &input, std::shared_ptrSqueeze(); if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != 3) { std::string err_msg = "NormalizePad: mean tensor should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } std->Squeeze(); if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != 3) { std::string err_msg = "NormalizePad: std tensor should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { // NOTE: We are assuming the input image is in RGB and the mean diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc index ca1785b726d..782b60cd93a 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc +++ 
b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc @@ -320,12 +320,12 @@ Status Normalize(const std::shared_ptr &input, std::shared_ptr * mean->Squeeze(); if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != 3) { std::string err_msg = "Normalize: mean should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } std->Squeeze(); if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != 3) { std::string err_msg = "Normalize: std should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } // convert mean, std back to vector std::vector vec_mean; @@ -385,7 +385,7 @@ Status Resize(const std::shared_ptr &input, std::shared_ptr *out std::string err_msg = "Resize: the resizing width or height 1) is too big, it's up to " "1000 times the original image; 2) can not be 0."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { LiteMat lite_mat_rgb; diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc index 4d320362d91..0105a4cc7fa 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc @@ -48,7 +48,7 @@ Status PadOp::OutputShape(const std::vector &inputs, std::vector &inputs if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "RandomCropAndResize: invalid input shape"); + return Status(StatusCode::kMDUnexpectedError, "RandomCropAndResize: invalid input shape"); } Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int *crop_height, int *crop_width) { *crop_width = w_in; diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc index 812f71309a3..c0dea9d87b5 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc @@ -94,11 +94,11 @@ Status RandomCropOp::ImagePadding(const std::shared_ptr &input, std::sha } if (crop_height_ == 0 || crop_width_ == 0) { - return Status(StatusCode::kShapeMisMatch, __LINE__, __FILE__, + return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, "RandomCrop: invalid crop size, crop dimension is not allowed to be zero."); } if (*padded_image_h < crop_height_ || *padded_image_w < crop_width_ || crop_height_ == 0 || crop_width_ == 0) { - return Status(StatusCode::kShapeMisMatch, __LINE__, __FILE__, + return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, "RandomCrop: invalid crop size, crop size is bigger than the image dimensions."); } return Status::OK(); @@ -144,7 +144,7 @@ Status RandomCropOp::OutputShape(const std::vector &inputs, std::ve if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "RandomCrop: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "RandomCrop: invalid input shape."); } } // namespace dataset } // namespace mindspore diff 
--git a/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc index bf965dec1e5..1908cabd201 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc @@ -77,7 +77,7 @@ Status RandomRotationOp::OutputShape(const std::vector &inputs, std if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "RandomRotation: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "RandomRotation: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc index 4a7ede6d199..6d9ec7a1279 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc @@ -64,7 +64,7 @@ Status ResizeOp::OutputShape(const std::vector &inputs, std::vector if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Resize: invalid input wrong shape."); + return Status(StatusCode::kMDUnexpectedError, "Resize: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc index 3b6944e9112..2c684b1080b 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc @@ -82,7 +82,7 @@ Status SoftDvppDecodeResizeJpegOp::OutputShape(const std::vector &i TensorShape out({-1, -1, 3}); // we don't know what is output image size, but we know it should be 3 channels if (inputs[0].Rank() == 1) outputs.emplace_back(out); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Input has a wrong shape"); + return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape"); } } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc index 30df7535402..11fc31b58d5 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc @@ -26,12 +26,12 @@ namespace mindspore { namespace dataset { Status PyFuncOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); - Status ret = Status(StatusCode::kOK, "PyFunc Call Succeed"); + Status ret = Status(StatusCode::kSuccess, "PyFunc call succeeded"); { // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - ret = Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + ret = Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); goto ComputeReturn; } try { @@ -81,7 +81,7 @@ Status PyFuncOp::Compute(const TensorRow &input, TensorRow *output) { } } } catch (const py::error_already_set &e) { - ret =
Status(StatusCode::kPyFuncException, e.what()); + ret = Status(StatusCode::kMDPyFuncException, e.what()); } } @@ -89,12 +89,12 @@ ComputeReturn: return ret; ShapeMisMatch: - ret = - Status(StatusCode::kShapeMisMatch, __LINE__, __FILE__, "PyFunc should return a numpy array or a numpy array tuple"); + ret = Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, + "PyFunc should return a numpy array or a numpy array tuple"); goto ComputeReturn; TimeoutError: - ret = Status(StatusCode::kTimeOut, __LINE__, __FILE__, "PyFunc execute time out"); + ret = Status(StatusCode::kMDTimeOut, __LINE__, __FILE__, "PyFunc execute time out"); goto ComputeReturn; } @@ -114,7 +114,7 @@ Status PyFuncOp::CastOutput(const py::object &ret_py_obj, TensorRow *output) { } output->push_back(out); } catch (const std::exception &e) { - return Status(StatusCode::kUnexpectedError, e.what()); + return Status(StatusCode::kMDUnexpectedError, e.what()); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc index e394284679f..974de7f3ab0 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc @@ -27,9 +27,9 @@ namespace dataset { Status TensorOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); if (!OneToOne()) { - return Status(StatusCode::kUnexpectedError, "Wrong Compute() function is called. This is not 1-1 TensorOp."); + return Status(StatusCode::kMDUnexpectedError, "Wrong Compute() function is called. This is not 1-1 TensorOp."); } else { - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "Is this TensorOp 1-1? If yes, please implement this Compute() in the derived class."); } } @@ -44,13 +44,13 @@ Status TensorOp::Compute(const TensorRow &input, TensorRow *output) { return Compute(input[0], &(*output)[0]); } - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "Is this TensorOp oneToOne? 
If no, please implement this Compute() in the derived class."); } Status TensorOp::OutputShape(const std::vector &inputs, std::vector &outputs) { if (inputs.size() != NumInput()) - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "The size of the input argument vector does not match the number of inputs"); outputs = inputs; return Status::OK(); @@ -58,7 +58,7 @@ Status TensorOp::OutputType(const std::vector &inputs, std::vector &outputs) { if (inputs.size() != NumInput()) - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "The size of the input argument vector does not match the number of inputs"); outputs = inputs; return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc b/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc index c1d3b6bc03e..82f89f5d91e 100644 --- a/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc +++ b/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc @@ -83,7 +83,7 @@ Status JiebaTokenizerOp::Compute(const TensorRow &input, TensorRow *output) { Status JiebaTokenizerOp::AddWord(const std::string &word, int freq) { RETURN_UNEXPECTED_IF_NULL(jieba_parser_); if (jieba_parser_->InsertUserWord(word, freq, "") == false) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "AddWord: add word failed."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "AddWord: add word failed."); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc b/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc index f2bbdc15966..aecdabb99ba 100644 --- a/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc +++ b/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc @@ -31,7 +31,7 @@ SentencePieceTokenizerOp::SentencePieceTokenizerOp(const std::shared_ptrmodel_proto()); if (!status.ok()) { model_status_ = - Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "SentencePieceTokenizer: parser vocab model filed."); + Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "SentencePieceTokenizer: parse vocab model failed."); } else { model_status_ = Status::OK(); } @@ -46,7 +46,7 @@ SentencePieceTokenizerOp::SentencePieceTokenizerOp(const std::string &model_path if (!status.ok()) { std::string err_msg = "SentencePieceTokenizer: "; err_msg += "load vocab model file: " + file_path_ + " failed."; - model_status_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + model_status_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } else { model_status_ = Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc b/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc index ac446c4b660..7b4406665bf 100644 --- a/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc +++ b/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc @@ -74,7 +74,7 @@ Status SentencePieceVocab::BuildFromFile(const std::vector &path_li sentencepiece::util::Status s_status = sentencepiece::SentencePieceTrainer::Train(unorder_map, nullptr, &model_proto); if (!s_status.ok()) { std::string err_msg = "SentencePieceVocab: " + std::string(s_status.message()); - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); +
return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } vocab->get()->set_model_proto(model_proto); diff --git a/mindspore/ccsrc/minddata/dataset/util/allocator.h b/mindspore/ccsrc/minddata/dataset/util/allocator.h index f6e0ef846cc..82cf9956fc2 100644 --- a/mindspore/ccsrc/minddata/dataset/util/allocator.h +++ b/mindspore/ccsrc/minddata/dataset/util/allocator.h @@ -73,7 +73,7 @@ class Allocator { Status rc = pool_->Allocate(n * sizeof(T), &p); if (rc.IsOk()) { return reinterpret_cast(p); - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { throw std::bad_alloc(); } else { throw std::exception(); @@ -97,7 +97,7 @@ Status MakeUnique(std::unique_ptr> *out, C alloc, // Some of our implementation of allocator (e.g. NumaAllocator) don't throw std::bad_alloc. // So we have to catch for null ptr if (data == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } if (!std::is_arithmetic::value) { for (auto i = 0; i < n; i++) { @@ -114,7 +114,7 @@ Status MakeUnique(std::unique_ptr> *out, C alloc, }; *out = std::unique_ptr>(data, std::bind(deleter, std::placeholders::_1, alloc, n)); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } catch (const std::exception &e) { RETURN_STATUS_UNEXPECTED(e.what()); } diff --git a/mindspore/ccsrc/minddata/dataset/util/arena.cc b/mindspore/ccsrc/minddata/dataset/util/arena.cc index a4194b96327..7d77d3e0005 100644 --- a/mindspore/ccsrc/minddata/dataset/util/arena.cc +++ b/mindspore/ccsrc/minddata/dataset/util/arena.cc @@ -50,7 +50,7 @@ Status ArenaImpl::Allocate(size_t n, void **p) { // Round up n to 1K block uint64_t req_size = static_cast(n) + ARENA_WALL_OVERHEAD_SZ; if (req_size > this->get_max_size()) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } uint64_t reqBlk = SizeToBlk(req_size); // Do a first fit search @@ -67,7 +67,7 @@ Status ArenaImpl::Allocate(size_t n, void **p) { MemHdr::setHdr(q, addr, reqBlk); *p = get_user_addr(q); } else { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } return Status::OK(); } @@ -240,7 +240,7 @@ Status Arena::Init() { auto ret = cudaHostAlloc(&ptr_, sz, cudaHostAllocDefault); if (ret != cudaSuccess) { MS_LOG(ERROR) << "cudaHostAlloc failed, ret[" << static_cast(ret) << "], " << cudaGetErrorString(ret); - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } impl_ = std::make_unique(ptr_, sz); } else { @@ -252,7 +252,7 @@ Status Arena::Init() { impl_ = std::make_unique(ptr_, sz); #endif } catch (std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } return Status::OK(); } @@ -265,7 +265,7 @@ Status Arena::CreateArena(std::shared_ptr *p_ba, size_t val_in_MB, bool i RETURN_UNEXPECTED_IF_NULL(p_ba); auto ba = new (std::nothrow) Arena(val_in_MB, is_cuda_malloc); if (ba == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } (*p_ba).reset(ba); RETURN_IF_NOT_OK(ba->Init()); @@ -278,7 +278,7 @@ Status Arena::CreateArena(std::shared_ptr *p_ba, size_t val_in_MB) { RETURN_UNEXPECTED_IF_NULL(p_ba); auto ba = new (std::nothrow) Arena(val_in_MB); if (ba == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } (*p_ba).reset(ba); RETURN_IF_NOT_OK(ba->Init()); diff --git 
a/mindspore/ccsrc/minddata/dataset/util/btree.h b/mindspore/ccsrc/minddata/dataset/util/btree.h index 920e79c9859..9e99fbcd5a3 100644 --- a/mindspore/ccsrc/minddata/dataset/util/btree.h +++ b/mindspore/ccsrc/minddata/dataset/util/btree.h @@ -72,11 +72,11 @@ class BPlusTree { Status IndexRc2Status(IndexRc rc) { if (rc == IndexRc::kOk) { - return Status(StatusCode::kOK); + return Status(StatusCode::kSuccess); } else if (rc == IndexRc::kOutOfMemory) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } else if (rc == IndexRc::kDuplicateKey) { - return Status(StatusCode::kDuplicateKey); + return Status(StatusCode::kMDDuplicateKey); } else { RETURN_STATUS_UNEXPECTED(std::to_string(static_cast(rc))); } diff --git a/mindspore/ccsrc/minddata/dataset/util/buddy.cc b/mindspore/ccsrc/minddata/dataset/util/buddy.cc index bbeeaff0db0..64870b04a18 100644 --- a/mindspore/ccsrc/minddata/dataset/util/buddy.cc +++ b/mindspore/ccsrc/minddata/dataset/util/buddy.cc @@ -36,11 +36,11 @@ namespace mindspore { namespace dataset { Status BuddySpace::Init() { if (log_min_ < 0) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "log_min must be positive : " + std::to_string(log_min_)); } if (num_lvl_ < 3 || num_lvl_ > 18) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "num_lvl must be between 3 and 18 : " + std::to_string(num_lvl_)); } min_ = BitLeftShift(1, log_min_); @@ -51,7 +51,7 @@ Status BuddySpace::Init() { try { mem_ = std::make_unique(offset_3); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } (void)memset_s(mem_.get(), offset_3, 0, offset_3); auto ptr = mem_.get(); @@ -70,7 +70,7 @@ Status BuddySpace::Alloc(const uint64_t sz, BSpaceDescriptor *desc, addr_t *p) n *p = addr; return Status::OK(); } else { - return Status(StatusCode::kBuddySpaceFull, "BuddySpace full. Not an error. Please ignore."); + return Status(StatusCode::kMDBuddySpaceFull, "BuddySpace full. Not an error. Please ignore."); } } @@ -126,7 +126,7 @@ std::ostream &operator<<(std::ostream &os, const BuddySpace &s) { BuddySpace::STATE st; s.GetBuddySegState(addr, &sz, &st); os << "Address : " << std::left << std::setw(8) << addr << " Size : " << std::setw(8) << sz << " State : " - << ((st == BuddySpace::STATE::kAlloc) ? "ALLOC" : ((st == BuddySpace::STATE::kFree) ? "FREE" : "Unkonwn")) + << ((st == BuddySpace::STATE::kAlloc) ? "ALLOC" : ((st == BuddySpace::STATE::kFree) ? 
"FREE" : "Unknown")) << "\n"; addr += sz; } @@ -371,7 +371,7 @@ Status BuddySpace::CreateBuddySpace(std::unique_ptr *out_bs, int log Status rc; auto bs = new (std::nothrow) BuddySpace(log_min, num_lvl); if (bs == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } rc = bs->Init(); if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc b/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc index cc9fc9a9b8c..39dfd7bf76d 100644 --- a/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc +++ b/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc @@ -93,7 +93,7 @@ Status CircularPool::Allocate(size_t n, void **p) { auto it = cirIt.Next(); Arena *ba = it->get(); if (ba->get_max_size() < n) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } // If we are asked to move forward the tail if (move_tail) { @@ -105,7 +105,7 @@ Status CircularPool::Allocate(size_t n, void **p) { if (rc.IsOk()) { *p = ptr; break; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { // Make the next arena a new tail and continue. move_tail = true; } else { @@ -126,7 +126,7 @@ Status CircularPool::Allocate(size_t n, void **p) { // Re-acquire the shared lock and try again lock_s.Downgrade(); } else { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } } } while (ptr == nullptr); @@ -164,7 +164,7 @@ Status CircularPool::Reallocate(void **pp, size_t old_sz, size_t new_sz) { MS_ASSERT(it != mem_segments_.end()); Arena *ba = it->get(); Status rc = ba->Reallocate(pp, old_sz, new_sz); - if (rc.IsOutofMemory()) { + if (rc == StatusCode::kMDOutOfMemory) { // The current arena has no room for the bigger size. // Allocate free space from another arena and copy // the content over. 
@@ -222,7 +222,7 @@ Status CircularPool::CreateCircularPool(std::shared_ptr *out_pool, i } auto pool = new (std::nothrow) CircularPool(max_size_in_gb, arena_size, is_cuda_malloc); if (pool == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } if (createOneArena) { rc = pool->AddOneArena(); @@ -243,7 +243,7 @@ Status CircularPool::CreateCircularPool(std::shared_ptr *out_pool, i } auto pool = new (std::nothrow) CircularPool(max_size_in_gb, arena_size); if (pool == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } if (createOneArena) { rc = pool->AddOneArena(); diff --git a/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h b/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h index 00ba0d84bb6..b578b84de1d 100644 --- a/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h +++ b/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h @@ -39,7 +39,7 @@ class IntrpResource { virtual Status GetInterruptStatus() const { if (Interrupted()) { - return Status(StatusCode::kInterrupted); + return Status(StatusCode::kMDInterrupted); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc b/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc index 80417ac2a01..17b990b02ff 100644 --- a/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc +++ b/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc @@ -39,7 +39,7 @@ Status IntrpService::Register(const std::string &name, IntrpResource *res) { SharedLock stateLck(&state_lock_); // Now double check the state if (ServiceState() != STATE::kRunning) { - return Status(StatusCode::kInterrupted, __LINE__, __FILE__, "Interrupt service is shutting down"); + return Status(StatusCode::kMDInterrupted, __LINE__, __FILE__, "Interrupt service is shutting down"); } else { std::lock_guard lck(mutex_); try { @@ -48,7 +48,7 @@ Status IntrpService::Register(const std::string &name, IntrpResource *res) { MS_LOG(DEBUG) << "Register resource with name " << name << ". 
Thread ID " << ss.str() << "."; auto it = all_intrp_resources_.emplace(name, res); if (it.second == false) { - return Status(StatusCode::kDuplicateKey, __LINE__, __FILE__, name); + return Status(StatusCode::kMDDuplicateKey, __LINE__, __FILE__, name); } high_water_mark_++; } catch (std::exception &e) { diff --git a/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc b/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc index 0e1be9d798f..38351bcb76b 100644 --- a/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc +++ b/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc @@ -24,7 +24,7 @@ Status DeMalloc(std::size_t s, void **p, bool init_to_zero = false) { } void *q = ::malloc(s); if (q == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } else { *p = q; if (init_to_zero) { @@ -36,13 +36,13 @@ Status DeMalloc(std::size_t s, void **p, bool init_to_zero = false) { } // namespace dataset } // namespace mindspore -void *operator new(std::size_t s, mindspore::dataset::Status *rc, std::shared_ptr b) { +void *operator new(std::size_t s, mindspore::Status *rc, std::shared_ptr b) { void *ptr = nullptr; *rc = b->Allocate(s, &ptr); return ptr; } -void *operator new[](std::size_t s, mindspore::dataset::Status *rc, std::shared_ptr b) { +void *operator new[](std::size_t s, mindspore::Status *rc, std::shared_ptr b) { void *ptr = nullptr; *rc = b->Allocate(s, &ptr); return ptr; diff --git a/mindspore/ccsrc/minddata/dataset/util/memory_pool.h b/mindspore/ccsrc/minddata/dataset/util/memory_pool.h index 33e6012626e..150bd9ddcb0 100644 --- a/mindspore/ccsrc/minddata/dataset/util/memory_pool.h +++ b/mindspore/ccsrc/minddata/dataset/util/memory_pool.h @@ -48,9 +48,9 @@ Status DeMalloc(std::size_t s, void **p, bool); } // namespace dataset } // namespace mindspore -void *operator new(std::size_t, mindspore::dataset::Status *, std::shared_ptr); +void *operator new(std::size_t, mindspore::Status *, std::shared_ptr); -void *operator new[](std::size_t, mindspore::dataset::Status *, std::shared_ptr); +void *operator new[](std::size_t, mindspore::Status *, std::shared_ptr); void operator delete(void *, std::shared_ptr); diff --git a/mindspore/ccsrc/minddata/dataset/util/queue.h b/mindspore/ccsrc/minddata/dataset/util/queue.h index 2f2e0c70066..3a62c246264 100644 --- a/mindspore/ccsrc/minddata/dataset/util/queue.h +++ b/mindspore/ccsrc/minddata/dataset/util/queue.h @@ -186,7 +186,8 @@ class QueueList { Status Register(TaskGroup *vg) { if (vg == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Null task group during QueueList registration."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, + "Null task group during QueueList registration."); } for (int i = 0; i < queue_list_.size(); ++i) { RETURN_IF_NOT_OK(queue_list_[i]->Register(vg)); diff --git a/mindspore/ccsrc/minddata/dataset/util/services.h b/mindspore/ccsrc/minddata/dataset/util/services.h index 92692add3e0..9600ffd1a99 100644 --- a/mindspore/ccsrc/minddata/dataset/util/services.h +++ b/mindspore/ccsrc/minddata/dataset/util/services.h @@ -92,7 +92,7 @@ class Services { std::unique_ptr svc(*out); hook_.push_back(std::move(svc)); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/util/status.cc b/mindspore/ccsrc/minddata/dataset/util/status.cc index a18b3cb3ab1..6d0f7bb7465 100644 
--- a/mindspore/ccsrc/minddata/dataset/util/status.cc +++ b/mindspore/ccsrc/minddata/dataset/util/status.cc @@ -30,155 +30,6 @@ namespace mindspore { namespace dataset { -std::string CodeAsString(const StatusCode c) { - const char *s = nullptr; - if (c == StatusCode::kOK) { - // Optimize the most frequent case - return std::string("OK"); - } else { - switch (c) { - case StatusCode::kOutOfMemory: - s = "Out of memory"; - break; - case StatusCode::kInterrupted: - s = "Interrupted system call"; - break; - case StatusCode::kShapeMisMatch: - s = "Shape is incorrect"; - break; - case StatusCode::kNoSpace: - s = "No space left on device"; - break; - case StatusCode::kPyFuncException: - s = "Exception thrown from PyFunc"; - break; - case StatusCode::kDuplicateKey: - s = "Duplicate key"; - break; - case StatusCode::kProfilingError: - s = "Error encountered while profiling"; - break; - case StatusCode::kSyntaxError: - s = "Syntax error"; - break; - case StatusCode::kBuddySpaceFull: - s = "BuddySpace full"; - break; - case StatusCode::kNetWorkError: - s = "Network error"; - break; - case StatusCode::kUnexpectedError: - default: - s = "Unexpected error"; - break; - } - } - return std::string(s); -} - -Status::Status(StatusCode c) noexcept - : code_(c), err_msg_(CodeAsString(c)), line_of_code_(-1), file_name_(""), err_description_("") {} - -Status::Status() noexcept - : code_(StatusCode::kOK), err_msg_(""), line_of_code_(-1), file_name_(""), err_description_("") {} - -Status::~Status() noexcept {} - -Status::Status(const Status &s) - : code_(s.code_), - err_msg_(s.err_msg_), - line_of_code_(s.line_of_code_), - file_name_(s.file_name_), - err_description_(s.err_description_) {} - -Status &Status::operator=(const Status &s) { - if (this == &s) { - return *this; - } - code_ = s.code_; - err_msg_ = s.err_msg_; - line_of_code_ = s.line_of_code_; - file_name_ = s.file_name_; - err_description_ = s.err_description_; - return *this; -} - -Status::Status(Status &&s) noexcept { - code_ = s.code_; - s.code_ = StatusCode::kOK; - line_of_code_ = s.line_of_code_; - s.line_of_code_ = -1; - file_name_ = std::move(s.file_name_); - err_description_ = std::move(s.err_description_); - err_msg_ = std::move(s.err_msg_); -} - -Status &Status::operator=(Status &&s) noexcept { - if (this == &s) { - return *this; - } - code_ = s.code_; - s.code_ = StatusCode::kOK; - line_of_code_ = s.line_of_code_; - s.line_of_code_ = -1; - file_name_ = std::move(s.file_name_); - err_description_ = std::move(s.err_description_); - err_msg_ = std::move(s.err_msg_); - return *this; -} - -Status::Status(const StatusCode code, const std::string &msg) - : code_(code), err_msg_(msg), line_of_code_(-1), file_name_(""), err_description_(msg) {} - -Status::Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra) { - code_ = code; - line_of_code_ = line_of_code; - file_name_ = std::string(file_name); - err_description_ = extra; - std::ostringstream ss; -#ifndef ENABLE_ANDROID - ss << "Thread ID " << this_thread::get_id() << " " << CodeAsString(code) << ". 
"; - if (!extra.empty()) { - ss << extra; - } - ss << "\n"; -#endif - - ss << "Line of code : " << line_of_code << "\n"; - if (file_name != nullptr) { - ss << "File : " << file_name << "\n"; - } - err_msg_ = ss.str(); -} - -std::ostream &operator<<(std::ostream &os, const Status &s) { - os << s.ToString(); - return os; -} - -std::string Status::SetErrDescription(const std::string &err_description) { - err_description_ = err_description; - std::ostringstream ss; -#ifndef ENABLE_ANDROID - ss << "Thread ID " << this_thread::get_id() << " " << CodeAsString(code_) << ". "; - if (!err_description_.empty()) { - ss << err_description_; - } - ss << "\n"; -#endif - - if (line_of_code_ > 0 && !file_name_.empty()) { - ss << "Line of code : " << line_of_code_ << "\n"; - ss << "File : " << file_name_ << "\n"; - } - err_msg_ = ss.str(); - return err_msg_; -} - -std::string Status::ToString() const { return err_msg_; } - -StatusCode Status::get_code() const { return code_; } - #if !defined(_WIN32) && !defined(_WIN64) float GetMemoryUsage() { char buf[128] = {0}; diff --git a/mindspore/ccsrc/minddata/dataset/util/status.h b/mindspore/ccsrc/minddata/dataset/util/status.h index 7002b05f10e..f7f19b6511d 100644 --- a/mindspore/ccsrc/minddata/dataset/util/status.h +++ b/mindspore/ccsrc/minddata/dataset/util/status.h @@ -29,6 +29,8 @@ #include #include +#include "include/api/status.h" + namespace mindspore { namespace dataset { #define RETURN_IF_NOT_OK(_s) \ @@ -39,23 +41,23 @@ namespace dataset { } \ } while (false) -#define RETURN_STATUS_UNEXPECTED(_e) \ - do { \ - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ - } while (false) - -#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ +#define RETURN_STATUS_UNEXPECTED(_e) \ do { \ - if (!(_condition)) { \ - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ - } \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ } while (false) -#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ - do { \ - if (!(_condition)) { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ - } \ +#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ + } \ + } while (false) + +#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ + } \ } while (false) #define RETURN_UNEXPECTED_IF_NULL(_ptr) \ @@ -73,9 +75,9 @@ namespace dataset { } \ } while (false) -#define RETURN_STATUS_SYNTAX_ERROR(_e) \ - do { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ +#define RETURN_STATUS_SYNTAX_ERROR(_e) \ + do { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ } while (false) #define RETURN_SECOND_IF_ERROR(_s, _r) \ @@ -87,99 +89,8 @@ namespace dataset { } \ } while (false) -enum class StatusCode : char { - kOK = 0, - kOutOfMemory = 1, - kShapeMisMatch = 2, - kInterrupted = 3, - kNoSpace = 4, - kPyFuncException = 5, - kDuplicateKey = 6, - kPythonInterpreterFailure = 7, - kTDTPushFailure = 8, - kFileNotExist = 9, - kProfilingError = 10, - kBoundingBoxOutOfBounds = 11, - kBoundingBoxInvalidShape = 12, - kSyntaxError = 13, - kTimeOut = 14, - kBuddySpaceFull = 15, - kNetWorkError = 16, - kNotImplementedYet = 17, - // Make this error code the last one. Add new error code above it. 
- kUnexpectedError = 127 -}; - -std::string CodeAsString(const StatusCode c); - -class Status { - public: - Status() noexcept; - - explicit Status(StatusCode c) noexcept; - - ~Status() noexcept; - - // Copy constructor - Status(const Status &s); - - Status &operator=(const Status &s); - - // Move constructor - Status(Status &&) noexcept; - - Status &operator=(Status &&) noexcept; - - Status(const StatusCode code, const std::string &msg); - - Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = ""); - - // Return a success status - static Status OK() { return Status(StatusCode::kOK); } - - std::string ToString() const; - - StatusCode get_code() const; - - int GetLineOfCode() const { return line_of_code_; } - - std::string SetErrDescription(const std::string &err_description); - - std::string GetErrDescription() const { return err_description_; } - - friend std::ostream &operator<<(std::ostream &os, const Status &s); - - explicit operator bool() const { return (get_code() == StatusCode::kOK); } - - bool operator==(const Status &other) const { return (this->get_code() == other.get_code()); } - - bool operator!=(const Status &other) const { return !(*this == other); } - - bool IsOk() const { return (get_code() == StatusCode::kOK); } - - bool IsError() const { return !IsOk(); } - - bool IsOutofMemory() const { return (get_code() == StatusCode::kOutOfMemory); } - - bool IsInterrupted() const { return (get_code() == StatusCode::kInterrupted); } - - bool IsShapeIncorrect() const { return (get_code() == StatusCode::kShapeMisMatch); } - - bool IsNoSpace() const { return (get_code() == StatusCode::kNoSpace); } - - bool IsNetWorkError() const { return (get_code() == StatusCode::kNetWorkError); } - - private: - StatusCode code_; - int line_of_code_; - std::string file_name_; - std::string err_description_; - std::string err_msg_; -}; - #if !defined(_WIN32) && !defined(_WIN64) const float MAX_MEMORY_USAGE_THRESHOLD = 0.95; - float GetMemoryUsage(); #endif } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/util/task.cc b/mindspore/ccsrc/minddata/dataset/util/task.cc index 514b6bb991f..6d2c0bcaa0b 100644 --- a/mindspore/ccsrc/minddata/dataset/util/task.cc +++ b/mindspore/ccsrc/minddata/dataset/util/task.cc @@ -57,8 +57,8 @@ void Task::operator()() { rc_ = fnc_obj_(); } // Some error codes are ignored, e.g. interrupt. Others we just shutdown the group. 
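
The task.cc hunk continuing below applies the same migration inside Task::operator()(): exceptions escaping the task body are mapped onto the unified codes (std::bad_alloc becomes kMDOutOfMemory, any other std::exception becomes kMDUnexpectedError). A self-contained sketch of that mapping, again with a simplified stand-in for the real Status (illustrative code values; needs C++17 for the aggregate init):

#include <functional>
#include <iostream>
#include <new>
#include <stdexcept>
#include <string>

enum StatusCode : int { kSuccess = 0, kMDOutOfMemory = 1, kMDUnexpectedError = 2 };  // illustrative values

struct Status {
  StatusCode code = kSuccess;
  std::string msg;
  bool IsError() const { return code != kSuccess; }
};

// Mirrors the shape of the catch blocks in Task::operator()(): exceptions
// thrown by the task body become status codes instead of escaping the thread.
Status RunTaskBody(const std::function<void()> &body) {
  try {
    body();
    return {};
  } catch (const std::bad_alloc &e) {
    return {kMDOutOfMemory, e.what()};
  } catch (const std::exception &e) {
    return {kMDUnexpectedError, e.what()};
  }
}

int main() {
  Status rc = RunTaskBody([] { throw std::runtime_error("boom"); });
  std::cout << (rc.IsError() ? rc.msg : "ok") << std::endl;
  return rc.IsError() ? 1 : 0;
}
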
- if (rc_.IsError() && !rc_.IsInterrupted()) { - if (rc_.get_code() == StatusCode::kNetWorkError) { + if (rc_.IsError() && rc_ != StatusCode::kMDInterrupted) { + if (rc_.StatusCode() == StatusCode::kMDNetWorkError) { MS_LOG(WARNING) << rc_; } else { MS_LOG(ERROR) << rc_; @@ -66,11 +66,11 @@ void Task::operator()() { ShutdownGroup(); } } catch (const std::bad_alloc &e) { - rc_ = Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, e.what()); + rc_ = Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, e.what()); MS_LOG(ERROR) << rc_; ShutdownGroup(); } catch (const std::exception &e) { - rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, e.what()); + rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, e.what()); MS_LOG(ERROR) << rc_; ShutdownGroup(); } @@ -128,7 +128,7 @@ Status Task::Run() { running_ = true; caught_severe_exception_ = false; } catch (const std::exception &e) { - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, e.what()); + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, e.what()); } } return rc; @@ -200,7 +200,7 @@ void Task::set_task_group(TaskGroup *vg) { task_group_ = vg; } Task::~Task() { task_group_ = nullptr; } Status Task::OverrideInterruptRc(const Status &rc) { - if (rc.IsInterrupted() && this_thread::is_master_thread()) { + if (rc == StatusCode::kMDInterrupted && this_thread::is_master_thread()) { // If we are interrupted, override the return value if this is the master thread. // Master thread is being interrupted mostly because of some thread is reporting error. return TaskManager::GetMasterThreadRc(); diff --git a/mindspore/ccsrc/minddata/dataset/util/task_manager.cc b/mindspore/ccsrc/minddata/dataset/util/task_manager.cc index f1d172c84cf..d02b3d770ea 100644 --- a/mindspore/ccsrc/minddata/dataset/util/task_manager.cc +++ b/mindspore/ccsrc/minddata/dataset/util/task_manager.cc @@ -31,7 +31,7 @@ Status TaskManager::CreateAsyncTask(const std::string &my_name, const std::funct SharedLock stateLck(&state_lock_); // Now double check the state if (ServiceState() == STATE::kStopInProg || ServiceState() == STATE::kStopped) { - return Status(StatusCode::kInterrupted, __LINE__, __FILE__, "TaskManager is shutting down"); + return Status(StatusCode::kMDInterrupted, __LINE__, __FILE__, "TaskManager is shutting down"); } RETURN_IF_NOT_OK(GetFreeTask(my_name, f, task, operator_id)); if (vg == nullptr) { @@ -282,7 +282,7 @@ Status TaskGroup::CreateAsyncTask(const std::string &my_name, const std::functio SharedLock state_lck(&state_lock_); // Now double check the state if (ServiceState() != STATE::kRunning) { - return Status(StatusCode::kInterrupted, __LINE__, __FILE__, "Taskgroup is shutting down"); + return Status(StatusCode::kMDInterrupted, __LINE__, __FILE__, "Taskgroup is shutting down"); } TaskManager &dm = TaskManager::GetInstance(); Task *pTask = nullptr; @@ -292,7 +292,7 @@ Status TaskGroup::CreateAsyncTask(const std::string &my_name, const std::functio { std::unique_lock rcLock(rc_mux_); if (rc_.IsError()) { - return pMytask->IsMasterThread() ? rc_ : Status(StatusCode::kInterrupted); + return pMytask->IsMasterThread() ? 
rc_ : Status(StatusCode::kMDInterrupted); } } RETURN_IF_NOT_OK(dm.CreateAsyncTask(my_name, f, this, &pTask, operator_id)); diff --git a/mindspore/core/CMakeLists.txt b/mindspore/core/CMakeLists.txt index 02be915eea9..8a95bc7d5b6 100644 --- a/mindspore/core/CMakeLists.txt +++ b/mindspore/core/CMakeLists.txt @@ -7,8 +7,6 @@ if(NOT(CMAKE_SYSTEM_NAME MATCHES "Windows")) endif() message("************ build core ***************") - - file(GLOB_RECURSE CORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "abstract/*.cc" "base/*.cc" diff --git a/mindspore/core/ir/api_tensor_impl.h b/mindspore/core/ir/api_tensor_impl.h new file mode 100644 index 00000000000..f57b134c057 --- /dev/null +++ b/mindspore/core/ir/api_tensor_impl.h @@ -0,0 +1,47 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_IR_API_TENSOR_IMPL_H_ +#define MINDSPORE_CORE_IR_API_TENSOR_IMPL_H_ + +#include +#include +#include +#include "include/api/types.h" + +namespace mindspore { +class MSTensor::Impl { + public: + Impl() = default; + virtual ~Impl() = default; + + virtual const std::string &Name() const = 0; + virtual enum DataType DataType() const = 0; + virtual const std::vector &Shape() const = 0; + + virtual std::shared_ptr Data() const = 0; + virtual void *MutableData() = 0; + virtual size_t DataSize() const = 0; + + virtual bool IsDevice() const = 0; + + virtual std::shared_ptr Clone() const = 0; +}; +} // namespace mindspore + +#endif // MINDSPORE_CORE_IR_API_TENSOR_IMPL_H_ diff --git a/mindspore/core/utils/status.cc b/mindspore/core/utils/status.cc new file mode 100644 index 00000000000..e90ce645d8e --- /dev/null +++ b/mindspore/core/utils/status.cc @@ -0,0 +1,127 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "include/api/status.h" +#ifndef ENABLE_ANDROID +#include +#endif +#include +#include + +namespace mindspore { +Status::Status(enum StatusCode code, int line_of_code, const char *file_name, const std::string &extra) { + status_code_ = code; + line_of_code_ = line_of_code; + file_name_ = std::string(file_name); + err_description_ = extra; + std::ostringstream ss; +#ifndef ENABLE_ANDROID + ss << "Thread ID " << std::this_thread::get_id() << " " << CodeAsString(code) << ". "; + if (!extra.empty()) { + ss << extra; + } + ss << "\n"; +#endif + + ss << "Line of code : " << line_of_code << "\n"; + if (file_name != nullptr) { + ss << "File : " << file_name << "\n"; + } + status_msg_ = ss.str(); +} + +std::string Status::CodeAsString(enum StatusCode c) { + static std::map info_map = {{kSuccess, "No error occurs."}, + // Core + {kCoreFailed, "Common error code."}, + // MD + {kMDOutOfMemory, "Out of memory"}, + {kMDShapeMisMatch, "Shape is incorrect."}, + {kMDInterrupted, "Interrupted system call"}, + {kMDNoSpace, "No space left on device"}, + {kMDPyFuncException, "Exception thrown from PyFunc"}, + {kMDDuplicateKey, "Duplicate key"}, + {kMDPythonInterpreterFailure, ""}, + {kMDTDTPushFailure, "Unexpected error"}, + {kMDFileNotExist, "Unexpected error"}, + {kMDProfilingError, "Error encountered while profiling"}, + {kMDBoundingBoxOutOfBounds, "Unexpected error"}, + {kMDBoundingBoxInvalidShape, "Unexpected error"}, + {kMDSyntaxError, "Syntax error"}, + {kMDTimeOut, "Unexpected error"}, + {kMDBuddySpaceFull, "BuddySpace full"}, + {kMDNetWorkError, "Network error"}, + {kMDNotImplementedYet, "Unexpected error"}, + {kMDUnexpectedError, "Unexpected error"}, + // ME + {kMEFailed, "Common error code."}, + {kMEInvalidInput, "Invalid input."}, + // MC + {kMCFailed, "Common error code."}, + {kMCDeviceError, "Device error."}, + {kMCInvalidInput, "Invalid input."}, + {kMCInvalidArgs, "Invalid arguments."}, + // Lite + {kLiteError, "Common error code."}, + {kLiteNullptr, "NULL pointer returned."}, + {kLiteParamInvalid, "Invalid parameter."}, + {kLiteNoChange, "No change."}, + {kLiteSuccessExit, "No error but exit."}, + {kLiteMemoryFailed, "Fail to create memory."}, + {kLiteNotSupport, "Fail to support."}, + {kLiteThreadPoolError, "Thread pool error."}, + {kLiteOutOfTensorRange, "Failed to check range."}, + {kLiteInputTensorError, "Failed to check input tensor."}, + {kLiteReentrantError, "Exist executor running."}, + {kLiteGraphFileError, "Failed to verify graph file."}, + {kLiteNotFindOp, "Failed to find operator."}, + {kLiteInvalidOpName, "Invalid operator name."}, + {kLiteInvalidOpAttr, "Invalid operator attr."}, + {kLiteOpExecuteFailure, "Failed to execution operator."}, + {kLiteFormatError, "Failed to checking tensor format."}, + {kLiteInferError, "Failed to infer shape."}, + {kLiteInferInvalid, "Invalid infer shape before runtime."}, + {kLiteInputParamInvalid, "Invalid input param by user."}}; + auto iter = info_map.find(c); + return iter == info_map.end() ? "Unknown error" : iter->second; +} + +std::ostream &operator<<(std::ostream &os, const Status &s) { + os << s.ToString(); + return os; +} + +const std::string &Status::SetErrDescription(const std::string &err_description) { + err_description_ = err_description; + std::ostringstream ss; +#ifndef ENABLE_ANDROID + ss << "Thread ID " << std::this_thread::get_id() << " " << CodeAsString(status_code_) << ". 
"; + if (!err_description_.empty()) { + ss << err_description_; + } + ss << "\n"; +#endif + + if (line_of_code_ > 0 && !file_name_.empty()) { + ss << "Line of code : " << line_of_code_ << "\n"; + ss << "File : " << file_name_ << "\n"; + } + status_msg_ = ss.str(); + return status_msg_; +} +} // namespace mindspore diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h index 70a3c1a3c9d..7c673d04547 100644 --- a/mindspore/lite/include/context.h +++ b/mindspore/lite/include/context.h @@ -19,17 +19,11 @@ #include #include +#include "include/api/lite_context.h" #include "include/ms_tensor.h" #include "include/lite_utils.h" namespace mindspore::lite { -/// \brief CpuBindMode defined for holding bind cpu strategy argument. -typedef enum { - NO_BIND = 0, /**< no bind */ - HIGHER_CPU = 1, /**< bind higher cpu first */ - MID_CPU = 2 /**< bind middle cpu first */ -} CpuBindMode; - /// \brief DeviceType defined for holding user's preferred backend. typedef enum { DT_CPU, /**< CPU device type */ diff --git a/mindspore/lite/include/errorcode.h b/mindspore/lite/include/errorcode.h index ff74b02dac1..f516e21f341 100644 --- a/mindspore/lite/include/errorcode.h +++ b/mindspore/lite/include/errorcode.h @@ -18,6 +18,8 @@ #define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ #include +#include +#include "include/api/status.h" namespace mindspore { namespace lite { @@ -67,6 +69,7 @@ constexpr int RET_INPUT_PARAM_INVALID = -600; /**< Invalid input param by user. /// /// \return String of errorcode info. std::string GetErrorInfo(STATUS error_code); + } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/include/ms_tensor.h b/mindspore/lite/include/ms_tensor.h index 5cb14ab6fd8..f3706af0955 100644 --- a/mindspore/lite/include/ms_tensor.h +++ b/mindspore/lite/include/ms_tensor.h @@ -24,11 +24,13 @@ #include #include "ir/dtype/type_id.h" +#ifndef MS_API #ifdef _WIN32 #define MS_API __declspec(dllexport) #else #define MS_API __attribute__((visibility("default"))) #endif +#endif namespace mindspore { namespace tensor { @@ -45,7 +47,7 @@ class MS_API MSTensor { /// \brief Get data type of the MindSpore Lite MSTensor. /// - /// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in TypeId enum are + /// \note TypeId is defined in mindspore/mindspore/include/api/type_id.h. Only number types in TypeId enum are /// suitable for MSTensor. /// /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor. @@ -79,6 +81,17 @@ class MS_API MSTensor { /// /// \return the pointer points to data in MSTensor. virtual void *MutableData() = 0; + + /// \brief Get the name of MSTensor. + /// + /// \return the name of MSTensor. + virtual std::string tensor_name() const = 0; + + /// \brief Set the name of MSTensor. + virtual void set_tensor_name(const std::string name) = 0; + + /// \brief Set the data of MSTensor. + virtual void set_data(void *data) = 0; }; } // namespace tensor /// \brief CallBackParam defined input arguments for callBack function. diff --git a/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt b/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt index e7231258a0d..959dfe3e8f9 100644 --- a/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt +++ b/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt @@ -33,7 +33,8 @@ set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../..) 
set(LITE_DIR ${TOP_DIR}/mindspore/lite) include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -include_directories(${LITE_DIR}) ## lite include +include_directories(${LITE_DIR}) ## lite include +include_directories(${TOP_DIR}) ## api include include_directories(${TOP_DIR}/mindspore/core/) ## core include include_directories(${LITE_DIR}/build) ## flatbuffers diff --git a/mindspore/lite/minddata/CMakeLists.txt b/mindspore/lite/minddata/CMakeLists.txt index d0d8fcfc94b..418ee00089d 100644 --- a/mindspore/lite/minddata/CMakeLists.txt +++ b/mindspore/lite/minddata/CMakeLists.txt @@ -9,22 +9,31 @@ include(${TOP_DIR}/cmake/external_libs/jpeg_turbo.cmake) set(MINDDATA_DIR ${CCSRC_DIR}/minddata/dataset) set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2 -Wno-cpp") -set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -Werror -Wno-return-std-move -Wno-unused-private-field -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override") - +set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer \ + -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 -DHALF_ENABLE_CPP11_USER_LITERALS=0 \ + -D_FORTIFY_SOURCE=2 -Wno-cpp") +set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -Werror -Wno-return-std-move -Wno-unused-private-field \ + -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration \ + -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override") set(CMAKE_CXX_FLAGS "$ENV{CXXFLAGS} -I/usr/local/include -std=c++17 -Wall -fPIC") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPTION_CXX_FLAGS}") if(PLATFORM_ARM) - set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Werror -Wno-return-std-move -Wno-unused-private-field -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") + set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Werror -Wno-return-std-move -Wno-unused-private-field \ + -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration \ + -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override \ + -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") else() - set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") + set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 \ + -D_FORTIFY_SOURCE=2") endif() -set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") -set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") +set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "-fPIC -fPIE 
-D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-non-virtual-dtor") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sign-compare") @@ -61,7 +70,8 @@ AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/consumers MINDDATA_ENGINE_CONSUMERS_ AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops MINDDATA_ENGINE_DATASETOPS_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/map_op MINDDATA_ENGINE_DATASETOPS_MAPOP_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/source MINDDATA_ENGINE_DATASETOPS_SOURCE_SRC_FILES) -AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/source/sampler MINDDATA_ENGINE_DATASETOPS_SOURCE_SAMPLER_SRC_FILES) +AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/source/sampler + MINDDATA_ENGINE_DATASETOPS_SOURCE_SAMPLER_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/ir/cache MINDDATA_ENGINE_IR_CACHE_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/ir/datasetops MINDDATA_ENGINE_IR_DATASETOPS_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/ir/datasetops/source MINDDATA_ENGINE_IR_DATASETOPS_SOURCE_SRC_FILES) @@ -328,10 +338,10 @@ elseif(BUILD_MINDDATA STREQUAL "wrapper") ${MINDDATA_DIR}/core/tensor_helpers.cc ${MINDDATA_DIR}/core/global_context.cc ${MINDDATA_DIR}/core/tensor_row.cc + ${MINDDATA_DIR}/core/de_tensor.cc ${MINDDATA_DIR}/api/vision.cc ${MINDDATA_DIR}/api/execute.cc ${MINDDATA_DIR}/api/transforms.cc - ${MINDDATA_DIR}/api/de_tensor.cc ${MINDDATA_DIR}/util/path.cc ${MINDDATA_DIR}/util/status.cc ${MINDDATA_DIR}/util/data_helper.cc @@ -356,11 +366,19 @@ elseif(BUILD_MINDDATA STREQUAL "wrapper") ${CMAKE_CURRENT_SOURCE_DIR}/wrapper/album_op_android.cc ) + set(MINDSPORE_LITE_CXXAPI_SRC + ${CORE_DIR}/utils/status.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../src/cxx_api/types.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../src/cxx_api/tensor/tensor_impl.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../src/tensor.cc + ) + add_library(minddata-lite SHARED ${MINDDATA_KERNELS_IMAGE_LITE_CV_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/../src/common/log_adapter.cc ${CORE_DIR}/utils/ms_utils.cc ${MINDDATA_TODAPI_SRC} + ${MINDSPORE_LITE_CXXAPI_SRC} ) find_package(Threads REQUIRED) @@ -389,7 +407,7 @@ elseif(BUILD_MINDDATA STREQUAL "lite") list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES "${MINDDATA_DIR}/core/client.cc") list(REMOVE_ITEM MINDDATA_KERNELS_SRC_FILES "${MINDDATA_DIR}/kernels/py_func_op.cc") add_library(minddata_eager_mid OBJECT - ${MINDDATA_DIR}/api/de_tensor.cc + ${MINDDATA_DIR}/core/de_tensor.cc ${MINDDATA_DIR}/api/execute.cc ) list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES diff --git a/mindspore/lite/minddata/wrapper/MDToDApi.cc b/mindspore/lite/minddata/wrapper/MDToDApi.cc index a8dcb180b60..cddb71c74c0 100644 --- a/mindspore/lite/minddata/wrapper/MDToDApi.cc +++ b/mindspore/lite/minddata/wrapper/MDToDApi.cc @@ -26,9 +26,12 @@ #include "album_op_android.h" //NOLINT #include "minddata/dataset/include/execute.h" +#include "minddata/dataset/include/type_id.h" #include "minddata/dataset/util/path.h" #include "minddata/dataset/include/vision.h" #include "minddata/dataset/util/data_helper.h" +#include "minddata/dataset/core/de_tensor.h" +#include "include/api/types.h" #if defined(__ANDROID__) || defined(ANDROID) #include #include @@ -45,9 +48,9 @@ using mindspore::MsLogLevel::DEBUG; using mindspore::MsLogLevel::ERROR; using mindspore::MsLogLevel::INFO; +using mindspore::Status; using 
mindspore::dataset::BorderType; using mindspore::dataset::InterpolationMode; -using mindspore::dataset::Status; class MDToDApi { public: @@ -60,11 +63,11 @@ class MDToDApi { public: MDToDApi() : _iter(nullptr), _augs({}), _storage_folder(""), _file_id(-1), _hasBatch(false) { - MS_LOG(INFO) << "MDToDAPI Call constractor"; + MS_LOG(INFO) << "MDToDAPI Call constructor"; } ~MDToDApi() { MS_LOG(INFO) << "MDToDAPI Call destractor"; - // derefernce dataset and iterator + // dereference dataset and iterator _augs.clear(); } }; @@ -257,7 +260,7 @@ extern "C" int MDToDApi_GetNext(MDToDApi *pMDToDApi, MDToDResult_t *results) { return -1; } - // Set defualt + // Set default results->fileid = -1; results->embeddingBuff.DataSize = 0; results->imageBuff.DataSize = 0; @@ -287,12 +290,17 @@ extern "C" int MDToDApi_GetNext(MDToDApi *pMDToDApi, MDToDResult_t *results) { if (orientation > 1) { RotateOperation *p = static_cast(pMDToDApi->_augs[i].get()); p->setAngle(orientation); - orientation = 0; // clear oriation filed if allready preformed + orientation = 0; // clear orientation field if already performed } else { continue; } } - row["image"] = mindspore::dataset::Execute((pMDToDApi->_augs)[i])(std::move(row["image"])); + mindspore::MSTensor image(std::make_shared(row["image"])); + (void)mindspore::dataset::Execute((pMDToDApi->_augs)[i])(image, &image); + mindspore::dataset::Tensor::CreateFromMemory( + mindspore::dataset::TensorShape(image.Shape()), + mindspore::dataset::MSTypeToDEType(static_cast(image.DataType())), + (const uint8_t *)(image.Data().get()), &(row["image"])); if (row["image"] == nullptr) { // nullptr means that the eager mode image processing failed, we fail in this case return -1; @@ -324,7 +332,7 @@ extern "C" int MDToDApi_GetNext(MDToDApi *pMDToDApi, MDToDResult_t *results) { extern "C" int MDToDApi_Stop(MDToDApi *pMDToDApi) { // Manually terminate the pipeline - MS_LOG(INFO) << "pipline stoped"; + MS_LOG(INFO) << "pipeline stopped"; return 0; } @@ -338,7 +346,7 @@ extern "C" int MDToDApi_Destroy(MDToDApi *pMDToDApi) { int GetJsonFullFileName(const MDToDApi *pMDToDApi, std::string *filePath) { int64_t file_id = pMDToDApi->_file_id; if (file_id < 0) { - MS_LOG(ERROR) << "Illigal file ID to update: " << file_id << "."; + MS_LOG(ERROR) << "Illegal file ID to update: " << file_id << "."; return -1; } std::string converted = std::to_string(pMDToDApi->_file_id); @@ -407,7 +415,7 @@ extern "C" int MDToDApi_UpdateFloatArray(MDToDApi *pMDToDApi, const char *column auto columnName = std::string(column); std::string file_path; if (0 != GetJsonFullFileName(pMDToDApi, &file_path)) { - MS_LOG(ERROR) << "Faile to updaet " << columnName; + MS_LOG(ERROR) << "Failed to update " << columnName; return -1; } MS_LOG(INFO) << "Start Update float Array column: " << columnName << " in file " << file_path; diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index ec554a5bce1..47a52f3fba3 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -16,7 +16,20 @@ if(PLATFORM_ARM32 OR PLATFORM_ARM64) endif() endif() +set(API_SRC + ${CORE_DIR}/utils/status.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/cell.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/serialization.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/types.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/lite_context.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/model/model.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/model/model_impl.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/graph/graph.cc +
${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/tensor/tensor_impl.cc +) + set(LITE_SRC + ${API_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/common/file_utils.cc ${CMAKE_CURRENT_SOURCE_DIR}/common/utils.cc ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc diff --git a/mindspore/lite/src/cxx_api/cell.cc b/mindspore/lite/src/cxx_api/cell.cc new file mode 100644 index 00000000000..ec3a3c3bcdc --- /dev/null +++ b/mindspore/lite/src/cxx_api/cell.cc @@ -0,0 +1,95 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "include/api/cell.h" +#include "include/api/lite_context.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +class GraphImpl {}; + +std::vector CellBase::operator()(const std::vector &inputs) const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported feature."; + return empty; +} + +ParameterCell::ParameterCell(const ParameterCell &cell) { MS_LOG(ERROR) << "Unsupported feature."; } +ParameterCell &ParameterCell::operator=(const ParameterCell &cell) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +ParameterCell::ParameterCell(ParameterCell &&cell) { MS_LOG(ERROR) << "Unsupported feature."; } + +ParameterCell &ParameterCell::operator=(ParameterCell &&cell) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +ParameterCell::ParameterCell(const MSTensor &tensor) { MS_LOG(ERROR) << "Unsupported feature."; } + +ParameterCell &ParameterCell::operator=(const MSTensor &tensor) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +ParameterCell::ParameterCell(MSTensor &&tensor) : tensor_(tensor) { MS_LOG(ERROR) << "Unsupported feature."; } + +ParameterCell &ParameterCell::operator=(MSTensor &&tensor) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +GraphCell::GraphCell(const Graph &graph) : graph_(std::shared_ptr(new (std::nothrow) Graph(graph))) { + if (graph_ == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } +} + +GraphCell::GraphCell(const std::shared_ptr &graph) : graph_(graph) { + if (graph_ == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } +} + +GraphCell::GraphCell(Graph &&graph) : graph_(std::shared_ptr(new (std::nothrow) Graph(graph))) { + if (graph_ == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } +} + +Status GraphCell::Run(const std::vector &inputs, std::vector *outputs) { + MS_LOG(ERROR) << "Unsupported feature."; + return kLiteError; +} + +Status GraphCell::Load() { + MS_LOG(ERROR) << "Unsupported feature."; + return kLiteError; +} + +InputAndOutput::InputAndOutput() { MS_LOG(ERROR) << "Unsupported feature."; } + +InputAndOutput::InputAndOutput(const MSTensor &tensor) { MS_LOG(ERROR) << "Unsupported feature."; } +InputAndOutput::InputAndOutput(MSTensor &&tensor) { MS_LOG(ERROR) << "Unsupported feature."; } + +InputAndOutput::InputAndOutput(const std::shared_ptr &cell, const std::vector &prev, + int32_t index) { + MS_LOG(ERROR) << "Unsupported feature."; +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/graph/graph.cc 
b/mindspore/lite/src/cxx_api/graph/graph.cc new file mode 100644 index 00000000000..cdacd62df53 --- /dev/null +++ b/mindspore/lite/src/cxx_api/graph/graph.cc @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "include/api/graph.h" +#include "include/api/cell.h" +#include "src/cxx_api/graph/graph_data.h" + +namespace mindspore { + +Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} + +Graph::Graph(std::shared_ptr &&graph_data) : graph_data_(graph_data) {} + +Graph::~Graph() {} + +Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {} + +bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } + +ModelType Graph::ModelType() const { return graph_data_->ModelType(); } +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/graph/graph_data.h b/mindspore/lite/src/cxx_api/graph/graph_data.h new file mode 100644 index 00000000000..fdd2aec5162 --- /dev/null +++ b/mindspore/lite/src/cxx_api/graph/graph_data.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H +#define MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H + +#include +#include +#include +#include +#include "include/api/graph.h" +#include "include/api/types.h" +#include "src/lite_model.h" + +namespace mindspore { +class Graph::GraphData { + public: + GraphData() : lite_model_(nullptr) {} + + explicit GraphData(std::shared_ptr model) : lite_model_(model) {} + + ~GraphData() = default; + + std::shared_ptr lite_model() { return lite_model_; } + + enum ModelType ModelType() const { return kMindIR; } + + private: + std::shared_ptr lite_model_; +}; +} // namespace mindspore +#endif // MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H diff --git a/mindspore/lite/src/cxx_api/lite_context.cc b/mindspore/lite/src/cxx_api/lite_context.cc new file mode 100644 index 00000000000..ac1aa80aa9d --- /dev/null +++ b/mindspore/lite/src/cxx_api/lite_context.cc @@ -0,0 +1,303 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "include/api/lite_context.h" +#include +#include +#include +#include "include/api/types.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +constexpr char kVendorName[] = "vendor_name"; +constexpr char kThreadNum[] = "thread_name"; +constexpr char kAllocator[] = "allocator"; +constexpr char kCPU[] = "cpu"; +constexpr char kCPUEanbleFp16[] = "cpu_enable_fp16"; +constexpr char kCPUBindMode[] = "cpu_bind_mode"; +constexpr char kGPU[] = "gpu"; +constexpr char kGPUEanbleFp16[] = "gpu_enable_fp16"; +constexpr char kNPU[] = "npu"; +constexpr char kNPUFrequency[] = "npu_frequency"; + +void Context::Clear(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + context->context_.clear(); +} + +void Context::SetAsDefault(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + context->context_.clear(); + context->context_.emplace(kCPU, true); +} + +void Context::SetVendorName(const std::shared_ptr &context, const std::string &name) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kVendorName); + if (iter != context->context_.end()) { + iter->second = name; + } else { + context->context_.emplace(kVendorName, name); + } +} + +std::string Context::GetVendorName(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return std::string(); + } + auto iter = context->context_.find(kVendorName); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return std::string(); +} + +void Context::SetThreadNum(const std::shared_ptr &context, int num) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kThreadNum); + if (iter != context->context_.end()) { + iter->second = num; + } else { + context->context_.emplace(kThreadNum, num); + } +} + +int Context::GetThreadNum(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return 0; + } + auto iter = context->context_.find(kThreadNum); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return 2; +} + +void Context::SetAllocator(const std::shared_ptr &context, std::shared_ptr alloc) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kAllocator); + if (iter != context->context_.end()) { + iter->second = alloc; + } else { + context->context_.emplace(kAllocator, alloc); + } +} + +std::shared_ptr Context::GetAllocator(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return nullptr; + } + auto iter = context->context_.find(kAllocator); + if (iter != context->context_.end()) { + return std::any_cast>(iter->second); + } + return nullptr; +} + +void Context::ConfigCPU(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is 
nullptr."; + return; + } + auto iter = context->context_.find(kCPU); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kCPU, conf); + } +} + +bool Context::IfCPUEnabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kCPU); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::ConfigCPUFp16(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kCPUEanbleFp16); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kCPUEanbleFp16, conf); + } +} + +bool Context::IfCPUFp16Enabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kCPUEanbleFp16); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::SetCPUBindMode(const std::shared_ptr &context, lite::CpuBindMode mode) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kCPUBindMode); + if (iter != context->context_.end()) { + iter->second = mode; + } else { + context->context_.emplace(kCPUBindMode, mode); + } +} + +lite::CpuBindMode Context::GetCPUBindMode(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return lite::NO_BIND; + } + auto iter = context->context_.find(kCPUBindMode); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return lite::MID_CPU; +} + +void Context::ConfigGPU(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kGPU); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kGPU, conf); + } +} + +bool Context::IfGPUEnabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kGPU); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::ConfigGPUFp16(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kGPUEanbleFp16); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kGPUEanbleFp16, conf); + } +} + +bool Context::IfGPUFp16Enabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kGPUEanbleFp16); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::ConfigNPU(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kNPU); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kNPU, conf); + } +} + +bool Context::IfNPUEnabled(const std::shared_ptr &context) { + if (context == nullptr) { + 
MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kNPU); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::SetNPUFrequency(const std::shared_ptr &context, int freq) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kNPUFrequency); + if (iter != context->context_.end()) { + iter->second = true; + } else { + context->context_.emplace(kNPUFrequency, true); + } +} + +int Context::GetNPUFrequency(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return 0; + } + auto iter = context->context_.find(kNPUFrequency); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return 3; +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model.cc b/mindspore/lite/src/cxx_api/model/model.cc new file mode 100644 index 00000000000..7d564c2410b --- /dev/null +++ b/mindspore/lite/src/cxx_api/model/model.cc @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "include/api/model.h" +#include "include/api/lite_context.h" +#include "src/cxx_api/model/model_impl.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +Status Model::Build() { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return kLiteNullptr; + } + return impl_->Build(); +} + +Status Model::Resize(const std::vector &inputs, const std::vector> &dims) { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return kLiteNullptr; + } + return impl_->Resize(inputs, dims); +} + +Status Model::Predict(const std::vector &inputs, std::vector *outputs) { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return kLiteNullptr; + } + return impl_->Predict(inputs, outputs); +} + +Model::Model(const GraphCell &graph, const std::shared_ptr &model_context) { + impl_ = std::shared_ptr(new (std::nothrow) ModelImpl()); + if (impl_ == nullptr || graph.GetGraph() == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } else { + if (model_context == nullptr) { + MS_LOG(INFO) << "Invalid context, use default context."; + auto context = std::shared_ptr(new (std::nothrow) Context()); + Context::SetAsDefault(context); + impl_->SetContext(context); + } else { + impl_->SetContext(model_context); + } + auto new_graph_cell = std::shared_ptr(new (std::nothrow) GraphCell(graph)); + if (new_graph_cell != nullptr) { + impl_->SetGraphCell(new_graph_cell); + } else { + MS_LOG(ERROR) << "New graphcell failed."; + } + } +} + +Model::Model(const std::vector &network, const std::shared_ptr &model_context) { + MS_LOG(ERROR) << "Unsupported feature."; +} + +Model::~Model() {} + +bool Model::CheckModelSupport(const std::string &device_type, ModelType) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; +} + +std::vector 
Model::GetInputs() { + std::vector empty; + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return empty; + } + return impl_->GetInputs(); +} + +std::vector Model::GetOutputs() { + std::vector empty; + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return empty; + } + return impl_->GetOutputs(); +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model_impl.cc b/mindspore/lite/src/cxx_api/model/model_impl.cc new file mode 100644 index 00000000000..989c8e38971 --- /dev/null +++ b/mindspore/lite/src/cxx_api/model/model_impl.cc @@ -0,0 +1,241 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/cxx_api/model/model_impl.h" +#include +#include +#include +#include "include/api/types.h" +#include "include/api/lite_context.h" +#include "include/lite_session.h" +#include "include/context.h" +#include "src/lite_model.h" +#include "src/runtime/allocator.h" +#include "src/cxx_api/utils.h" +#include "src/cxx_api/graph/graph_data.h" +#include "src/cxx_api/tensor/tensor_impl.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; + +Status ModelImpl::Build() { + MS_LOG(DEBUG) << "Start build model."; + if (graph_cell_ == nullptr || graph_cell_->GetGraph() == nullptr || graph_cell_->GetGraph()->graph_data_ == nullptr) { + MS_LOG(ERROR) << "Graph cell is invalid."; + return kLiteNullptr; + } + auto model = graph_cell_->GetGraph()->graph_data_->lite_model(); + if (model == nullptr) { + MS_LOG(ERROR) << "Lite model is nullptr."; + return kLiteNullptr; + } + if (model->buf == nullptr) { + MS_LOG(ERROR) << "Lite model has been freed."; + return kLiteError; + } + if (session_ != nullptr) { + MS_LOG(DEBUG) << "Model has been already built."; + return kSuccess; + } + if (context_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return kLiteNullptr; + } + lite::Context model_context; + model_context.allocator = Context::GetAllocator(context_); + if (model_context.allocator == nullptr) { + model_context.allocator = lite::Allocator::Create(); + if (model_context.allocator == nullptr) { + MS_LOG(ERROR) << "Create Allocator failed."; + return kLiteNullptr; + } + MS_LOG(DEBUG) << "Set new allocator."; + Context::SetAllocator(context_, model_context.allocator); + } + model_context.vendor_name_ = Context::GetVendorName(context_); + model_context.thread_num_ = Context::GetThreadNum(context_); + model_context.device_list_.clear(); + if (Context::IfCPUEnabled(context_) && Context::IfGPUEnabled(context_) && Context::IfNPUEnabled(context_)) { + MS_LOG(INFO) << "CPU/GPU/NPU cannot be enabled at the same time."; + } + if (!Context::IfCPUEnabled(context_)) { + MS_LOG(INFO) << "CPU is forced to be enabled."; + } + lite::DeviceInfo cpu_info = { + .cpu_device_info_ = {Context::IfCPUFp16Enabled(context_), Context::GetCPUBindMode(context_)}}; + model_context.device_list_.push_back({lite::DT_CPU, 
cpu_info}); + if (Context::IfGPUEnabled(context_)) { + lite::DeviceInfo gpu_info = {.gpu_device_info_ = {Context::IfGPUFp16Enabled(context_)}}; + model_context.device_list_.push_back({lite::DT_GPU, gpu_info}); + } + if (Context::IfNPUEnabled(context_)) { + lite::DeviceInfo npu_info = {.npu_device_info_ = {Context::GetNPUFrequency(context_)}}; + model_context.device_list_.push_back({lite::DT_NPU, npu_info}); + } + auto session = std::shared_ptr(session::LiteSession::CreateSession(&model_context)); + if (session == nullptr) { + MS_LOG(ERROR) << "Allocate session failed."; + return kLiteNullptr; + } + auto ret = session->CompileGraph(model.get()); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Build model failed."; + return static_cast(ret); + } + session_.swap(session); + model->Free(); + MS_LOG(DEBUG) << "Build model success."; + return kSuccess; +} + +Status ModelImpl::Predict(const std::vector &inputs, std::vector *outputs) { + if (session_ == nullptr) { + MS_LOG(ERROR) << "Run graph failed."; + return kLiteError; + } + auto input_tensors = session_->GetInputs(); + if (input_tensors.empty()) { + MS_LOG(ERROR) << "Failed to get input tensor."; + return kLiteError; + } + if (input_tensors.size() != inputs.size()) { + MS_LOG(ERROR) << "Wrong input size."; + return kLiteError; + } + std::vector old_data; + for (size_t i = 0; i < inputs.size(); i++) { + auto input = input_tensors.at(i); + auto user_input = inputs.at(i); + if (user_input.Name() != input->tensor_name()) { + MS_LOG(WARNING) << "Tensor " << user_input.Name() << " has a different name from input" << input->tensor_name() + << "."; + } + old_data.push_back(input->MutableData()); + if (user_input.MutableData() != input->MutableData()) { + if (input->Size() != user_input.DataSize()) { + for (size_t j = 0; j < old_data.size(); j++) { + input_tensors.at(j)->set_data(old_data.at(j)); + } + MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has wrong data size."; + return kLiteInputTensorError; + } + if (user_input.impl_->need_copy()) { + ::memcpy(input->MutableData(), user_input.MutableData(), input->Size()); + } else { + input->set_data(user_input.MutableData()); + } + } + } + auto ret = session_->RunGraph(); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Run graph failed."; + return static_cast(ret); + } + MS_LOG(DEBUG) << "Run graph success."; + for (size_t i = 0; i < old_data.size(); i++) { + input_tensors.at(i)->set_data(old_data.at(i)); + } + auto res = GetOutputs(); + if (res.empty()) { + MS_LOG(DEBUG) << "Empty outputs."; + return kLiteError; + } + outputs->insert(outputs->end(), res.begin(), res.end()); + return kSuccess; +} + +std::vector ModelImpl::GetInputs() { + std::vector empty; + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return empty; + } + std::vector res; + auto inputs = session_->GetInputs(); + for (auto input : inputs) { + auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(input)); + if (impl == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + auto tensor = MSTensor(impl); + if (tensor == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + res.push_back(tensor); + } + return res; +} + +std::vector ModelImpl::GetOutputs() { + std::vector empty; + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return empty; + } + std::vector res; + auto names = session_->GetOutputTensorNames(); + auto outputs = session_->GetOutputs(); + for (auto name : names) { + auto impl = std::shared_ptr(new (std::nothrow) 
MSTensor::Impl(outputs[name])); + if (impl == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + auto tensor = MSTensor(impl); + if (tensor == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + res.push_back(tensor); + } + return res; +} + +Status ModelImpl::Resize(const std::vector &inputs, const std::vector> &dims) { + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return kLiteNullptr; + } + if (inputs.size() != dims.size()) { + MS_LOG(ERROR) << "The size of inputs is not equal to the size of dims."; + return kLiteParamInvalid; + } + std::vector inner_input; + for (auto input : inputs) { + if (input.impl_ == nullptr || input.impl_->lite_tensor() == nullptr) { + MS_LOG(ERROR) << "Input tensor " << input.Name() << " is null."; + return kLiteInputTensorError; + } + inner_input.push_back(input.impl_->lite_tensor()); + } + std::vector> truncated_shape; + for (size_t i = 0; i < inner_input.size(); i++) { + std::vector tmp = TruncateShape(dims.at(i), inner_input.at(i)->data_type(), inner_input.at(i)->Size()); + if (tmp.empty()) { + MS_LOG(ERROR) << "Input dims[" << i << "]is invalid."; + return kLiteParamInvalid; + } + truncated_shape.push_back(tmp); + } + auto ret = session_->Resize(inner_input, truncated_shape); + return static_cast(ret); +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model_impl.h b/mindspore/lite/src/cxx_api/model/model_impl.h new file mode 100644 index 00000000000..0309f1e867a --- /dev/null +++ b/mindspore/lite/src/cxx_api/model/model_impl.h @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H +#define MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H +#include +#include +#include +#include +#include +#include +#include +#include "include/api/model.h" +#include "include/api/lite_context.h" +#include "include/api/cell.h" +#include "include/lite_session.h" + +namespace mindspore { +class ModelImpl { + public: + ModelImpl() : graph_cell_(nullptr), session_(nullptr), context_(nullptr) {} + ~ModelImpl() = default; + + Status Build(); + Status Resize(const std::vector &inputs, const std::vector> &dims); + + Status Predict(const std::vector &inputs, std::vector *outputs); + + std::vector GetInputs(); + std::vector GetOutputs(); + + static bool CheckModelSupport(const std::string &device_type, ModelType model_type); + + private: + friend class Model; + std::shared_ptr graph_cell_; + std::shared_ptr session_; + std::shared_ptr context_; + void SetGraphCell(const std::shared_ptr &graph_cell) { graph_cell_ = graph_cell; } + void SetContext(const std::shared_ptr &context) { context_ = context; } +}; +} // namespace mindspore + +#endif // MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H diff --git a/mindspore/lite/src/cxx_api/serialization.cc b/mindspore/lite/src/cxx_api/serialization.cc new file mode 100644 index 00000000000..660cf107acb --- /dev/null +++ b/mindspore/lite/src/cxx_api/serialization.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "include/api/serialization.h" +#include +#include +#include +#include "include/api/graph.h" +#include "include/api/lite_context.h" +#include "include/api/types.h" +#include "include/model.h" +#include "include/ms_tensor.h" +#include "src/cxx_api/graph/graph_data.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) { + if (model_type != kMindIR) { + MS_LOG(ERROR) << "Unsupported IR."; + return Graph(nullptr); + } + auto model = std::shared_ptr(lite::Model::Import(static_cast(model_data), data_size)); + if (model == nullptr) { + MS_LOG(ERROR) << "New model failed."; + return Graph(nullptr); + } + auto graph_data = std::shared_ptr(new (std::nothrow) Graph::GraphData(model)); + if (graph_data == nullptr) { + MS_LOG(ERROR) << "New graph data failed."; + return Graph(nullptr); + } + Graph graph = Graph(graph_data); + return graph; +} + +Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { + MS_LOG(ERROR) << "Unsupported Feature."; + return Graph(nullptr); +} + +Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map *parameters) { + MS_LOG(ERROR) << "Unsupported feature."; + return kMEFailed; +} + +Status Serialization::SetParameters(const std::map ¶meters, Model *model) { + MS_LOG(ERROR) << "Unsupported feature."; + return kMEFailed; +} + +Status Serialization::ExportModel(const Model &model, ModelType model_type, Buffer *model_data) { + MS_LOG(ERROR) << "Unsupported feature."; + return kMEFailed; +} + +Status Serialization::ExportModel(const Model &model, ModelType model_type, const std::string &model_file) { + MS_LOG(ERROR) << "Unsupported feature."; + return kMEFailed; +} +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc new file mode 100644 index 00000000000..41a430bce51 --- /dev/null +++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/cxx_api/tensor/tensor_impl.h" +#include +#include +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/api/status.h" +#include "src/cxx_api/utils.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +MSTensor::Impl::Impl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) { + std::vector truncated_shape = TruncateShape(shape, static_cast(type), data_len); + if (!truncated_shape.empty()) { + lite_tensor_ = new (std::nothrow) lite::Tensor(name, static_cast(type), truncated_shape, data); + } else { + lite_tensor_ = nullptr; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.h b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h new file mode 100644 index 00000000000..ca248e9e852 --- /dev/null +++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h @@ -0,0 +1,140 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/api/status.h" +#include "include/ms_tensor.h" +#include "src/tensor.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +class MSTensor::Impl { + public: + Impl() {} + ~Impl() = default; + explicit Impl(tensor::MSTensor *tensor) : lite_tensor_(tensor) { + if (tensor != nullptr) { + tensor_name_ = tensor->tensor_name(); + } + } + + bool operator==(std::nullptr_t) const { return lite_tensor_ == nullptr; } + + Impl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len); + + const std::string &Name() const { + static std::string empty = ""; + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return empty; + } + return tensor_name_; + } + + enum DataType DataType() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return DataType::kTypeUnknown; + } + return static_cast(lite_tensor_->data_type()); + } + + int64_t ElementNum() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return -1; + } + return static_cast(lite_tensor_->ElementsNum()); + } + + const std::vector &Shape() { + static std::vector empty; + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return empty; + } + auto shape = lite_tensor_->shape(); + shape_.resize(shape.size()); + std::transform(shape.begin(), shape.end(), shape_.begin(), [](int c) { return static_cast(c); }); + return shape_; + } + + std::shared_ptr Data() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return nullptr; + } + + if (DataSize() == 0) { + MS_LOG(ERROR) << "Invalid data size."; + return nullptr; + } + + return std::shared_ptr(lite_tensor_->MutableData(), [](const void *) {}); + } + + void *MutableData() { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return nullptr; + } + return 
lite_tensor_->MutableData(); + } + size_t DataSize() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return 0; + } + return lite_tensor_->Size(); + } + + bool IsDevice() const { return false; } + + std::shared_ptr Clone() const { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; + } + + tensor::MSTensor *lite_tensor() { return lite_tensor_; } + + Status set_lite_tensor(tensor::MSTensor *tensor) { + if (tensor == nullptr) { + MS_LOG(ERROR) << "Tensor to set is null."; + return kLiteNullptr; + } + lite_tensor_ = tensor; + return kSuccess; + } + + void set_need_copy(bool need_copy) { need_copy_ = need_copy; } + + bool need_copy() { return need_copy_; } + + private: + tensor::MSTensor *lite_tensor_; + std::string tensor_name_; + std::vector shape_; + bool need_copy_ = true; +}; + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/types.cc b/mindspore/lite/src/cxx_api/types.cc new file mode 100644 index 00000000000..876780459b5 --- /dev/null +++ b/mindspore/lite/src/cxx_api/types.cc @@ -0,0 +1,199 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "include/api/types.h" +#include +#include +#include +#include "include/api/status.h" +#include "src/cxx_api/tensor/tensor_impl.h" +#include "src/tensor.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +class Buffer::Impl { + public: + Impl() : data_() { MS_LOG(ERROR) << "Unsupported feature."; } + ~Impl() = default; + Impl(const void *data, size_t data_len) { MS_LOG(ERROR) << "Unsupported feature."; } + + const void *Data() const { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; + } + void *MutableData() { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; + } + size_t DataSize() const { + MS_LOG(ERROR) << "Unsupported feature."; + return 0; + } + + bool ResizeData(size_t data_len) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; + } + + bool SetData(const void *data, size_t data_len) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; + } + + protected: + std::vector data_; +}; + +MSTensor::MSTensor() : impl_(std::make_shared()) {} +MSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {} +MSTensor::MSTensor(const std::shared_ptr &impl) : impl_(impl) {} +MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : impl_(std::make_shared(name, type, shape, data, data_len)) {} +MSTensor::~MSTensor() = default; + +bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; } + +MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + auto impl = std::make_shared(name, type, shape, data, data_len); + if (impl == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + return MSTensor(nullptr); + } + return MSTensor(impl); +} + +MSTensor MSTensor::CreateRefTensor(const 
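+// Illustrative note (not part of this patch): the tensor returned below only
+// borrows the caller's buffer. need_copy is cleared, so ModelImpl::Predict
+// hands the raw pointer to the session instead of memcpy-ing the data, and the
+// caller must keep the buffer alive for as long as the tensor is in use.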
std::string &name, enum DataType type, const std::vector<int64_t> &shape,
+                                   const void *data, size_t data_len) noexcept {
+  auto tensor = CreateTensor(name, type, shape, data, data_len);
+  if (tensor == nullptr) {
+    return MSTensor(nullptr);
+  }
+  tensor.impl_->set_need_copy(false);
+  return tensor;
+}
+
+MSTensor MSTensor::Clone() const {
+  MSTensor ret;
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    ret.impl_ = nullptr;
+    return ret;
+  }
+  ret.impl_ = impl_->Clone();
+  return ret;
+}
+
+const std::string &MSTensor::Name() const {
+  static std::string empty = "";
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    return empty;
+  }
+  return impl_->Name();
+}
+
+int64_t MSTensor::ElementNum() const {
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    return -1;
+  }
+  return impl_->ElementNum();
+}
+
+enum DataType MSTensor::DataType() const {
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    return DataType::kTypeUnknown;
+  }
+  return impl_->DataType();
+}
+
+const std::vector<int64_t> &MSTensor::Shape() const {
+  static std::vector<int64_t> empty;
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    return empty;
+  }
+  return impl_->Shape();
+}
+
+std::shared_ptr<const void> MSTensor::Data() const {
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    return nullptr;
+  }
+  return impl_->Data();
+}
+
+void *MSTensor::MutableData() {
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    return nullptr;
+  }
+  return impl_->MutableData();
+}
+
+size_t MSTensor::DataSize() const {
+  if (impl_ == nullptr) {
+    MS_LOG(ERROR) << "Invalid tensor implementation.";
+    return 0;
+  }
+  return impl_->DataSize();
+}
+
+bool MSTensor::IsDevice() const {
+  MS_LOG(ERROR) << "Unsupported feature.";
+  return false;
+}
+
+Buffer::Buffer() : impl_(std::make_shared<Impl>()) { MS_LOG(ERROR) << "Unsupported feature."; }
+Buffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared<Impl>(data, data_len)) {
+  MS_LOG(ERROR) << "Unsupported feature.";
+}
+Buffer::~Buffer() = default;
+
+Buffer Buffer::Clone() const {
+  MS_LOG(ERROR) << "Unsupported feature.";
+  return Buffer();
+}
+
+const void *Buffer::Data() const {
+  MS_LOG(ERROR) << "Unsupported feature.";
+  return nullptr;
+}
+
+void *Buffer::MutableData() {
+  MS_LOG(ERROR) << "Unsupported feature.";
+  return nullptr;
+}
+
+size_t Buffer::DataSize() const {
+  MS_LOG(ERROR) << "Unsupported feature.";
+  return 0;
+}
+
+bool Buffer::ResizeData(size_t data_len) {
+  MS_LOG(ERROR) << "Unsupported feature.";
+  return false;
+}
+
+bool Buffer::SetData(const void *data, size_t data_len) {
+  MS_LOG(ERROR) << "Unsupported feature.";
+  return false;
+}
+}  // namespace mindspore
diff --git a/mindspore/lite/src/cxx_api/utils.h b/mindspore/lite/src/cxx_api/utils.h
new file mode 100644
index 00000000000..03a6c5a5c50
--- /dev/null
+++ b/mindspore/lite/src/cxx_api/utils.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "src/tensor.h" + +namespace mindspore { +static std::vector TruncateShape(const std::vector &shape, enum TypeId type, size_t data_len) { + std::vector empty; + std::vector truncated_shape; + size_t element_size = lite::DataTypeSize(type); + for (auto i : shape) { + if (i < 0 || i > INT_MAX || element_size > INT_MAX / static_cast(i)) { + MS_LOG(ERROR) << "Invalid shape."; + return empty; + } else { + element_size *= static_cast(i); + truncated_shape.push_back(static_cast(i)); + } + } + if (element_size != data_len) { + MS_LOG(ERROR) << "Invalid data size."; + return empty; + } + return truncated_shape; +} + +} // namespace mindspore diff --git a/mindspore/lite/src/tensor.cc b/mindspore/lite/src/tensor.cc index 0fe2ae7b30a..a4a23c503cc 100644 --- a/mindspore/lite/src/tensor.cc +++ b/mindspore/lite/src/tensor.cc @@ -29,6 +29,11 @@ namespace lite { Tensor::Tensor(const TypeId data_type, std::vector shape, const schema::Format &format, Category category) : data_type_(data_type), shape_(std::move(shape)), format_(format), category_(category) {} +Tensor::Tensor(const std::string &name, enum TypeId type, const std::vector &shape, const void *data) + : tensor_name_(name), data_type_(type), shape_(std::move(shape)) { + data_ = const_cast(data); +} + int Tensor::CopyTensorData(const Tensor &src_tensor, Tensor *dst_tensor) { if (dst_tensor == nullptr) { MS_LOG(ERROR) << "dst_tensor is nullptr"; diff --git a/mindspore/lite/src/tensor.h b/mindspore/lite/src/tensor.h index 5f72854980d..544ebf8e204 100644 --- a/mindspore/lite/src/tensor.h +++ b/mindspore/lite/src/tensor.h @@ -56,6 +56,8 @@ class Tensor : public mindspore::tensor::MSTensor { Tensor(TypeId data_type, std::vector shape, const schema::Format &format = schema::Format::Format_NHWC, Category category = VAR); + Tensor(const std::string &name, enum TypeId type, const std::vector &shape, const void *data); + Tensor(const Tensor &tensor) = delete; Tensor(Tensor &&other) = delete; @@ -72,9 +74,9 @@ class Tensor : public mindspore::tensor::MSTensor { virtual bool operator==(const Tensor &tensor); - void set_tensor_name(std::string name) { tensor_name_ = name; } + void set_tensor_name(std::string name) override { tensor_name_ = name; } - std::string tensor_name() const { return tensor_name_; } + std::string tensor_name() const override { return tensor_name_; } TypeId data_type() const override { return data_type_; } @@ -117,7 +119,7 @@ class Tensor : public mindspore::tensor::MSTensor { return data_; } - virtual void set_data(void *data) { this->data_ = data; } + void set_data(void *data) override { this->data_ = data; } Category category() const { return this->category_; } diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc deleted file mode 100644 index a6cfa9a91ac..00000000000 --- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include -#include -#include "./securec.h" -#include "common/common_test.h" -#include "gtest/gtest.h" - -#include "mindspore/ccsrc/minddata/dataset/core/data_type.h" -#include "mindspore/ccsrc/minddata/dataset/core/tensor.h" -#include "mindspore/ccsrc/minddata/dataset/core/tensor_shape.h" -#include "mindspore/ccsrc/minddata/dataset/include/de_tensor.h" -#include "mindspore/lite/src/common/log_adapter.h" -#include "mindspore/lite/src/tensor.h" - -using MSTensor = mindspore::tensor::MSTensor; -using DETensor = mindspore::tensor::DETensor; -using LiteTensor = mindspore::lite::Tensor; -using Tensor = mindspore::dataset::Tensor; -using DataType = mindspore::dataset::DataType; -using TensorShape = mindspore::dataset::TensorShape; - -class MindDataTestTensorDE : public mindspore::CommonTest { - public: - MindDataTestTensorDE() {} -}; - -TEST_F(MindDataTestTensorDE, MSTensorBasic) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(t == std::dynamic_pointer_cast(ms_tensor)->tensor(), true); -} - -TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - std::shared_ptr lite_ms_tensor = - std::shared_ptr(std::dynamic_pointer_cast(ms_tensor)->ConvertToLiteTensor()); - // check if the lite_ms_tensor is the derived LiteTensor - LiteTensor *lite_tensor = static_cast(lite_ms_tensor.get()); - ASSERT_EQ(lite_tensor != nullptr, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorShape) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true); - ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorSize) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(ms_tensor->ElementsNum() == 6, true); - ASSERT_EQ(ms_tensor->Size() == 24, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorDataType) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorMutableData) { - std::vector x = {2.5, 2.5, 2.5, 2.5}; - std::shared_ptr t; - Tensor::CreateFromVector(x, TensorShape({2, 2}), &t); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - float *data = static_cast(ms_tensor->MutableData()); - std::vector tensor_vec(data, data + ms_tensor->ElementsNum()); - ASSERT_EQ(x == tensor_vec, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorCreateFromMemory) { - std::vector x = {2.5, 2.5, 2.5, 2.5}; - auto mem_tensor = DETensor::CreateFromMemory(mindspore::TypeId::kNumberTypeFloat32, {2, 2}, &x[0]); - ASSERT_EQ(mem_tensor->data_type() == 
mindspore::TypeId::kNumberTypeFloat32, true); -} diff --git a/mindspore/lite/test/ut/src/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc index 74cc8b177b2..0d5eba5088b 100644 --- a/mindspore/lite/test/ut/src/dataset/eager_test.cc +++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc @@ -24,6 +24,7 @@ #include "minddata/dataset/include/execute.h" #include "minddata/dataset/util/path.h" #include "mindspore/lite/src/common/log_adapter.h" +#include "include/api/types.h" using MSTensor = mindspore::tensor::MSTensor; using DETensor = mindspore::tensor::DETensor; @@ -58,16 +59,18 @@ TEST_F(MindDataTestEager, Test1) { while (dir_it->hasNext()) { Path v = dir_it->next(); // MS_LOG(WARNING) << v.toString() << "."; - std::shared_ptr image = std::shared_ptr(DETensor::CreateTensor(v.toString())); + std::shared_ptr de_tensor; + mindspore::dataset::Tensor::CreateFromFile(v.toString(), &de_tensor); + auto image = mindspore::MSTensor(std::make_shared(de_tensor)); - image = Execute(Decode())(image); + (void)Execute(Decode())(image, &image); EXPECT_TRUE(image != nullptr); - image = Execute(Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); + (void)Execute(Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image, &image); EXPECT_TRUE(image != nullptr); - image = Execute(Resize({224, 224}))(image); + (void)Execute(Resize({224, 224}))(image, &image); EXPECT_TRUE(image != nullptr); - EXPECT_EQ(image->DimensionSize(0), 224); - EXPECT_EQ(image->DimensionSize(1), 224); + EXPECT_EQ(image.Shape()[0], 224); + EXPECT_EQ(image.Shape()[1], 224); } auto t_end = std::chrono::high_resolution_clock::now(); double elapsed_time_ms = std::chrono::duration(t_end - t_start).count(); diff --git a/mindspore/lite/tools/converter/quantizer/quant_cast.h b/mindspore/lite/tools/converter/quantizer/quant_cast.h index 9445f27b101..164ea28d2c0 100644 --- a/mindspore/lite/tools/converter/quantizer/quant_cast.h +++ b/mindspore/lite/tools/converter/quantizer/quant_cast.h @@ -19,7 +19,7 @@ #include "mindspore/core/ir/anf.h" #include "mindspore/lite/include/errorcode.h" -#include "mindspore/core/ir/dtype/type_id.h" +#include "ir/dtype/type_id.h" #include "mindspore/core/ir/func_graph.h" namespace mindspore::lite::quant { diff --git a/tests/st/cpp/common/common_test.cc b/tests/st/cpp/common/common_test.cc index ad8de4d3226..24a999a0ba6 100644 --- a/tests/st/cpp/common/common_test.cc +++ b/tests/st/cpp/common/common_test.cc @@ -61,7 +61,7 @@ void Common::ReadFile(const char *file, size_t *size, char **buf) { void Common::ContextAutoSet() { auto device_target = GetEnv("DEVICE_TARGET"); if (device_target.empty()) { - device_target = mindspore::api::kDeviceTypeAscend310; // default is 310 + device_target = mindspore::kDeviceTypeAscend310; // default is 310 } auto device_id_str = GetEnv("DEVICE_ID"); @@ -70,7 +70,8 @@ void Common::ContextAutoSet() { } uint32_t device_id = std::strtoul(device_id_str.c_str(), nullptr, 10); - mindspore::api::Context::Instance().SetDeviceTarget(device_target).SetDeviceID(device_id); + mindspore::GlobalContext::SetGlobalDeviceTarget(device_target); + mindspore::GlobalContext::SetGlobalDeviceID(device_id); } } // namespace ST diff --git a/tests/st/cpp/data/dataset/apple.jpg b/tests/st/cpp/data/dataset/apple.jpg new file mode 100644 index 00000000000..023bc503160 Binary files /dev/null and b/tests/st/cpp/data/dataset/apple.jpg differ diff --git a/tests/st/cpp/dataset/test_de.cc b/tests/st/cpp/dataset/test_de.cc index 57b4f0f75ba..3077b98fe0d 100644 --- 
a/tests/st/cpp/dataset/test_de.cc +++ b/tests/st/cpp/dataset/test_de.cc @@ -17,14 +17,14 @@ #include #include "common/common_test.h" #include "include/api/types.h" -#include "minddata/dataset/include/minddata_eager.h" +#include "minddata/dataset/include/execute.h" #include "minddata/dataset/include/vision.h" #include "minddata/dataset/kernels/tensor_op.h" #include "include/api/model.h" #include "include/api/serialization.h" #include "include/api/context.h" -using namespace mindspore::api; +using namespace mindspore; using namespace mindspore::dataset::vision; class TestDE : public ST::Common { @@ -33,59 +33,58 @@ class TestDE : public ST::Common { }; TEST_F(TestDE, TestResNetPreprocess) { - // Read images from target directory - std::vector> images; - MindDataEager::LoadImageFromDir("/home/workspace/mindspore_dataset/imagenet/imagenet_original/val/n01440764", - &images); + // Read images + std::shared_ptr de_tensor; + mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor); + auto image = mindspore::MSTensor(std::make_shared(de_tensor)); // Define transform operations - MindDataEager Transform({Decode(), Resize({224, 224}), - Normalize({0.485 * 255, 0.456 * 255, 0.406 * 255}, {0.229 * 255, 0.224 * 255, 0.225 * 255}), - HWC2CHW()}); + mindspore::dataset::Execute Transform({ + Decode(), Resize({224, 224}), + Normalize({0.485 * 255, 0.456 * 255, 0.406 * 255}, {0.229 * 255, 0.224 * 255, 0.225 * 255}), + HWC2CHW()}); // Apply transform on images - for (auto &img : images) { - img = Transform(img); - } + Status rc = Transform(image, &image); - // Check shape of result - ASSERT_NE(images.size(), 0); - ASSERT_EQ(images[0]->Shape().size(), 3); - ASSERT_EQ(images[0]->Shape()[0], 3); - ASSERT_EQ(images[0]->Shape()[1], 224); - ASSERT_EQ(images[0]->Shape()[2], 224); + // Check image info + ASSERT_TRUE(rc.IsOk()); + ASSERT_EQ(image.Shape().size(), 3); + ASSERT_EQ(image.Shape()[0], 3); + ASSERT_EQ(image.Shape()[1], 224); + ASSERT_EQ(image.Shape()[2], 224); } TEST_F(TestDE, TestDvpp) { - ContextAutoSet(); - +#ifdef ENABLE_ACL // Read images from target directory - std::vector> images; - MindDataEager::LoadImageFromDir("/home/workspace/mindspore_dataset/imagenet/imagenet_original/val/n01440764", - &images); + std::shared_ptr de_tensor; + mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor); + auto image = MSTensor(std::make_shared(de_tensor)); // Define dvpp transform std::vector crop_size = {224, 224}; std::vector resize_size = {256, 256}; - MindDataEager Transform({DvppDecodeResizeCropJpeg(crop_size, resize_size)}); + mindspore::dataset::Execute Transform(DvppDecodeResizeCropJpeg(crop_size, resize_size)); // Apply transform on images - for (auto &img : images) { - img = Transform(img); - ASSERT_NE(img, nullptr); - ASSERT_EQ(img->Shape().size(), 3); - int32_t real_h = 0; - int32_t real_w = 0; - int32_t remainder = crop_size[crop_size.size() - 1] % 16; - if (crop_size.size() == 1) { - real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; - real_w = (remainder == 0) ? crop_size[0] : crop_size[0] + 16 - remainder; - } else { - real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; - real_w = (remainder == 0) ? 
crop_size[1] : crop_size[1] + 16 - remainder; - } - ASSERT_EQ(img->Shape()[0], real_h * real_w * 1.5); // For image in YUV format, each pixel takes 1.5 byte - ASSERT_EQ(img->Shape()[1], 1); - ASSERT_EQ(img->Shape()[2], 1); + Status rc = Transform(image, &image); + + // Check image info + ASSERT_TRUE(rc.IsOk()); + ASSERT_EQ(image.Shape().size(), 3); + int32_t real_h = 0; + int32_t real_w = 0; + int32_t remainder = crop_size[crop_size.size() - 1] % 16; + if (crop_size.size() == 1) { + real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; + real_w = (remainder == 0) ? crop_size[0] : crop_size[0] + 16 - remainder; + } else { + real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; + real_w = (remainder == 0) ? crop_size[1] : crop_size[1] + 16 - remainder; } + ASSERT_EQ(image.Shape()[0], real_h * real_w * 1.5); // For image in YUV format, each pixel takes 1.5 byte + ASSERT_EQ(image.Shape()[1], 1); + ASSERT_EQ(image.Shape()[2], 1); +#endif } diff --git a/tests/st/cpp/model/test_tensor_add.cc b/tests/st/cpp/model/test_tensor_add.cc index fda2dba0e15..7afc3b4ed9e 100644 --- a/tests/st/cpp/model/test_tensor_add.cc +++ b/tests/st/cpp/model/test_tensor_add.cc @@ -20,7 +20,7 @@ #include "include/api/serialization.h" #include "include/api/context.h" -using namespace mindspore::api; +using namespace mindspore; static const char tensor_add_file[] = "/home/workspace/mindspore_dataset/mindir/tensor_add/tensor_add.mindir"; static const std::vector input_data_1 = {1, 2, 3, 4}; @@ -36,23 +36,42 @@ TEST_F(TestTensorAdd, InferMindIR) { auto graph = Serialization::LoadModel(tensor_add_file, ModelType::kMindIR); Model tensor_add((GraphCell(graph))); - Status ret = tensor_add.Build({}); - ASSERT_TRUE(ret == SUCCESS); + ASSERT_TRUE(tensor_add.Build() == kSuccess); + + // get model inputs + std::vector origin_inputs = tensor_add.GetInputs(); + ASSERT_EQ(origin_inputs.size(), 2); // prepare input - std::vector outputs; - std::vector inputs; - inputs.emplace_back(Buffer(input_data_1.data(), sizeof(float) * input_data_1.size())); - inputs.emplace_back(Buffer(input_data_2.data(), sizeof(float) * input_data_2.size())); + std::vector outputs; + std::vector inputs; + inputs.emplace_back(origin_inputs[0].Name(), origin_inputs[0].DataType(), origin_inputs[0].Shape(), + input_data_1.data(), sizeof(float) * input_data_1.size()); + inputs.emplace_back(origin_inputs[1].Name(), origin_inputs[1].DataType(), origin_inputs[1].Shape(), + input_data_2.data(), sizeof(float) * input_data_2.size()); // infer - ret = tensor_add.Predict(inputs, &outputs); - ASSERT_TRUE(ret == SUCCESS); + ASSERT_TRUE(tensor_add.Predict(inputs, &outputs) == kSuccess); - // print + // assert input + inputs = tensor_add.GetInputs(); + ASSERT_EQ(inputs.size(), 2); + auto after_input_data_1 = inputs[0].Data(); + auto after_input_data_2 = inputs[1].Data(); + const float *p = reinterpret_cast(after_input_data_1.get()); + for (size_t i = 0; i < inputs[0].DataSize() / sizeof(float); ++i) { + ASSERT_LE(std::abs(p[i] - input_data_1[i]), 1e-4); + } + p = reinterpret_cast(after_input_data_2.get()); + for (size_t i = 0; i < inputs[0].DataSize() / sizeof(float); ++i) { + ASSERT_LE(std::abs(p[i] - input_data_2[i]), 1e-4); + } + + // assert output for (auto &buffer : outputs) { - const float *p = reinterpret_cast(buffer.Data()); - for (size_t i = 0; i < buffer.DataSize() / sizeof(float); ++i) { + auto buffer_data = buffer.Data(); + p = reinterpret_cast(buffer_data.get()); + for (size_t i = 0; i < buffer.DataSize() / sizeof(float); ++i) { 
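+      // Each output element should equal the elementwise sum of the two input
+      // vectors, within a 1e-4 float tolerance.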
ASSERT_LE(std::abs(p[i] - (input_data_1[i] + input_data_2[i])), 1e-4); } } diff --git a/tests/ut/cpp/cxx_api/context_test.cc b/tests/ut/cpp/cxx_api/context_test.cc new file mode 100644 index 00000000000..8509f0457e6 --- /dev/null +++ b/tests/ut/cpp/cxx_api/context_test.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include "common/common_test.h" +#include "include/api/context.h" + +namespace mindspore { +class TestCxxApiContext : public UT::Common { + public: + TestCxxApiContext() = default; +}; + +TEST_F(TestCxxApiContext, test_context_global_context_SUCCESS) { + std::string device_target = "2333"; + uint32_t device_id = 2333; + GlobalContext::SetGlobalDeviceTarget(device_target); + ASSERT_EQ(GlobalContext::GetGlobalDeviceTarget(), device_target); + GlobalContext::SetGlobalDeviceID(device_id); + ASSERT_EQ(GlobalContext::GetGlobalDeviceID(), device_id); +} + +TEST_F(TestCxxApiContext, test_context_ascend310_context_SUCCESS) { + std::string option_1 = "aaa"; + std::string option_2 = "vvv"; + std::string option_3 = "www"; + auto option_4 = DataType::kNumberTypeEnd; + std::string option_5 = "rrr"; + std::string option_6 = "ppp"; + auto ctx = std::make_shared(); + ModelContext::SetInsertOpConfigPath(ctx, option_1); + ModelContext::SetInputFormat(ctx, option_2); + ModelContext::SetInputShape(ctx, option_3); + ModelContext::SetOutputType(ctx, option_4); + ModelContext::SetPrecisionMode(ctx, option_5); + ModelContext::SetOpSelectImplMode(ctx, option_6); + + ASSERT_EQ(ModelContext::GetInsertOpConfigPath(ctx), option_1); + ASSERT_EQ(ModelContext::GetInputFormat(ctx), option_2); + ASSERT_EQ(ModelContext::GetInputShape(ctx), option_3); + ASSERT_EQ(ModelContext::GetOutputType(ctx), option_4); + ASSERT_EQ(ModelContext::GetPrecisionMode(ctx), option_5); + ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), option_6); +} + +TEST_F(TestCxxApiContext, test_context_ascend310_context_nullptr_FAILED) { + auto ctx = std::make_shared(); + EXPECT_ANY_THROW(ModelContext::GetInsertOpConfigPath(nullptr)); +} + +TEST_F(TestCxxApiContext, test_context_ascend310_context_wrong_type_SUCCESS) { + auto ctx = std::make_shared(); + ctx->params["mindspore.option.op_select_impl_mode"] = 5; + ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), ""); +} + +TEST_F(TestCxxApiContext, test_context_ascend310_context_default_value_SUCCESS) { + auto ctx = std::make_shared(); + ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), ""); +} +} // namespace mindspore diff --git a/tests/ut/cpp/cxx_api/status_test.cc b/tests/ut/cpp/cxx_api/status_test.cc new file mode 100644 index 00000000000..aabd00fe5bc --- /dev/null +++ b/tests/ut/cpp/cxx_api/status_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include "common/common_test.h" +#define private public +#include "include/api/status.h" +#undef private + +namespace mindspore { +class TestCxxApiStatus : public UT::Common { + public: + TestCxxApiStatus() = default; +}; + +TEST_F(TestCxxApiStatus, test_status_base_SUCCESS) { + Status status_1; + ASSERT_TRUE(status_1 == kSuccess); + ASSERT_TRUE(status_1 == Status(kSuccess)); + ASSERT_EQ(status_1.operator bool(), true); + ASSERT_EQ(status_1.operator int(), kSuccess); + ASSERT_EQ(status_1.StatusCode(), kSuccess); + ASSERT_EQ(status_1.IsOk(), true); + ASSERT_EQ(status_1.IsError(), false); +} + +TEST_F(TestCxxApiStatus, test_status_msg_SUCCESS) { + std::string message = "2333"; + Status status_1(kMDSyntaxError, message); + ASSERT_EQ(status_1.IsError(), true); + ASSERT_EQ(status_1.ToString(), message); +} + +TEST_F(TestCxxApiStatus, test_status_ctor_SUCCESS) { + Status status_1; + Status status_2(kSuccess); + Status status_3(kSuccess, "2333"); + Status status_4(kSuccess, 1, "file", "2333"); + Status status_5 = Status::OK(); + ASSERT_TRUE(status_1 == status_2); + ASSERT_TRUE(status_1 == status_3); + ASSERT_TRUE(status_1 == status_4); + ASSERT_TRUE(status_1 == status_5); +} + +TEST_F(TestCxxApiStatus, test_status_string_SUCCESS) { + Status status_1(kMDSyntaxError); + ASSERT_EQ(Status::CodeAsString(status_1.StatusCode()), "Syntax error"); +} +} // namespace mindspore diff --git a/tests/ut/cpp/cxx_api/types_test.cc b/tests/ut/cpp/cxx_api/types_test.cc index c222bd5b307..d6c8a7d911e 100644 --- a/tests/ut/cpp/cxx_api/types_test.cc +++ b/tests/ut/cpp/cxx_api/types_test.cc @@ -15,7 +15,9 @@ */ #include #include "common/common_test.h" +#define private public #include "include/api/types.h" +#undef private namespace mindspore { class TestCxxApiTypes : public UT::Common { @@ -23,116 +25,120 @@ class TestCxxApiTypes : public UT::Common { TestCxxApiTypes() = default; }; -TEST_F(TestCxxApiTypes, test_tensor_set_name_SUCCESS) { - std::string tensor_name_before = "TEST1"; - std::string tensor_name_after = "TEST2"; - api::Tensor tensor1(tensor_name_before, api::DataType::kMsFloat32, {}, nullptr, 0); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // name - ASSERT_EQ(tensor1.Name(), tensor_name_before); - ASSERT_EQ(tensor2.Name(), tensor_name_before); - ASSERT_EQ(tensor3.Name(), tensor_name_before); - - tensor1.SetName(tensor_name_after); - ASSERT_EQ(tensor1.Name(), tensor_name_after); - ASSERT_EQ(tensor2.Name(), tensor_name_after); - ASSERT_EQ(tensor3.Name(), tensor_name_before); +TEST_F(TestCxxApiTypes, test_tensor_default_attr_SUCCESS) { + MSTensor tensor; + ASSERT_EQ(tensor.Name(), ""); + ASSERT_EQ(tensor.DataType(), DataType::kTypeUnknown); + ASSERT_EQ(tensor.Shape().size(), 0); + ASSERT_EQ(tensor.MutableData(), nullptr); + ASSERT_EQ(tensor.DataSize(), 0); + ASSERT_EQ(tensor.IsDevice(), false); } -TEST_F(TestCxxApiTypes, test_tensor_set_dtype_SUCCESS) { - api::Tensor tensor1("", api::DataType::kMsFloat32, {}, nullptr, 0); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // dtype - ASSERT_EQ(tensor1.DataType(), 
api::DataType::kMsFloat32); - ASSERT_EQ(tensor2.DataType(), api::DataType::kMsFloat32); - ASSERT_EQ(tensor3.DataType(), api::DataType::kMsFloat32); - - tensor1.SetDataType(api::DataType::kMsUint32); - ASSERT_EQ(tensor1.DataType(), api::DataType::kMsUint32); - ASSERT_EQ(tensor2.DataType(), api::DataType::kMsUint32); - ASSERT_EQ(tensor3.DataType(), api::DataType::kMsFloat32); +TEST_F(TestCxxApiTypes, test_tensor_attr_SUCCESS) { + std::string tensor_name = "Name1"; + auto data_type = DataType::kNumberTypeFloat16; + MSTensor tensor(tensor_name, data_type, {}, nullptr, 0); + ASSERT_EQ(tensor.Name(), tensor_name); + ASSERT_EQ(tensor.DataType(), data_type); + ASSERT_EQ(tensor.Shape().size(), 0); + ASSERT_EQ(tensor.MutableData(), nullptr); + ASSERT_EQ(tensor.DataSize(), 0); + ASSERT_EQ(tensor.IsDevice(), false); } -TEST_F(TestCxxApiTypes, test_tensor_set_shape_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - api::Tensor tensor1("", api::DataType::kMsFloat32, {}, nullptr, 0); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // shape - ASSERT_EQ(tensor1.Shape(), std::vector()); - ASSERT_EQ(tensor2.Shape(), std::vector()); - ASSERT_EQ(tensor3.Shape(), std::vector()); - - tensor1.SetShape(shape); - ASSERT_EQ(tensor1.Shape(), shape); - ASSERT_EQ(tensor2.Shape(), shape); - ASSERT_EQ(tensor3.Shape(), std::vector()); +TEST_F(TestCxxApiTypes, test_tensor_create_FAILED) { + MSTensor tensor(nullptr); + ASSERT_EQ(tensor, nullptr); } - -TEST_F(TestCxxApiTypes, test_tensor_util_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); - - // data - ASSERT_EQ(api::Tensor::GetTypeSize(api::DataType::kMsUint32), sizeof(uint32_t)); - ASSERT_EQ(tensor1.ElementNum(), 3 * 4 * 5 * 6); +TEST_F(TestCxxApiTypes, test_tensor_data_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + auto value = tensor.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_data_ref_and_copy_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // data - ASSERT_EQ(tensor1.DataSize(), tensor2.DataSize()); - ASSERT_EQ(tensor1.DataSize(), tensor3.DataSize()); - ASSERT_EQ(tensor1.Data(), tensor2.MutableData()); - ASSERT_NE(tensor1.Data(), tensor3.Data()); +TEST_F(TestCxxApiTypes, test_tensor_ref_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + MSTensor tensor2 = tensor; + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_resize_data_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); - - // data - ASSERT_EQ(tensor1.ResizeData(0), true); +TEST_F(TestCxxApiTypes, test_tensor_clone_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * 
sizeof(int32_t)); + MSTensor tensor2 = tensor.Clone(); + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_set_data_wrong_data_size_FAILED) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); - - // data - ASSERT_EQ(tensor1.SetData(nullptr, 1), false); - ASSERT_EQ(tensor1.SetData(data.data(), 0), false); +TEST_F(TestCxxApiTypes, test_tensor_ref_modified_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + std::vector data_modified = {2, 3, 4, 5}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + MSTensor tensor2 = tensor; + (void)memcpy(tensor.MutableData(), data_modified.data(), data_modified.size() * sizeof(int32_t)); + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data_modified[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_set_data_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); +TEST_F(TestCxxApiTypes, test_tensor_clone_modified_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + std::vector data_modified = {2, 3, 4, 5}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + MSTensor tensor2 = tensor.Clone(); + (void)memcpy(tensor.MutableData(), data_modified.data(), data_modified.size() * sizeof(int32_t)); + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } +} - // data - ASSERT_EQ(tensor1.SetData(nullptr, 0), true); - ASSERT_EQ(tensor1.SetData(data.data(), data.size() * sizeof(uint32_t)), true); +TEST_F(TestCxxApiTypes, test_tensor_ref_creator_function_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor = + MSTensor::CreateRefTensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + data = {3, 4, 5, 6}; + auto value = tensor.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } +} + +TEST_F(TestCxxApiTypes, test_tensor_creator_function_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor = + MSTensor::CreateTensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + data = {3, 4, 5, 6}; + auto value = tensor.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_NE(p[i], data[i]); + } } TEST_F(TestCxxApiTypes, test_buffer_data_ref_and_copy_SUCCESS) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); - api::Buffer buffer2 = buffer1; - api::Buffer buffer3 = buffer1.Clone(); + Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer2 = buffer1; + Buffer buffer3 = buffer1.Clone(); // data ASSERT_EQ(buffer1.DataSize(), buffer2.DataSize()); @@ -143,7 +149,7 @@ TEST_F(TestCxxApiTypes, test_buffer_data_ref_and_copy_SUCCESS) { TEST_F(TestCxxApiTypes, test_buffer_resize_data_SUCCESS) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer1(data.data(), data.size() * 
sizeof(uint32_t)); // data ASSERT_EQ(buffer1.ResizeData(0), true); @@ -151,7 +157,7 @@ TEST_F(TestCxxApiTypes, test_buffer_resize_data_SUCCESS) { TEST_F(TestCxxApiTypes, test_buffer_set_data_wrong_data_size_FAILED) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); // data ASSERT_EQ(buffer1.SetData(nullptr, 1), false); @@ -160,7 +166,7 @@ TEST_F(TestCxxApiTypes, test_buffer_set_data_wrong_data_size_FAILED) { TEST_F(TestCxxApiTypes, test_buffer_set_data_SUCCESS) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); // data ASSERT_EQ(buffer1.SetData(nullptr, 0), true); diff --git a/tests/ut/cpp/dataset/btree_test.cc b/tests/ut/cpp/dataset/btree_test.cc index 5e309354cf9..9a2271dcfa9 100644 --- a/tests/ut/cpp/dataset/btree_test.cc +++ b/tests/ut/cpp/dataset/btree_test.cc @@ -101,7 +101,7 @@ TEST_F(MindDataTestBPlusTree, Test1) { // Test duplicate key { rc = btree.DoInsert(100, "Expect error"); - EXPECT_EQ(rc, Status(StatusCode::kDuplicateKey)); + EXPECT_EQ(rc, Status(StatusCode::kMDDuplicateKey)); } } diff --git a/tests/ut/cpp/dataset/build_vocab_test.cc b/tests/ut/cpp/dataset/build_vocab_test.cc index a0d42e6f890..bcb4d436492 100644 --- a/tests/ut/cpp/dataset/build_vocab_test.cc +++ b/tests/ut/cpp/dataset/build_vocab_test.cc @@ -25,7 +25,7 @@ #include "minddata/dataset/text/vocab.h" using mindspore::dataset::Tensor; -using mindspore::dataset::Status; +using mindspore::Status; using mindspore::dataset::Vocab; class MindDataTestVocab : public UT::DatasetOpTesting { diff --git a/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc b/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc index 5ff421fb18f..e9328529d9c 100644 --- a/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc +++ b/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc @@ -17,7 +17,7 @@ #include "minddata/dataset/include/datasets.h" #include "minddata/dataset/core/global_context.h" -#include "mindspore/core/ir/dtype/type_id.h" +#include "ir/dtype/type_id.h" using namespace mindspore::dataset; diff --git a/tests/ut/cpp/dataset/c_api_text_test.cc b/tests/ut/cpp/dataset/c_api_text_test.cc index c368bcee213..e50419e23b2 100644 --- a/tests/ut/cpp/dataset/c_api_text_test.cc +++ b/tests/ut/cpp/dataset/c_api_text_test.cc @@ -27,7 +27,7 @@ using namespace mindspore::dataset; using mindspore::dataset::ShuffleMode; -using mindspore::dataset::Status; +using mindspore::Status; using mindspore::dataset::Tensor; using mindspore::dataset::Vocab; diff --git a/tests/ut/cpp/dataset/c_api_text_vocab_test.cc b/tests/ut/cpp/dataset/c_api_text_vocab_test.cc index a01d697153a..f54ed381f69 100644 --- a/tests/ut/cpp/dataset/c_api_text_vocab_test.cc +++ b/tests/ut/cpp/dataset/c_api_text_vocab_test.cc @@ -27,7 +27,7 @@ using namespace mindspore::dataset; using mindspore::dataset::DataType; using mindspore::dataset::ShuffleMode; -using mindspore::dataset::Status; +using mindspore::Status; using mindspore::dataset::Tensor; using mindspore::dataset::Vocab; diff --git a/tests/ut/cpp/dataset/cache_op_test.cc b/tests/ut/cpp/dataset/cache_op_test.cc index a85e6a6c333..1e1b66af782 100644 --- a/tests/ut/cpp/dataset/cache_op_test.cc +++ b/tests/ut/cpp/dataset/cache_op_test.cc @@ -43,7 +43,7 @@ Status GetSessionFromEnv(session_id_type *session_id) { *session_id = std::stoul(session_id_str); } catch (const std::exception &e) { 
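    // std::stoul throws std::invalid_argument or std::out_of_range on a
    // malformed SESSION_ID, which the test maps to the renamed kMDSyntaxError
    // status below.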
diff --git a/tests/ut/cpp/dataset/btree_test.cc b/tests/ut/cpp/dataset/btree_test.cc
index 5e309354cf9..9a2271dcfa9 100644
--- a/tests/ut/cpp/dataset/btree_test.cc
+++ b/tests/ut/cpp/dataset/btree_test.cc
@@ -101,7 +101,7 @@ TEST_F(MindDataTestBPlusTree, Test1) {
 
   // Test duplicate key
   {
     rc = btree.DoInsert(100, "Expect error");
-    EXPECT_EQ(rc, Status(StatusCode::kDuplicateKey));
+    EXPECT_EQ(rc, Status(StatusCode::kMDDuplicateKey));
   }
 }
diff --git a/tests/ut/cpp/dataset/build_vocab_test.cc b/tests/ut/cpp/dataset/build_vocab_test.cc
index a0d42e6f890..bcb4d436492 100644
--- a/tests/ut/cpp/dataset/build_vocab_test.cc
+++ b/tests/ut/cpp/dataset/build_vocab_test.cc
@@ -25,7 +25,7 @@
 #include "minddata/dataset/text/vocab.h"
 
 using mindspore::dataset::Tensor;
-using mindspore::dataset::Status;
+using mindspore::Status;
 using mindspore::dataset::Vocab;
 
 class MindDataTestVocab : public UT::DatasetOpTesting {
diff --git a/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc b/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc
index 5ff421fb18f..e9328529d9c 100644
--- a/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc
+++ b/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc
@@ -17,7 +17,7 @@
 
 #include "minddata/dataset/include/datasets.h"
 #include "minddata/dataset/core/global_context.h"
-#include "mindspore/core/ir/dtype/type_id.h"
+#include "ir/dtype/type_id.h"
 
 using namespace mindspore::dataset;
diff --git a/tests/ut/cpp/dataset/c_api_text_test.cc b/tests/ut/cpp/dataset/c_api_text_test.cc
index c368bcee213..e50419e23b2 100644
--- a/tests/ut/cpp/dataset/c_api_text_test.cc
+++ b/tests/ut/cpp/dataset/c_api_text_test.cc
@@ -27,7 +27,7 @@
 
 using namespace mindspore::dataset;
 using mindspore::dataset::ShuffleMode;
-using mindspore::dataset::Status;
+using mindspore::Status;
 using mindspore::dataset::Tensor;
 using mindspore::dataset::Vocab;
diff --git a/tests/ut/cpp/dataset/c_api_text_vocab_test.cc b/tests/ut/cpp/dataset/c_api_text_vocab_test.cc
index a01d697153a..f54ed381f69 100644
--- a/tests/ut/cpp/dataset/c_api_text_vocab_test.cc
+++ b/tests/ut/cpp/dataset/c_api_text_vocab_test.cc
@@ -27,7 +27,7 @@
 using namespace mindspore::dataset;
 using mindspore::dataset::DataType;
 using mindspore::dataset::ShuffleMode;
-using mindspore::dataset::Status;
+using mindspore::Status;
 using mindspore::dataset::Tensor;
 using mindspore::dataset::Vocab;
diff --git a/tests/ut/cpp/dataset/cache_op_test.cc b/tests/ut/cpp/dataset/cache_op_test.cc
index a85e6a6c333..1e1b66af782 100644
--- a/tests/ut/cpp/dataset/cache_op_test.cc
+++ b/tests/ut/cpp/dataset/cache_op_test.cc
@@ -43,7 +43,7 @@ Status GetSessionFromEnv(session_id_type *session_id) {
       *session_id = std::stoul(session_id_str);
     } catch (const std::exception &e) {
       std::string err_msg = "Invalid numeric value for session id in env var: " + session_id_str;
-      return Status(StatusCode::kSyntaxError, err_msg);
+      return Status(StatusCode::kMDSyntaxError, err_msg);
     }
   } else {
     RETURN_STATUS_UNEXPECTED("Test case requires a session id to be provided via SESSION_ID environment variable.");
diff --git a/tests/ut/cpp/dataset/center_crop_op_test.cc b/tests/ut/cpp/dataset/center_crop_op_test.cc
index 92f069d4759..59432eee398 100644
--- a/tests/ut/cpp/dataset/center_crop_op_test.cc
+++ b/tests/ut/cpp/dataset/center_crop_op_test.cc
@@ -53,7 +53,7 @@ TEST_F(MindDataTestCenterCropOp, TestOp2) {
   std::unique_ptr<CenterCropOp> op(new CenterCropOp(het, wid));
   Status s = op->Compute(input_tensor_, &output_tensor);
   EXPECT_TRUE(s.IsError());
-  ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError);
+  ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError);
 }
 
 TEST_F(MindDataTestCenterCropOp, TestOp3) {
diff --git a/tests/ut/cpp/dataset/common/common.h b/tests/ut/cpp/dataset/common/common.h
index dc865cab053..db8c8130c08 100644
--- a/tests/ut/cpp/dataset/common/common.h
+++ b/tests/ut/cpp/dataset/common/common.h
@@ -20,6 +20,9 @@
 #include "minddata/dataset/util/status.h"
 #include "utils/log_adapter.h"
 
+using mindspore::Status;
+using mindspore::StatusCode;
+
 #define ASSERT_OK(_s)                          \
   do {                                         \
     Status __rc = (_s);                        \
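Because Status and StatusCode now live in the top-level mindspore namespace rather than mindspore::dataset, the shared test header adds the two using-declarations above once, so every dataset test keeps compiling with unqualified names. Illustratively (the helper below is hypothetical, not part of the patch):

  #include "common/common.h"

  // An unqualified `Status` in any dataset test now binds to mindspore::Status
  // through the new aliases, so individual call sites need no per-file edits.
  Status StillUnqualified() {
    Status rc;  // previously resolved to mindspore::dataset::Status
    RETURN_IF_NOT_OK(rc);
    return Status::OK();
  }
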
diff --git a/tests/ut/cpp/dataset/connector_test.cc b/tests/ut/cpp/dataset/connector_test.cc
index 0fc5b100d73..00c971daef6 100644
--- a/tests/ut/cpp/dataset/connector_test.cc
+++ b/tests/ut/cpp/dataset/connector_test.cc
@@ -67,7 +67,7 @@ private:
 
   // This worker loop read from input_ vector that have complete list of tasks/elements.
   // The assignment from the elements in input_ to each worker is ensured in RoundRobin,
-  // i.e., tid-0 will pick input_[0], tid-1 will pick input_[1], so-on circularly.
+  // i.e., tid-0 will pick input_[0], tid-1 will pick input_[1], so-on circular.
   Status FirstWorkerPush(
     int tid, std::shared_ptr<Connector<int32_t>> my_conn,
@@ -227,7 +227,7 @@ Status MindDataTestConnector::Run_test_1() {
                        std::bind(&MindDataTestConnector::SerialWorkerPull,
                                  this,
                                  0,      // thread id = 0, since it's the only one
-                                 conn2,  // poping the data from conn2
+                                 conn2,  // popping the data from conn2
                                  &output));
   RETURN_IF_NOT_OK(rc);
 
   // Wait for the threads to finish.
@@ -316,7 +316,7 @@ Status MindDataTestConnector::ValidateOutput(const std::vector<int32_t> &output
   int prev = 0;
   for (auto el : output) {
     if (prev >= el) {
-      return Status(StatusCode::kUnexpectedError, "Output vector are not in-order.");
+      return Status(StatusCode::kMDUnexpectedError, "Output vector are not in-order.");
     }
     prev = el;
   }
diff --git a/tests/ut/cpp/dataset/execute_test.cc b/tests/ut/cpp/dataset/execute_test.cc
index 83029d2400b..08f99415cae 100644
--- a/tests/ut/cpp/dataset/execute_test.cc
+++ b/tests/ut/cpp/dataset/execute_test.cc
@@ -15,6 +15,7 @@
  */
 #include "common/common.h"
 #include "common/cvop_common.h"
+#include "minddata/dataset/core/de_tensor.h"
 #include "minddata/dataset/include/execute.h"
 #include "minddata/dataset/include/transforms.h"
 #include "minddata/dataset/include/vision.h"
@@ -32,12 +33,22 @@ class MindDataTestExecute : public UT::CVOP::CVOpCommon {
   std::shared_ptr<Tensor> output_tensor_;
 };
 
-TEST_F(MindDataTestExecute, TestOp1) {
-  MS_LOG(INFO) << "Doing testCrop.";
-  // Crop params
+TEST_F(MindDataTestExecute, TestComposeTransforms) {
+  MS_LOG(INFO) << "Doing TestComposeTransforms.";
+
+  std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
+  mindspore::dataset::Tensor::CreateFromFile("data/dataset/apple.jpg", &de_tensor);
+  auto image = mindspore::MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));
+
+  // Transform params
+  std::shared_ptr<TensorOperation> decode = vision::Decode();
   std::shared_ptr<TensorOperation> center_crop = vision::CenterCrop({30});
-  std::shared_ptr<Tensor> out_image = Execute(std::move(center_crop))(input_tensor_);
-  EXPECT_NE(out_image, nullptr);
-  EXPECT_EQ(30, out_image->shape()[0]);
-  EXPECT_EQ(30, out_image->shape()[1]);
+  std::shared_ptr<TensorOperation> rescale = vision::Rescale(1./3, 0.5);
+
+  auto transform = Execute({decode, center_crop, rescale});
+  Status rc = transform(image, &image);
+
+  EXPECT_EQ(rc, Status::OK());
+  EXPECT_EQ(30, image.Shape()[0]);
+  EXPECT_EQ(30, image.Shape()[1]);
 }
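The rewritten execute test shows the shape of the new eager API: Execute now takes a vector of transforms instead of a single operation, consumes and produces the public mindspore::MSTensor (bridged from the internal dataset tensor via DETensor), and reports failure through a Status rather than a null output. A condensed sketch of that flow, reusing the test's own calls (the crop size and rescale factors below are illustrative values, not the test's):

  std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
  mindspore::dataset::Tensor::CreateFromFile("data/dataset/apple.jpg", &de_tensor);
  auto image = mindspore::MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));

  // Chain decode -> crop -> rescale in one Execute object, then run it
  // in place: the output argument may be the same handle as the input.
  auto transform = Execute({vision::Decode(), vision::CenterCrop({224}), vision::Rescale(1.0 / 255, 0.0)});
  Status rc = transform(image, &image);
  // rc == Status::OK() on success; image.Shape() then reflects the crop size.
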
diff --git a/tests/ut/cpp/dataset/fill_op_test.cc b/tests/ut/cpp/dataset/fill_op_test.cc
index 795db705af7..08d1ef072f3 100644
--- a/tests/ut/cpp/dataset/fill_op_test.cc
+++ b/tests/ut/cpp/dataset/fill_op_test.cc
@@ -98,7 +98,7 @@ TEST_F(MindDataTestFillOp, ScalarFill) {
   Status s = op->Compute(input, &output);
 
   EXPECT_TRUE(s.IsError());
-  ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError);
+  ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError);
 
   MS_LOG(INFO) << "MindDataTestFillOp-ScalarFill end.";
 }
@@ -147,7 +147,7 @@ TEST_F(MindDataTestFillOp, NumericToString) {
   Status s = op->Compute(input, &output);
 
   EXPECT_TRUE(s.IsError());
-  ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError);
+  ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError);
 
   MS_LOG(INFO) << "MindDataTestFillOp-NumericToString end.";
 }
@@ -167,7 +167,7 @@ TEST_F(MindDataTestFillOp, StringToNumeric) {
   Status s = op->Compute(input, &output);
 
   EXPECT_TRUE(s.IsError());
-  ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError);
+  ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError);
 
   MS_LOG(INFO) << "MindDataTestFillOp-StringToNumeric end.";
 }
\ No newline at end of file
diff --git a/tests/ut/cpp/dataset/interrupt_test.cc b/tests/ut/cpp/dataset/interrupt_test.cc
index 8a064131751..7282c1f8053 100644
--- a/tests/ut/cpp/dataset/interrupt_test.cc
+++ b/tests/ut/cpp/dataset/interrupt_test.cc
@@ -43,7 +43,7 @@ TEST_F(MindDataTestIntrpService, Test1) {
       int v;
       Status rc;
       rc = q.PopFront(&v);
-      EXPECT_TRUE(rc.IsInterrupted());
+      EXPECT_TRUE(rc == StatusCode::kMDInterrupted);
       return rc;
     });
   vg_.GetIntrpService()->InterruptAll();
@@ -59,7 +59,7 @@ TEST_F(MindDataTestIntrpService, Test2) {
   vg_.CreateAsyncTask("Test1", [&]() -> Status {
     TaskManager::FindMe()->Post();
     Status rc = wp.Wait();
-    EXPECT_TRUE(rc.IsInterrupted());
+    EXPECT_TRUE(rc == StatusCode::kMDInterrupted);
     return rc;
   });
   vg_.GetIntrpService()->InterruptAll();
diff --git a/tests/ut/cpp/dataset/memory_pool_test.cc b/tests/ut/cpp/dataset/memory_pool_test.cc
index 2981a63708b..8c9713285d1 100644
--- a/tests/ut/cpp/dataset/memory_pool_test.cc
+++ b/tests/ut/cpp/dataset/memory_pool_test.cc
@@ -79,7 +79,7 @@ TEST_F(MindDataTestMemoryPool, TestMemGuard) {
   // Try some large value.
   int64_t sz = 5LL * 1024LL * 1024LL * 1024LL;
   Status rc = mem.allocate(sz);
-  ASSERT_TRUE(rc.IsOk() || rc.IsOutofMemory());
+  ASSERT_TRUE(rc.IsOk() || rc == StatusCode::kMDOutOfMemory);
   if (rc.IsOk()) {
     // Try write a character half way.
     auto *p = mem.GetMutablePointer();
diff --git a/tests/ut/cpp/dataset/queue_test.cc b/tests/ut/cpp/dataset/queue_test.cc
index fcc4e1a54d8..3f2e1ad4af2 100644
--- a/tests/ut/cpp/dataset/queue_test.cc
+++ b/tests/ut/cpp/dataset/queue_test.cc
@@ -101,19 +101,19 @@ TEST_F(MindDataTestQueue, Test1) {
 TEST_F(MindDataTestQueue, Test2) {
   // Passing status object
   Queue<Status> que(3);
-  Status rc_send(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Oops");
+  Status rc_send(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Oops");
   Status rc = que.Add(rc_send);
   ASSERT_TRUE(rc.IsOk());
   Status rc_recv;
   rc = que.PopFront(&rc_recv);
   ASSERT_TRUE(rc.IsOk());
   ASSERT_EQ(rc_recv, rc_send);
-  rc = que.EmplaceBack(StatusCode::kOutOfMemory, "Test emplace");
+  rc = que.EmplaceBack(StatusCode::kMDOutOfMemory, "Test emplace");
   ASSERT_TRUE(rc.IsOk());
   Status rc_recv2;
   rc = que.PopFront(&rc_recv2);
   ASSERT_TRUE(rc.IsOk());
-  ASSERT_TRUE(rc_recv2.IsOutofMemory());
+  ASSERT_TRUE(rc_recv2 == StatusCode::kMDOutOfMemory);
 }
 
 TEST_F(MindDataTestQueue, Test3) {
diff --git a/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc b/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc
index 50212ac76d6..201de384c99 100644
--- a/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc
+++ b/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc
@@ -103,7 +103,7 @@ TEST_F(MindDataTestRandomCropWithBBoxOp, TestOp3) {
   for (auto tensor_row_ : images_and_annotations_) {
     Status s = op->Compute(tensor_row_, &output_tensor_row_);
     EXPECT_TRUE(s.IsError());
-    ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError);
+    ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError);
   }
   MS_LOG(INFO) << "testRandomCropWithBBoxOp3 end.";
 }
\ No newline at end of file
diff --git a/tests/ut/cpp/dataset/solarize_op_test.cc b/tests/ut/cpp/dataset/solarize_op_test.cc
index 6e7bd0e2fd7..e1e5a15b5c7 100644
--- a/tests/ut/cpp/dataset/solarize_op_test.cc
+++ b/tests/ut/cpp/dataset/solarize_op_test.cc
@@ -163,5 +163,5 @@ TEST_F(MindDataTestSolarizeOp, TestOp6) {
   EXPECT_TRUE(s.IsError());
   EXPECT_NE(s.ToString().find("Solarize: threshold_min must be smaller or equal to threshold_max."),
             std::string::npos);
-  ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError);
+  ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError);
 }
\ No newline at end of file
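One pattern recurs across the hunks above: the per-code predicates (IsInterrupted(), IsOutofMemory()) and the get_code() accessor are gone; the unified Status instead exposes a StatusCode() accessor and compares directly against the StatusCode enum. A small sketch of the replacement idiom (the helper function is hypothetical):

  #include "minddata/dataset/util/status.h"

  using mindspore::Status;
  using mindspore::StatusCode;

  bool IsFatal(const Status &rc) {
    if (rc == StatusCode::kMDInterrupted) {  // operator== replaces rc.IsInterrupted()
      return false;                          // interruption is expected during shutdown
    }
    // StatusCode() replaces the old get_code() accessor.
    return rc.StatusCode() == StatusCode::kMDUnexpectedError ||
           rc.StatusCode() == StatusCode::kMDOutOfMemory;
  }
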
diff --git a/tests/ut/cpp/dataset/status_test.cc b/tests/ut/cpp/dataset/status_test.cc
index 195da1c1199..50072ce2387 100644
--- a/tests/ut/cpp/dataset/status_test.cc
+++ b/tests/ut/cpp/dataset/status_test.cc
@@ -27,7 +27,7 @@ class MindDataTestStatus : public UT::Common {
 // This function returns Status
 Status f1() {
-  Status rc(StatusCode::kUnexpectedError, "Testing macro");
+  Status rc(StatusCode::kMDUnexpectedError, "Testing macro");
   RETURN_IF_NOT_OK(rc);
   // We shouldn't get here
   return Status::OK();
@@ -41,11 +41,11 @@ TEST_F(MindDataTestStatus, Test1) {
   // Test default constructor which should be OK
   Status rc;
   ASSERT_TRUE(rc.IsOk());
-  Status err1(StatusCode::kOutOfMemory, __LINE__, __FILE__);
+  Status err1(StatusCode::kMDOutOfMemory, __LINE__, __FILE__);
   MS_LOG(DEBUG) << err1;
-  ASSERT_TRUE(err1.IsOutofMemory());
+  ASSERT_TRUE(err1 == StatusCode::kMDOutOfMemory);
   ASSERT_TRUE(err1.IsError());
-  Status err2(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Oops");
+  Status err2(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Oops");
   MS_LOG(DEBUG) << err2;
 }
diff --git a/tests/ut/cpp/dataset/task_manager_test.cc b/tests/ut/cpp/dataset/task_manager_test.cc
index 7b8101fa566..fc61468f5a1 100644
--- a/tests/ut/cpp/dataset/task_manager_test.cc
+++ b/tests/ut/cpp/dataset/task_manager_test.cc
@@ -35,9 +35,9 @@ TEST_F(MindDataTestTaskManager, Test1) {
     TaskManager::FindMe()->Post();
     throw std::bad_alloc();
   });
-  ASSERT_TRUE(vg_rc.IsOk() || vg_rc.IsOutofMemory());
+  ASSERT_TRUE(vg_rc.IsOk() || vg_rc == StatusCode::kMDOutOfMemory);
   ASSERT_TRUE(vg.join_all().IsOk());
-  ASSERT_TRUE(vg.GetTaskErrorIfAny().IsOutofMemory());
+  ASSERT_TRUE(vg.GetTaskErrorIfAny() == StatusCode::kMDOutOfMemory);
   // Test the error is passed back to the master thread if vg_rc above is OK.
   // If vg_rc is kOutOfMemory, the group error is already passed back.
   // Some compiler may choose to run the next line in parallel with the above 3 lines
@@ -46,7 +46,7 @@ TEST_F(MindDataTestTaskManager, Test1) {
   // depends on previous lines.
   if (vg.GetTaskErrorIfAny().IsError() && vg_rc.IsOk()) {
     Status rc = TaskManager::GetMasterThreadRc();
-    ASSERT_TRUE(rc.IsOutofMemory());
+    ASSERT_TRUE(rc == StatusCode::kMDOutOfMemory);
   }
 }
diff --git a/tests/ut/cpp/dataset/tensor_test.cc b/tests/ut/cpp/dataset/tensor_test.cc
index f789533b809..ed10f8376a7 100644
--- a/tests/ut/cpp/dataset/tensor_test.cc
+++ b/tests/ut/cpp/dataset/tensor_test.cc
@@ -156,9 +156,9 @@ TEST_F(MindDataTestTensorDE, InsertTensor) {
   Tensor::CreateFromVector(z, TensorShape({2, 3}), &t6);
   ASSERT_EQ(*t == *t6, true);
 
-  ASSERT_EQ(t->InsertTensor({2}, t5).get_code(), StatusCode::kUnexpectedError);
-  ASSERT_EQ(t->InsertTensor({1}, t5).get_code(), StatusCode::kUnexpectedError);
-  ASSERT_EQ(t->InsertTensor({1, 2}, t6).get_code(), StatusCode::kUnexpectedError);
+  ASSERT_EQ(t->InsertTensor({2}, t5).StatusCode(), StatusCode::kMDUnexpectedError);
+  ASSERT_EQ(t->InsertTensor({1}, t5).StatusCode(), StatusCode::kMDUnexpectedError);
+  ASSERT_EQ(t->InsertTensor({1, 2}, t6).StatusCode(), StatusCode::kMDUnexpectedError);
   t6->Fill(-1);
   ASSERT_TRUE(t->InsertTensor({}, t6).OK());
   ASSERT_EQ(*t == *t6, true);