diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h index b3f8abe75ef..2623a341890 100644 --- a/mindspore/lite/include/context.h +++ b/mindspore/lite/include/context.h @@ -17,8 +17,6 @@ #ifndef MINDSPORE_LITE_INCLUDE_CONTEXT_H_ #define MINDSPORE_LITE_INCLUDE_CONTEXT_H_ -#include -#include #include "include/ms_tensor.h" #include "include/lite_utils.h" #include "include/lite_types.h" @@ -55,10 +53,14 @@ struct DeviceContext { /// \brief Context defined for holding environment variables during runtime. struct Context { - std::string vendor_name_; + String vendor_name_; int thread_num_ = 2; /**< thread number config for thread pool */ AllocatorPtr allocator = nullptr; +#ifndef NOT_USE_STL DeviceContextVector device_list_ = {{DT_CPU, {false, MID_CPU}}}; +#else + DeviceContextVector device_list_; +#endif // NOT_USE_STL }; } // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_CONTEXT_H_ diff --git a/mindspore/lite/include/errorcode.h b/mindspore/lite/include/errorcode.h index b7671259206..9ff4e093795 100644 --- a/mindspore/lite/include/errorcode.h +++ b/mindspore/lite/include/errorcode.h @@ -17,8 +17,7 @@ #ifndef MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ #define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ -#include -#include +#include "include/lite_utils.h" namespace mindspore { namespace lite { @@ -67,7 +66,7 @@ constexpr int RET_INPUT_PARAM_INVALID = -600; /**< Invalid input param by user. /// \param[in] error_code define return status of procedure. /// /// \return String of errorcode info. 
-std::string GetErrorInfo(STATUS error_code); +String GetErrorInfo(STATUS error_code); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/include/lite_session.h b/mindspore/lite/include/lite_session.h index e2eed86a7b4..c3ad559339f 100644 --- a/mindspore/lite/include/lite_session.h +++ b/mindspore/lite/include/lite_session.h @@ -17,10 +17,9 @@ #ifndef MINDSPORE_LITE_INCLUDE_LITE_SESSION_H #define MINDSPORE_LITE_INCLUDE_LITE_SESSION_H -#include -#include -#include +#ifndef NOT_USE_STL #include +#endif // NOT_USE_STL #include "include/ms_tensor.h" #include "include/model.h" #include "include/context.h" @@ -66,14 +65,14 @@ class MS_API LiteSession { /// \brief Get input MindSpore Lite MSTensors of model. /// /// \return The vector of MindSpore Lite MSTensor. - virtual std::vector GetInputs() const = 0; + virtual Vector GetInputs() const = 0; /// \brief Get input MindSpore Lite MSTensors of model by tensor name. /// /// \param[in] node_name Define tensor name. /// /// \return The vector of MindSpore Lite MSTensor. - virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const = 0; + virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const = 0; /// \brief Run session with callback. /// @@ -92,24 +91,26 @@ class MS_API LiteSession { /// \note Deprecated, replace with GetOutputByTensorName /// /// \return The vector of MindSpore Lite MSTensor. - virtual std::vector GetOutputsByNodeName(const std::string &node_name) const = 0; + virtual Vector GetOutputsByNodeName(const String &node_name) const = 0; +#ifndef NOT_USE_STL /// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name. /// /// \return The map of output tensor name and MindSpore Lite MSTensor. - virtual std::unordered_map GetOutputs() const = 0; + virtual std::unordered_map GetOutputs() const = 0; +#endif /// \brief Get name of output tensors of model compiled by this session. 
/// /// \return The vector of string as output tensor names in order. - virtual std::vector GetOutputTensorNames() const = 0; + virtual Vector GetOutputTensorNames() const = 0; /// \brief Get output MindSpore Lite MSTensors of model by tensor name. /// /// \param[in] tensor_name Define tensor name. /// /// \return Pointer of MindSpore Lite MSTensor. - virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const = 0; + virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const = 0; /// \brief Resize inputs shape. /// @@ -117,7 +118,7 @@ class MS_API LiteSession { /// \param[in] dims Define the inputs new shape. /// /// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h. - virtual int Resize(const std::vector &inputs, const std::vector> &dims) = 0; + virtual int Resize(const Vector &inputs, const Vector> &dims) = 0; }; } // namespace session } // namespace mindspore diff --git a/mindspore/lite/include/lite_utils.h b/mindspore/lite/include/lite_utils.h index 1b22677a9a8..d7ebedbc12e 100644 --- a/mindspore/lite/include/lite_utils.h +++ b/mindspore/lite/include/lite_utils.h @@ -16,31 +16,383 @@ #ifndef MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ #define MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ + +#ifndef NOT_USE_STL #include #include #include -#include "include/ms_tensor.h" +#include +#else +#include +#include +#include +#include +#endif // NOT_USE_STL + +#ifndef MS_API +#ifdef _WIN32 +#define MS_API __declspec(dllexport) +#else +#define MS_API __attribute__((visibility("default"))) +#endif +#endif namespace mindspore { -class Allocator; - namespace schema { struct Tensor; } // namespace schema +namespace tensor { +class MSTensor; +} // namespace tensor + namespace lite { +struct DeviceContext; +} // namespace lite + +#ifdef NOT_USE_STL +class String { + public: + String(); + String(size_t count, char ch); + String(const char *s, size_t count); + explicit String(const char *s); + 
String(const String &other); + String(const String &other, size_t pos, size_t count = npos); + + ~String(); + + String &operator=(const String &str); + String &operator=(const char *str); + + char &at(size_t pos); + const char &at(size_t pos) const; + inline char &operator[](size_t pos); + inline const char &operator[](size_t pos) const; + char *data() noexcept; + const char *data() const noexcept; + const char *c_str() const noexcept; + + // capacity + bool empty() const noexcept; + size_t size() const noexcept; + size_t length() const noexcept; + + // operations + void clear() noexcept; + String &append(size_t count, const char ch); + String &append(const String &str); + String &append(const char *s); + String &operator+(const String &str); + String &operator+=(const String &str); + String &operator+=(const char *str); + String &operator+=(const char ch); + int compare(const String &str) const; + int compare(const char *str) const; + + String substr(size_t pos = 0, size_t count = npos) const; + + static const size_t npos = -1; + + private: + size_t size_; + char *buffer_; +}; + +String operator+(const String &str1, const char *str2); +String operator+(const char *str1, const String &str2); + +String to_string(int32_t value); +String to_string(float value); + +#define DEFAULT_CAPACITY 4 +#define MS_C_EXCEPTION(...) exit(1) +#define MIN(x, y) ((x < y) ? (x) : (y)) +template +class Vector { + public: + Vector() { + size_ = 0; + capacity_ = DEFAULT_CAPACITY; + elem_size_ = sizeof(T); + data_ = nullptr; + } + + explicit Vector(size_t size) { + size_ = size; + elem_size_ = sizeof(T); + capacity_ = (size == 0 ? DEFAULT_CAPACITY : size); + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memset(data_, 0, capacity_ * elem_size_); + } + + Vector(size_t size, const T &value) { + size_ = size; + elem_size_ = sizeof(T); + capacity_ = (size == 0 ? 
DEFAULT_CAPACITY : size); + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + for (int i = 0; i < size; ++i) { + data_[i] = value; + } + } + + Vector(const Vector &vec) { + size_ = vec.size_; + elem_size_ = sizeof(T); + capacity_ = vec.capacity_; + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(data_, vec.data_, size_ * elem_size_); + } + + ~Vector() { + if (data_ != nullptr) { + free(data_); + } + } + + void clear() { + size_ = 0; + if (data_ != nullptr) { + free(data_); + data_ = nullptr; + } + } + + void push_back(const T &elem) { + if (data_ == nullptr) { + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + } else if (size_ == capacity_) { + resize(size_ + 1); + --size_; + } + data_[size_] = elem; + ++size_; + } + + void push_back(T &&elem) { + if (data_ == nullptr) { + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + } else if (size_ == capacity_) { + resize(size_ + 1); + --size_; + } + data_[size_] = elem; + ++size_; + } + + void pop_back() { + if (size_ > 0) { + --size_; + } else { + MS_C_EXCEPTION("Index is out of range!"); + } + } + + void insert(const T &elem, size_t index) { + if (index <= size_) { + ++size_; + if (size_ > capacity_) { + resize(size_); + } + if (index == size_ - 1) { + push_back(elem); + } else { + memmove(data_ + index + 1, data_ + index, (size_ - index - 1) * elem_size_); + data_[index] = elem; + } + } else { + MS_C_EXCEPTION("Input index is out of range!"); + } + } + + T *begin() { return data_; } + + const T *begin() const { return data_; } + + T *end() { return data_ + size_; } + + const T *end() const { return data_ + size_; } + + T &front() { + if (size_ > 0) { + return data_[0]; + } + MS_C_EXCEPTION("Index 
is out of range!"); + } + + const T &front() const { + if (size_ > 0) { + return data_[0]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + T &back() { + if (size_ > 0) { + return data_[size_ - 1]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + const T &back() const { + if (size_ > 0) { + return data_[size_ - 1]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + T &at(size_t index) { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + const T &at(size_t index) const { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + T &operator[](size_t index) { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + const T &operator[](size_t index) const { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + T *data() { return data_; } + + const T *data() const { return data_; } + + size_t size() const { return size_; } + + size_t capacity() const { return capacity_; } + + bool empty() const { return size_ == 0; } + + void erase(size_t index) { + if (index == size_ - 1) { + --size_; + } else if (index < size_) { + memmove(data_ + index, data_ + index + 1, (size_ - index - 1) * elem_size_); + --size_; + } else { + MS_C_EXCEPTION("Input index is out of range!"); + } + } + + void resize(size_t size) { + while (size > capacity_) { + capacity_ *= 2; + } + T *tmp = data_; + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(data_, tmp, MIN(size, size_) * elem_size_); + size_ = size; + free(tmp); + } + + void reserve(size_t capacity) { + if (capacity > capacity_) { + capacity_ = capacity; + } + } + + Vector &operator=(const Vector &vec) { + if (this == &vec) { + return *this; + } + size_ = vec.size_; + elem_size_ = sizeof(T); + capacity_ = vec.capacity_; + data_ 
= reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(data_, vec.data_, size_ * elem_size_); + return *this; + } + + private: + size_t size_; + size_t elem_size_; + size_t capacity_; + T *data_; +}; +using TensorPtrVector = Vector; +using Uint32Vector = Vector; +using AllocatorPtr = void *; +using DeviceContextVector = Vector; +using KernelCallBack = void (*)(void *, void *); +#else /// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. /// /// \note List public class and interface for reference. - -/// \brief DeviceContext defined a device context. -struct DeviceContext; +class Allocator; +using AllocatorPtr = std::shared_ptr; using TensorPtrVector = std::vector; -using DeviceContextVector = std::vector; using Uint32Vector = std::vector; +template +using Vector = std::vector; + +template +inline std::string to_string(T t) { + return std::to_string(t); +} + +namespace tensor { using String = std::string; -using AllocatorPtr = std::shared_ptr; +} // namespace tensor + +namespace session { +using String = std::string; +} // namespace session + +/// \brief CallBackParam defined input arguments for callBack function. +struct CallBackParam { + session::String node_name; /**< node name argument */ + session::String node_type; /**< node type argument */ +}; + +struct GPUCallBackParam : CallBackParam { + double execute_time{-1.f}; +}; + +/// \brief KernelCallBack defined the function pointer for callBack. +using KernelCallBack = std::function inputs, Vector outputs, + const CallBackParam &opInfo)>; + +namespace lite { +using String = std::string; +using DeviceContextVector = std::vector; /// \brief Set data of MSTensor from string vector. /// @@ -48,12 +400,13 @@ using AllocatorPtr = std::shared_ptr; /// \param[out] MSTensor. /// /// \return STATUS as an error code of this interface, STATUS is defined in errorcode.h. 
-int MS_API StringsToMSTensor(const std::vector &inputs, tensor::MSTensor *tensor); +int MS_API StringsToMSTensor(const Vector &inputs, tensor::MSTensor *tensor); /// \brief Get string vector from MSTensor. /// \param[in] MSTensor. /// \return string vector. -std::vector MS_API MSTensorToStrings(const tensor::MSTensor *tensor); +Vector MS_API MSTensorToStrings(const tensor::MSTensor *tensor); } // namespace lite +#endif // NOT_USE_STL } // namespace mindspore #endif // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ diff --git a/mindspore/lite/include/model.h b/mindspore/lite/include/model.h index 6fa57be574e..22d2ffa30b9 100644 --- a/mindspore/lite/include/model.h +++ b/mindspore/lite/include/model.h @@ -15,7 +15,7 @@ */ #ifndef MINDSPORE_LITE_INCLUDE_MODEL_H_ #define MINDSPORE_LITE_INCLUDE_MODEL_H_ -#include + #include "include/lite_utils.h" namespace mindspore::lite { @@ -28,7 +28,7 @@ struct MS_API Model { Uint32Vector output_indices_; int quant_type_; }; - using NodePtrVector = std::vector; + using NodePtrVector = Vector; struct SubGraph { String name_; Uint32Vector input_indices_; @@ -36,7 +36,7 @@ struct MS_API Model { Uint32Vector node_indices_; Uint32Vector tensor_indices_; }; - using SubGraphPtrVector = std::vector; + using SubGraphPtrVector = Vector; String name_; String version_; TensorPtrVector all_tensors_; diff --git a/mindspore/lite/include/ms_tensor.h b/mindspore/lite/include/ms_tensor.h index 6d038232553..1a6f3112b8a 100644 --- a/mindspore/lite/include/ms_tensor.h +++ b/mindspore/lite/include/ms_tensor.h @@ -17,21 +17,9 @@ #ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ #define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ -#include -#include -#include -#include -#include +#include "include/lite_utils.h" #include "ir/dtype/type_id.h" -#ifndef MS_API -#ifdef _WIN32 -#define MS_API __declspec(dllexport) -#else -#define MS_API __attribute__((visibility("default"))) -#endif -#endif - namespace mindspore { namespace tensor { /// \brief MSTensor defined tensor in MindSpore 
Lite. @@ -48,7 +36,7 @@ class MS_API MSTensor { /// \brief Create a MSTensor. /// /// \return Pointer to an instance of MindSpore Lite MSTensor. - static MSTensor *CreateTensor(const std::string &name, TypeId type, const std::vector &shape, const void *data, + static MSTensor *CreateTensor(const String &name, TypeId type, const Vector &shape, const void *data, size_t data_len); /// \brief Get data type of the MindSpore Lite MSTensor. @@ -62,10 +50,10 @@ class MS_API MSTensor { /// \brief Get shape of the MindSpore Lite MSTensor. /// /// \return A vector of int as the shape of the MindSpore Lite MSTensor. - virtual std::vector shape() const = 0; + virtual Vector shape() const = 0; /// \brief Set the shape of MSTensor. - virtual void set_shape(const std::vector &name) = 0; + virtual void set_shape(const Vector &name) = 0; /// \brief Get number of element in MSTensor. /// @@ -80,10 +68,10 @@ class MS_API MSTensor { /// \brief Get the name of MSTensor. /// /// \return the name of MSTensor. - virtual std::string tensor_name() const = 0; + virtual String tensor_name() const = 0; /// \brief Set the name of MSTensor. - virtual void set_tensor_name(const std::string name) = 0; + virtual void set_tensor_name(const String name) = 0; /// \brief Get the pointer of data in MSTensor. /// @@ -105,18 +93,5 @@ class MS_API MSTensor { virtual void set_data(void *data) = 0; }; } // namespace tensor -/// \brief CallBackParam defined input arguments for callBack function. -struct CallBackParam { - std::string node_name; /**< node name argument */ - std::string node_type; /**< node type argument */ -}; - -struct GPUCallBackParam : CallBackParam { - double execute_time{-1.f}; -}; - -/// \brief KernelCallBack defined the function pointer for callBack. 
-using KernelCallBack = std::function inputs, - std::vector outputs, const CallBackParam &opInfo)>; } // namespace mindspore #endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ diff --git a/mindspore/lite/include/version.h b/mindspore/lite/include/version.h index 476fc20625d..8a9ca730e71 100644 --- a/mindspore/lite/include/version.h +++ b/mindspore/lite/include/version.h @@ -17,7 +17,7 @@ #ifndef MINDSPORE_LITE_INCLUDE_VERSION_H_ #define MINDSPORE_LITE_INCLUDE_VERSION_H_ -#include +#include "include/lite_utils.h" namespace mindspore { namespace lite { @@ -28,11 +28,11 @@ const int ms_version_revision = 0; /// \brief Global method to get a version string. /// /// \return The version string of MindSpore Lite. -inline std::string Version() { - return "MindSpore Lite " + std::to_string(ms_version_major) + "." + std::to_string(ms_version_minor) + "." + - std::to_string(ms_version_revision); +inline String Version() { + return "MindSpore Lite " + to_string(ms_version_major) + "." + to_string(ms_version_minor) + "." 
+ + to_string(ms_version_revision); } } // namespace lite } // namespace mindspore -#endif // LITE_VERSION_H +#endif // MINDSPORE_LITE_INCLUDE_VERSION_H_ diff --git a/mindspore/lite/micro/cmake/package_wrapper.cmake b/mindspore/lite/micro/cmake/package_wrapper.cmake index 54bc68e226e..094af3886e3 100644 --- a/mindspore/lite/micro/cmake/package_wrapper.cmake +++ b/mindspore/lite/micro/cmake/package_wrapper.cmake @@ -22,6 +22,7 @@ set(WRAPPER_SRC ${WRAPPER_DIR}/int8/resize_int8_wrapper.c ${WRAPPER_DIR}/int8/slice_int8_wrapper.c ${WRAPPER_DIR}/int8/batchnorm_int8_wrapper.c + ${LITE_DIR}/src/common/string.cc ) list(APPEND FILE_SET ${WRAPPER_SRC} ${RUNTIME_SRC}) diff --git a/mindspore/lite/micro/coder/generator/component/cmake_component.cc b/mindspore/lite/micro/coder/generator/component/cmake_component.cc index c43e87900d7..f263a7ff96b 100644 --- a/mindspore/lite/micro/coder/generator/component/cmake_component.cc +++ b/mindspore/lite/micro/coder/generator/component/cmake_component.cc @@ -34,7 +34,8 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::unique_ptr ofs << " weight.c.o\n" << " net.c.o\n" << " session.cc.o\n" - << " tensor.cc.o\n"; + << " tensor.cc.o\n" + << " string.cc.o\n"; if (config->debug_mode()) { ofs << " debug_utils.c.o\n"; } diff --git a/mindspore/lite/micro/coder/generator/component/common_component.cc b/mindspore/lite/micro/coder/generator/component/common_component.cc index fa46f32d85d..85bae959bbc 100644 --- a/mindspore/lite/micro/coder/generator/component/common_component.cc +++ b/mindspore/lite/micro/coder/generator/component/common_component.cc @@ -26,6 +26,14 @@ namespace mindspore::lite::micro { void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr &ctx) { + auto array_tostring = [&ofs](const std::vector &array, const std::string &name) { + size_t num = array.size(); + ofs << " Vector " << name << ";\n"; + ofs << " " << name << ".resize(" << num << ");\n"; + for (size_t i = 0; i < num; ++i) { + ofs << " " << name << 
"[" << i << "] = " << array[i] << ";\n"; + } + }; std::vector inputs = ctx->graph_inputs(); std::vector outputs = ctx->graph_outputs(); size_t inputs_size = inputs.size(); @@ -36,20 +44,21 @@ void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptrtensor_name() << "\", " - << EnumNameDataType(input->data_type()) << ", " << ArrayToString(input->shape()) << ");\n"; + std::string shape_i = "in_shape_" + std::to_string(i); + array_tostring(input->shape(), shape_i); + ofs << " inputs_[" << i << "] = new (std::nothrow) MTensor(String(\"" << input->tensor_name() << "\"), " + << EnumNameDataType(input->data_type()) << ", " << shape_i << ");\n"; ofs << " MS_ERROR_IF_NULL(inputs_[" << i << "]);\n"; } ofs << " outputs_.resize(" << outputs_size << ");\n"; for (size_t i = 0; i < outputs_size; ++i) { Tensor *output = outputs[i]; - ofs << " outputs_[" << i << "] = new (std::nothrow) MTensor(\"" << output->tensor_name() << "\", " - << EnumNameDataType(output->data_type()) << ", " << ArrayToString(output->shape()) << ");\n"; + std::string shape_i = "out_shape_" + std::to_string(i); + array_tostring(output->shape(), shape_i); + ofs << " outputs_[" << i << "] = new (std::nothrow) MTensor(String(\"" << output->tensor_name() << "\"), " + << EnumNameDataType(output->data_type()) << ", " << shape_i << ");\n"; ofs << " MS_ERROR_IF_NULL(outputs_[" << i << "]);\n"; } - ofs << " for (const auto &output: outputs_) {\n" - " output_tensor_map_[output->tensor_name()] = output;\n" - " }\n"; ofs << " return RET_OK;\n"; ofs << "}\n\n"; } diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc b/mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc index 929ab824033..0a13a05956f 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc @@ -72,10 +72,6 @@ void PrintData(void *data, size_t data_number) { } void 
TensorToString(tensor::MSTensor *tensor) { - uint8_t i = 0; - std::cout << "uint8: " << i << std::endl; - - std::cout << "Name: " << tensor->tensor_name(); std::cout << ", DataType: " << tensor->data_type(); std::cout << ", Size: " << tensor->Size(); std::cout << ", Shape:"; @@ -129,7 +125,7 @@ int main(int argc, const char **argv) { } // set model inputs tensor data - std::vector inputs = session->GetInputs(); + Vector inputs = session->GetInputs(); size_t inputs_num = inputs.size(); void *inputs_binbuf[inputs_num]; int inputs_size[inputs_num]; @@ -150,13 +146,6 @@ int main(int argc, const char **argv) { return lite::RET_ERROR; } - auto outputs = session->GetOutputs(); - std::cout << "output size: " << outputs.size() << std::endl; - for (const auto &item : outputs) { - auto output = item.second; - TensorToString(output); - } - std::cout << "run benchmark success" << std::endl; delete session; for (size_t i = 0; i < inputs_num; ++i) { diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc b/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc index b306c153cb0..21080094c28 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc @@ -34,6 +34,8 @@ set(HEADER_PATH ${PKG_PATH}/inference) option(MICRO_BUILD_ARM64 "build android arm64" OFF) option(MICRO_BUILD_ARM32A "build android arm32" OFF) +add_compile_definitions(NOT_USE_STL) + if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A) add_compile_definitions(ENABLE_NEON) add_compile_definitions(ENABLE_ARM) @@ -95,6 +97,8 @@ set(HEADER_PATH ${PKG_PATH}/inference) message("operator lib path: ${OP_LIB}") message("operator header path: ${OP_HEADER_PATH}") +add_compile_definitions(NOT_USE_STL) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include) include_directories(${OP_HEADER_PATH}) include_directories(${HEADER_PATH}) diff --git 
a/mindspore/lite/micro/coder/generator/component/const_blocks/msession.cc b/mindspore/lite/micro/coder/generator/component/const_blocks/msession.cc index eb9acef8a30..f158b179ee7 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/msession.cc +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/msession.cc @@ -63,31 +63,25 @@ class LiteSession : public session::LiteSession { int CompileGraph(lite::Model *model) override; - std::vector GetInputs() const override; + Vector GetInputs() const override; - mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override { return nullptr; } + mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { return nullptr; } int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override; - std::vector GetOutputsByNodeName(const std::string &node_name) const override; + Vector GetOutputsByNodeName(const String &node_name) const override; - std::unordered_map GetOutputs() const override; + Vector GetOutputTensorNames() const override; - std::vector GetOutputTensorNames() const override; + mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override; - mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override; - - int Resize(const std::vector &inputs, const std::vector> &dims) override; + int Resize(const Vector &inputs, const Vector> &dims) override { return RET_ERROR; } int InitRuntimeBuffer(); private: - int SetInputsData(const std::vector &inputs) const; - std::vector inputs_; - std::vector outputs_; - std::unordered_map output_tensor_map_; - std::unordered_map> output_node_map_; - + Vector inputs_; + Vector outputs_; void *runtime_buffer_; }; @@ -95,7 +89,6 @@ class LiteSession : public session::LiteSession { } // namespace mindspore #endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_ - )RAW"; const char 
*session_source = R"RAW( @@ -130,8 +123,7 @@ LiteSession::~LiteSession() { delete input; input = nullptr; } - for (auto &item : output_tensor_map_) { - auto output = item.second; + for (auto &output : outputs_) { if (output == nullptr) { continue; } @@ -153,46 +145,28 @@ int LiteSession::InitRuntimeBuffer() { return RET_OK; } -std::vector LiteSession::GetInputs() const { - std::vector inputs; - inputs.insert(inputs.begin(), inputs_.begin(), inputs_.end()); +Vector LiteSession::GetInputs() const { + Vector inputs; + for (const auto &input : inputs_) { + inputs.push_back(input); + } return inputs; } -std::vector LiteSession::GetOutputsByNodeName(const std::string &node_name) const { - auto iter = output_node_map_.find(node_name); - if (iter == output_node_map_.end()) { - std::vector empty; - return empty; - } - return iter->second; +Vector LiteSession::GetOutputsByNodeName(const String &node_name) const { + Vector outputs; + return outputs; } -std::unordered_map LiteSession::GetOutputs() const { - return output_tensor_map_; -} - -std::vector LiteSession::GetOutputTensorNames() const { - std::vector output_names; - for (const auto &item : output_node_map_) { - for (const auto &output : item.second) { - output_names.emplace_back(output->tensor_name()); - } +Vector LiteSession::GetOutputTensorNames() const { + Vector output_names; + for (const auto &output : outputs_) { + output_names.push_back(output->tensor_name()); } return output_names; } -mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::string &tensor_name) const { - auto item = output_tensor_map_.find(tensor_name); - if (item == output_tensor_map_.end()) { - return nullptr; - } - return item->second; -} - -int LiteSession::Resize(const std::vector &inputs, const std::vector> &dims) { - return RET_OK; -} +mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { return nullptr; } } // namespace lite @@ -219,7 +193,6 @@ session::LiteSession 
*session::LiteSession::CreateSession(const char *net_buf, s return session; } } // namespace mindspore - )RAW"; } // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc b/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc index d59b860b234..2e8e1f15b0f 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc @@ -19,7 +19,6 @@ namespace mindspore::lite::micro { const char *tensor_header = R"RAW( - /** * Copyright 2021 Huawei Technologies Co., Ltd * @@ -40,8 +39,6 @@ const char *tensor_header = R"RAW( #define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ #include "include/ms_tensor.h" -#include -#include namespace mindspore { namespace lite { @@ -51,7 +48,7 @@ struct QuantArg { float var_corr{1}; float mean_corr{0}; bool inited; - std::vector clusters{}; + Vector clusters{}; int bitNum; int roundType; int multiplier; @@ -61,38 +58,35 @@ struct QuantArg { class MTensor : public mindspore::tensor::MSTensor { public: MTensor() = default; - MTensor(std::string name, enum TypeId type, std::vector shape) - : tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)) {} + MTensor(String name, TypeId type, Vector shape) : tensor_name_(name), data_type_(type), shape_(shape) {} ~MTensor() override; TypeId data_type() const override { return data_type_; } - std::vector shape() const override { return shape_; } - int DimensionSize(size_t index) const override; + Vector shape() const override { return shape_; } + void set_shape(const Vector &shape) override { shape_ = shape; } int ElementsNum() const override; size_t Size() const override; + String tensor_name() const override { return tensor_name_; } + void set_tensor_name(const String name) override { tensor_name_ = name; } void *MutableData() override; - std::string tensor_name() const override { return tensor_name_; } - void 
set_tensor_name(const std::string name) override { tensor_name_ = name; } + void *data() override { return data_; } void set_data(void *data) override { data_ = data; } private: - std::string tensor_name_; + String tensor_name_; TypeId data_type_; - std::vector shape_; + Vector shape_; void *data_ = nullptr; - std::vector quant_params_; + Vector quant_params_; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ - - )RAW"; const char *tensor_source = R"RAW( - /** * Copyright 2021 Huawei Technologies Co., Ltd * @@ -154,14 +148,6 @@ MTensor::~MTensor() { } } -int MTensor::DimensionSize(const size_t index) const { - int dim_size = -1; - if (index < shape_.size()) { - dim_size = shape_[index]; - } - return dim_size; -} - int MTensor::ElementsNum() const { int elements = 1; for (int i : shape_) { @@ -183,7 +169,6 @@ void *MTensor::MutableData() { } } // namespace lite } // namespace mindspore - )RAW"; } // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/generator/generator.cc b/mindspore/lite/micro/coder/generator/generator.cc index 6fc56205c13..1ad60bc5750 100644 --- a/mindspore/lite/micro/coder/generator/generator.cc +++ b/mindspore/lite/micro/coder/generator/generator.cc @@ -111,6 +111,7 @@ int Generator::CodeSessionImplement() { ofs << g_hwLicense; ofs << "#include \"session.h\"\n"; ofs << "#include \"net.h\"\n\n"; + ofs << "#include \n\n"; CodeSessionCompileGraph(ofs, ctx_); ofs << session_source; return RET_OK; diff --git a/mindspore/lite/micro/coder/operator_library/CMakeLists.txt b/mindspore/lite/micro/coder/operator_library/CMakeLists.txt index ff0b529c16e..984890590a8 100644 --- a/mindspore/lite/micro/coder/operator_library/CMakeLists.txt +++ b/mindspore/lite/micro/coder/operator_library/CMakeLists.txt @@ -60,4 +60,5 @@ endif() # generate static library add_library(ops STATIC ${OP_FILES}) +target_compile_definitions(ops PRIVATE NOT_USE_STL) install(TARGETS ops ARCHIVE DESTINATION 
${LIB_PATH}) diff --git a/mindspore/lite/src/common/string.cc b/mindspore/lite/src/common/string.cc new file mode 100644 index 00000000000..881a3a4b170 --- /dev/null +++ b/mindspore/lite/src/common/string.cc @@ -0,0 +1,300 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef NOT_USE_STL +#include <stdlib.h> +#include <string.h> +#include <stdio.h> +#include <float.h> +#include <stdint.h> +#include "include/lite_utils.h" + +namespace mindspore { +String::String() { + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; +} + +String::String(size_t count, char ch) { + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (count + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memset(buffer_, ch, count); + buffer_[count] = '\0'; + size_ = count; +} +String::String(const char *s, size_t count) { + if (s == nullptr) { + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_t size_s = strlen(s); + if (size_s <= count) { + size_ = size_s; + } else { + size_ = count; + } + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, s, size_); + buffer_[size_] = '\0'; +} + +String::String(const char *s)
{ + if (s == nullptr) { + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_ = strlen(s); + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(buffer_, s, size_ + 1); +} + +String::String(const String &other) { + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (other.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = other.size_; + memcpy(buffer_, other.buffer_, size_ + 1); +} + +String::String(const String &other, size_t pos, size_t count) { + if (pos >= other.size_) { + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + } else { + if (count == npos) { + count = other.size_ - pos; + } + if (pos + count > other.size_) { + size_ = other.size_ - pos; + } else { + size_ = count; + } + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, other.buffer_ + pos, size_); + buffer_[size_] = '\0'; + } +} + +String::~String() { free(buffer_); } + +String &String::operator=(const String &str) { + if (this == &str) { + return *this; + } + free(buffer_); + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (str.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = str.size_; + memcpy(buffer_, str.buffer_, size_ + 1); + return *this; +} + +String &String::operator=(const char *str) { + free(buffer_); + if (str == nullptr) { + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return *this; + } + size_t size_s = strlen(str); + buffer_ =
reinterpret_cast<char *>(malloc(sizeof(char) * (size_s + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = size_s; + memcpy(buffer_, str, size_ + 1); + return *this; +} + +char &String::at(size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return buffer_[pos]; +} +const char &String::at(size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return buffer_[pos]; +} +char &String::operator[](size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); +} +const char &String::operator[](size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); +} +char *String::data() noexcept { return buffer_; }; +const char *String::data() const noexcept { return buffer_; } +const char *String::c_str() const noexcept { return buffer_; } + +// capacity +bool String::empty() const noexcept { return size_ == 0; } +size_t String::size() const noexcept { return size_; } +size_t String::length() const noexcept { return size_; } + +// operations +void String::clear() noexcept { + free(buffer_); + buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; +} + +String &String::operator+(const String &str) { + (*this) += str; + return *this; +} + +String &String::operator+=(const String &str) { + size_t new_size = size_ + str.size_; + char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str.buffer_, str.size_); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; +} + +String &String::operator+=(const char *str) { + if (str == nullptr) { + return *this; + } + size_t str_size = strlen(str); + size_t new_size = size_ + str_size; + char
*tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str, str_size); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; +} + +String &String::operator+=(const char ch) { + char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 2))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + tmp[size_] = ch; + tmp[size_ + 1] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ += 1; + return *this; +} + +String &String::append(size_t count, const char ch) { + (*this) += ch; + return *this; +} +String &String::append(const String &str) { + (*this) += str; + return *this; +} +String &String::append(const char *str) { + if (str == nullptr) { + return *this; + } + (*this) += str; + return *this; +} + +int String::compare(const String &str) const { return strcmp(buffer_, str.buffer_); } +int String::compare(const char *str) const { return strcmp(buffer_, str); } + +String String::substr(size_t pos, size_t count) const { return String(*this, pos, count); } + +String operator+(const String &str1, const char *str2) { + String str = str1; + str += str2; + return str; +} + +String operator+(const char *str1, const String &str2) { + String str = str2; + str += str1; + return str; +} + +String to_String(int32_t value) { + char tmp[sizeof(int32_t) * 4]; + snprintf(tmp, sizeof(int32_t) * 4, "%d", value); + return String(tmp, strlen(tmp)); +} + +String to_String(float value) { + char tmp[FLT_MAX_10_EXP + 20]; + snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value); + return String(tmp, strlen(tmp)); +} +} // namespace mindspore +#endif // NOT_USE_STL