forked from mindspore-Ecosystem/mindspore
uniform interface, add vector and string self-defined
This commit is contained in:
parent b5af96af76
commit 48d7330a55
@@ -17,8 +17,6 @@
#ifndef MINDSPORE_LITE_INCLUDE_CONTEXT_H_
#define MINDSPORE_LITE_INCLUDE_CONTEXT_H_

#include <string>
#include <memory>
#include "include/ms_tensor.h"
#include "include/lite_utils.h"
#include "include/lite_types.h"

@@ -55,10 +53,14 @@ struct DeviceContext {

/// \brief Context defined for holding environment variables during runtime.
struct Context {
std::string vendor_name_;
String vendor_name_;
int thread_num_ = 2; /**< thread number config for thread pool */
AllocatorPtr allocator = nullptr;
#ifndef NOT_USE_STL
DeviceContextVector device_list_ = {{DT_CPU, {false, MID_CPU}}};
#else
DeviceContextVector device_list_;
#endif // NOT_USE_STL
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_INCLUDE_CONTEXT_H_
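Note: the struct above keeps building in both configurations because String resolves to std::string in STL builds and to the self-defined class from lite_utils.h when NOT_USE_STL is defined. A minimal usage sketch (illustrative only, not part of this commit):

#include "include/context.h"

// Configure a runtime context; assigning from a const char* works for both
// std::string and the self-defined String (operator=(const char *)).
mindspore::lite::Context MakeContext() {
  mindspore::lite::Context ctx;
  ctx.vendor_name_ = "example-vendor";  // hypothetical vendor name
  ctx.thread_num_ = 2;                  // thread pool size used by the session
  return ctx;
}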
@@ -17,8 +17,7 @@
#ifndef MINDSPORE_LITE_INCLUDE_ERRORCODE_H_
#define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_

#include <string>
#include <map>
#include "include/lite_utils.h"

namespace mindspore {
namespace lite {

@@ -67,7 +66,7 @@ constexpr int RET_INPUT_PARAM_INVALID = -600; /**< Invalid input param by user.
/// \param[in] error_code define return status of procedure.
///
/// \return String of errorcode info.
std::string GetErrorInfo(STATUS error_code);
String GetErrorInfo(STATUS error_code);

} // namespace lite
} // namespace mindspore
@@ -17,10 +17,9 @@
#ifndef MINDSPORE_LITE_INCLUDE_LITE_SESSION_H
#define MINDSPORE_LITE_INCLUDE_LITE_SESSION_H

#include <memory>
#include <vector>
#include <string>
#ifndef NOT_USE_STL
#include <unordered_map>
#endif // NOT_USE_STL
#include "include/ms_tensor.h"
#include "include/model.h"
#include "include/context.h"

@@ -66,14 +65,14 @@ class MS_API LiteSession {
/// \brief Get input MindSpore Lite MSTensors of model.
///
/// \return The vector of MindSpore Lite MSTensor.
virtual std::vector<tensor::MSTensor *> GetInputs() const = 0;
virtual Vector<tensor::MSTensor *> GetInputs() const = 0;

/// \brief Get input MindSpore Lite MSTensors of model by tensor name.
///
/// \param[in] node_name Define tensor name.
///
/// \return The vector of MindSpore Lite MSTensor.
virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const = 0;
virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const = 0;

/// \brief Run session with callback.
///

@@ -92,24 +91,26 @@ class MS_API LiteSession {
/// \note Deprecated, replace with GetOutputByTensorName
///
/// \return The vector of MindSpore Lite MSTensor.
virtual std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const = 0;
virtual Vector<tensor::MSTensor *> GetOutputsByNodeName(const String &node_name) const = 0;

#ifndef NOT_USE_STL
/// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name.
///
/// \return The map of output tensor name and MindSpore Lite MSTensor.
virtual std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const = 0;
virtual std::unordered_map<String, mindspore::tensor::MSTensor *> GetOutputs() const = 0;
#endif

/// \brief Get name of output tensors of model compiled by this session.
///
/// \return The vector of string as output tensor names in order.
virtual std::vector<std::string> GetOutputTensorNames() const = 0;
virtual Vector<String> GetOutputTensorNames() const = 0;

/// \brief Get output MindSpore Lite MSTensors of model by tensor name.
///
/// \param[in] tensor_name Define tensor name.
///
/// \return Pointer of MindSpore Lite MSTensor.
virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const = 0;
virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const = 0;

/// \brief Resize inputs shape.
///

@@ -117,7 +118,7 @@ class MS_API LiteSession {
/// \param[in] dims Define the inputs new shape.
///
/// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h.
virtual int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) = 0;
virtual int Resize(const Vector<tensor::MSTensor *> &inputs, const Vector<Vector<int>> &dims) = 0;
};
} // namespace session
} // namespace mindspore
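The session interface above now exposes only Vector/String, which alias the std containers in ordinary builds. A hedged usage sketch (not part of this commit; assumes a session has already been created and its graph compiled):

// Drive a LiteSession through the uniform interface; `auto` keeps the sketch
// valid whether Vector/String are the std or the self-defined containers.
int RunOnce(mindspore::session::LiteSession *session) {
  auto inputs = session->GetInputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    // fill inputs[i]->MutableData() with real input data here
  }
  if (session->RunGraph() != mindspore::lite::RET_OK) {
    return -1;
  }
  auto names = session->GetOutputTensorNames();
  for (size_t i = 0; i < names.size(); ++i) {
    mindspore::tensor::MSTensor *out = session->GetOutputByTensorName(names[i]);
    (void)out;  // inspect out->shape() / out->data() as needed
  }
  return 0;
}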
@@ -16,31 +16,383 @@

#ifndef MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
#define MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_

#ifndef NOT_USE_STL
#include <vector>
#include <string>
#include <memory>
#include "include/ms_tensor.h"
#include <functional>
#else
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#endif // NOT_USE_STL

#ifndef MS_API
#ifdef _WIN32
#define MS_API __declspec(dllexport)
#else
#define MS_API __attribute__((visibility("default")))
#endif
#endif

namespace mindspore {
class Allocator;

namespace schema {
struct Tensor;
} // namespace schema

namespace tensor {
class MSTensor;
} // namespace tensor

namespace lite {
struct DeviceContext;
} // namespace lite

#ifdef NOT_USE_STL
class String {
public:
String();
String(size_t count, char ch);
String(const char *s, size_t count);
explicit String(const char *s);
String(const String &other);
String(const String &other, size_t pos, size_t count = npos);

~String();

String &operator=(const String &str);
String &operator=(const char *str);

char &at(size_t pos);
const char &at(size_t pos) const;
inline char &operator[](size_t pos);
inline const char &operator[](size_t pos) const;
char *data() noexcept;
const char *data() const noexcept;
const char *c_str() const noexcept;

// capacity
bool empty() const noexcept;
size_t size() const noexcept;
size_t length() const noexcept;

// operations
void clear() noexcept;
String &append(size_t count, const char ch);
String &append(const String &str);
String &append(const char *s);
String &operator+(const String &str);
String &operator+=(const String &str);
String &operator+=(const char *str);
String &operator+=(const char ch);
int compare(const String &str) const;
int compare(const char *str) const;

String substr(size_t pos = 0, size_t count = npos) const;

static const size_t npos = -1;

private:
size_t size_;
char *buffer_;
};

String operator+(const String &str1, const char *str2);
String operator+(const char *str1, const String &str2);

String to_string(int32_t value);
String to_string(float value);

#define DEFAULT_CAPACITY 4
#define MS_C_EXCEPTION(...) exit(1)
#define MIN(x, y) ((x < y) ? (x) : (y))
template <typename T>
class Vector {
public:
Vector() {
size_ = 0;
capacity_ = DEFAULT_CAPACITY;
elem_size_ = sizeof(T);
data_ = nullptr;
}

explicit Vector(size_t size) {
size_ = size;
elem_size_ = sizeof(T);
capacity_ = (size == 0 ? DEFAULT_CAPACITY : size);
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memset(data_, 0, capacity_ * elem_size_);
}

Vector(size_t size, const T &value) {
size_ = size;
elem_size_ = sizeof(T);
capacity_ = (size == 0 ? DEFAULT_CAPACITY : size);
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
for (int i = 0; i < size; ++i) {
data_[i] = value;
}
}

Vector(const Vector<T> &vec) {
size_ = vec.size_;
elem_size_ = sizeof(T);
capacity_ = vec.capacity_;
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(data_, vec.data_, size_ * elem_size_);
}

~Vector() {
if (data_ != nullptr) {
free(data_);
}
}

void clear() {
size_ = 0;
if (data_ != nullptr) {
free(data_);
data_ = nullptr;
}
}

void push_back(const T &elem) {
if (data_ == nullptr) {
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
} else if (size_ == capacity_) {
resize(size_ + 1);
--size_;
}
data_[size_] = elem;
++size_;
}

void push_back(T &&elem) {
if (data_ == nullptr) {
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
} else if (size_ == capacity_) {
resize(size_ + 1);
--size_;
}
data_[size_] = elem;
++size_;
}

void pop_back() {
if (size_ > 0) {
--size_;
} else {
MS_C_EXCEPTION("Index is out of range!");
}
}

void insert(const T &elem, size_t index) {
if (index <= size_) {
++size_;
if (size_ > capacity_) {
resize(size_);
}
if (index == size_ - 1) {
push_back(elem);
} else {
memmove(data_ + index + 1, data_ + index, (size_ - index - 1) * elem_size_);
data_[index] = elem;
}
} else {
MS_C_EXCEPTION("Input index is out of range!");
}
}

T *begin() { return data_; }

const T *begin() const { return data_; }

T *end() { return data_ + size_; }

const T *end() const { return data_ + size_; }

T &front() {
if (size_ > 0) {
return data_[0];
}
MS_C_EXCEPTION("Index is out of range!");
}

const T &front() const {
if (size_ > 0) {
return data_[0];
}
MS_C_EXCEPTION("Index is out of range!");
}

T &back() {
if (size_ > 0) {
return data_[size_ - 1];
}
MS_C_EXCEPTION("Index is out of range!");
}

const T &back() const {
if (size_ > 0) {
return data_[size_ - 1];
}
MS_C_EXCEPTION("Index is out of range!");
}

T &at(size_t index) {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

const T &at(size_t index) const {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

T &operator[](size_t index) {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

const T &operator[](size_t index) const {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}

T *data() { return data_; }

const T *data() const { return data_; }

size_t size() const { return size_; }

size_t capacity() const { return capacity_; }

bool empty() const { return size_ == 0; }

void erase(size_t index) {
if (index == size_ - 1) {
--size_;
} else if (index < size_) {
memmove(data_ + index, data_ + index + 1, (size_ - index - 1) * elem_size_);
--size_;
} else {
MS_C_EXCEPTION("Input index is out of range!");
}
}

void resize(size_t size) {
while (size > capacity_) {
capacity_ *= 2;
}
T *tmp = data_;
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(data_, tmp, MIN(size, size_) * elem_size_);
size_ = size;
free(tmp);
}

void reserve(size_t capacity) {
if (capacity > capacity_) {
capacity_ = capacity;
}
}

Vector<T> &operator=(const Vector<T> &vec) {
if (this == &vec) {
return *this;
}
size_ = vec.size_;
elem_size_ = sizeof(T);
capacity_ = vec.capacity_;
data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(data_, vec.data_, size_ * elem_size_);
return *this;
}

private:
size_t size_;
size_t elem_size_;
size_t capacity_;
T *data_;
};
using TensorPtrVector = Vector<mindspore::schema::Tensor *>;
using Uint32Vector = Vector<uint32_t>;
using AllocatorPtr = void *;
using DeviceContextVector = Vector<lite::DeviceContext>;
using KernelCallBack = void (*)(void *, void *);
#else
/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically.
///
/// \note List public class and interface for reference.

/// \brief DeviceContext defined a device context.
struct DeviceContext;
class Allocator;
using AllocatorPtr = std::shared_ptr<Allocator>;

using TensorPtrVector = std::vector<mindspore::schema::Tensor *>;
using DeviceContextVector = std::vector<DeviceContext>;
using Uint32Vector = std::vector<uint32_t>;
template <typename T>
using Vector = std::vector<T>;

template <typename T>
inline std::string to_string(T t) {
return std::to_string(t);
}

namespace tensor {
using String = std::string;
using AllocatorPtr = std::shared_ptr<Allocator>;
} // namespace tensor

namespace session {
using String = std::string;
} // namespace session

/// \brief CallBackParam defined input arguments for callBack function.
struct CallBackParam {
session::String node_name; /**< node name argument */
session::String node_type; /**< node type argument */
};

struct GPUCallBackParam : CallBackParam {
double execute_time{-1.f};
};

/// \brief KernelCallBack defined the function pointer for callBack.
using KernelCallBack = std::function<bool(Vector<tensor::MSTensor *> inputs, Vector<tensor::MSTensor *> outputs,
const CallBackParam &opInfo)>;

namespace lite {
using String = std::string;
using DeviceContextVector = std::vector<DeviceContext>;

/// \brief Set data of MSTensor from string vector.
///

@@ -48,12 +400,13 @@ using AllocatorPtr = std::shared_ptr<Allocator>;
/// \param[out] MSTensor.
///
/// \return STATUS as an error code of this interface, STATUS is defined in errorcode.h.
int MS_API StringsToMSTensor(const std::vector<std::string> &inputs, tensor::MSTensor *tensor);
int MS_API StringsToMSTensor(const Vector<String> &inputs, tensor::MSTensor *tensor);

/// \brief Get string vector from MSTensor.
/// \param[in] MSTensor.
/// \return string vector.
std::vector<std::string> MS_API MSTensorToStrings(const tensor::MSTensor *tensor);
Vector<String> MS_API MSTensorToStrings(const tensor::MSTensor *tensor);
} // namespace lite
#endif // NOT_USE_STL
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
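The self-defined String and Vector<T> above cover only the subset of the std API that the Lite headers use. A small smoke test, assuming a NOT_USE_STL build (illustrative only, not part of this commit):

#include "include/lite_utils.h"

void ContainerSmokeTest() {
  mindspore::Vector<int> dims(3, 1);          // {1, 1, 1}
  dims[1] = 224;                              // -> {1, 224, 1}
  dims.push_back(3);                          // grows past the initial capacity

  mindspore::String name("conv1");            // explicit const char* constructor
  name += "_output";                          // -> "conv1_output"
  mindspore::String suffix = name.substr(5);  // -> "_output"
  if (name.compare("conv1_output") == 0 && !suffix.empty()) {
    // both containers behave like their std counterparts for this subset
  }
}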
@@ -15,7 +15,7 @@
*/
#ifndef MINDSPORE_LITE_INCLUDE_MODEL_H_
#define MINDSPORE_LITE_INCLUDE_MODEL_H_
#include <vector>

#include "include/lite_utils.h"

namespace mindspore::lite {

@@ -28,7 +28,7 @@ struct MS_API Model {
Uint32Vector output_indices_;
int quant_type_;
};
using NodePtrVector = std::vector<Node *>;
using NodePtrVector = Vector<Node *>;
struct SubGraph {
String name_;
Uint32Vector input_indices_;

@@ -36,7 +36,7 @@ struct MS_API Model {
Uint32Vector node_indices_;
Uint32Vector tensor_indices_;
};
using SubGraphPtrVector = std::vector<SubGraph *>;
using SubGraphPtrVector = Vector<SubGraph *>;
String name_;
String version_;
TensorPtrVector all_tensors_;
@@ -17,21 +17,9 @@
#ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
#define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_

#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "include/lite_utils.h"
#include "ir/dtype/type_id.h"

#ifndef MS_API
#ifdef _WIN32
#define MS_API __declspec(dllexport)
#else
#define MS_API __attribute__((visibility("default")))
#endif
#endif

namespace mindspore {
namespace tensor {
/// \brief MSTensor defined tensor in MindSpore Lite.

@@ -48,7 +36,7 @@ class MS_API MSTensor {
/// \brief Create a MSTensor.
///
/// \return Pointer to an instance of MindSpore Lite MSTensor.
static MSTensor *CreateTensor(const std::string &name, TypeId type, const std::vector<int> &shape, const void *data,
static MSTensor *CreateTensor(const String &name, TypeId type, const Vector<int> &shape, const void *data,
size_t data_len);

/// \brief Get data type of the MindSpore Lite MSTensor.

@@ -62,10 +50,10 @@ class MS_API MSTensor {
/// \brief Get shape of the MindSpore Lite MSTensor.
///
/// \return A vector of int as the shape of the MindSpore Lite MSTensor.
virtual std::vector<int> shape() const = 0;
virtual Vector<int> shape() const = 0;

/// \brief Set the shape of MSTensor.
virtual void set_shape(const std::vector<int> &name) = 0;
virtual void set_shape(const Vector<int> &name) = 0;

/// \brief Get number of element in MSTensor.
///

@@ -80,10 +68,10 @@ class MS_API MSTensor {
/// \brief Get the name of MSTensor.
///
/// \return the name of MSTensor.
virtual std::string tensor_name() const = 0;
virtual String tensor_name() const = 0;

/// \brief Set the name of MSTensor.
virtual void set_tensor_name(const std::string name) = 0;
virtual void set_tensor_name(const String name) = 0;

/// \brief Get the pointer of data in MSTensor.
///

@@ -105,18 +93,5 @@ class MS_API MSTensor {
virtual void set_data(void *data) = 0;
};
} // namespace tensor
/// \brief CallBackParam defined input arguments for callBack function.
struct CallBackParam {
std::string node_name; /**< node name argument */
std::string node_type; /**< node type argument */
};

struct GPUCallBackParam : CallBackParam {
double execute_time{-1.f};
};

/// \brief KernelCallBack defined the function pointer for callBack.
using KernelCallBack = std::function<bool(std::vector<tensor::MSTensor *> inputs,
std::vector<tensor::MSTensor *> outputs, const CallBackParam &opInfo)>;
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
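CreateTensor and the shape accessors now take the uniform types as well. A sketch assuming a NOT_USE_STL build; the tensor name, shape, and buffer below are hypothetical:

#include "include/ms_tensor.h"

mindspore::tensor::MSTensor *MakeDemoTensor() {
  mindspore::Vector<int> shape(3, 1);  // {1, 1, 1}
  shape[1] = 3;                        // -> {1, 3, 1}
  static float buf[3] = {0.0f, 1.0f, 2.0f};
  return mindspore::tensor::MSTensor::CreateTensor(
      mindspore::String("demo_input"), mindspore::kNumberTypeFloat32, shape, buf, sizeof(buf));
}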
@@ -17,7 +17,7 @@
#ifndef MINDSPORE_LITE_INCLUDE_VERSION_H_
#define MINDSPORE_LITE_INCLUDE_VERSION_H_

#include <string>
#include "include/lite_utils.h"

namespace mindspore {
namespace lite {

@@ -28,11 +28,11 @@ const int ms_version_revision = 0;
/// \brief Global method to get a version string.
///
/// \return The version string of MindSpore Lite.
inline std::string Version() {
return "MindSpore Lite " + std::to_string(ms_version_major) + "." + std::to_string(ms_version_minor) + "." +
std::to_string(ms_version_revision);
inline String Version() {
return "MindSpore Lite " + to_string(ms_version_major) + "." + to_string(ms_version_minor) + "." +
to_string(ms_version_revision);
}
} // namespace lite
} // namespace mindspore

#endif // LITE_VERSION_H
#endif // MINDSPORE_LITE_INCLUDE_VERSION_H_
@@ -22,6 +22,7 @@ set(WRAPPER_SRC
${WRAPPER_DIR}/int8/resize_int8_wrapper.c
${WRAPPER_DIR}/int8/slice_int8_wrapper.c
${WRAPPER_DIR}/int8/batchnorm_int8_wrapper.c
${LITE_DIR}/src/common/string.cc
)

list(APPEND FILE_SET ${WRAPPER_SRC} ${RUNTIME_SRC})
@@ -34,7 +34,8 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::unique_ptr<CoderContext>
ofs << " weight.c.o\n"
<< " net.c.o\n"
<< " session.cc.o\n"
<< " tensor.cc.o\n";
<< " tensor.cc.o\n"
<< " string.cc.o\n";
if (config->debug_mode()) {
ofs << " debug_utils.c.o\n";
}
@@ -26,6 +26,14 @@

namespace mindspore::lite::micro {
void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
auto array_tostring = [&ofs](const std::vector<int> &array, const std::string &name) {
size_t num = array.size();
ofs << " Vector<int> " << name << ";\n";
ofs << " " << name << ".resize(" << num << ");\n";
for (size_t i = 0; i < num; ++i) {
ofs << " " << name << "[" << i << "] = " << array[i] << ";\n";
}
};
std::vector<Tensor *> inputs = ctx->graph_inputs();
std::vector<Tensor *> outputs = ctx->graph_outputs();
size_t inputs_size = inputs.size();

@@ -36,20 +44,21 @@ void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr<CoderCont
ofs << " inputs_.resize(" << inputs_size << ");\n";
for (size_t i = 0; i < inputs_size; ++i) {
Tensor *input = inputs[i];
ofs << " inputs_[" << i << "] = new (std::nothrow) MTensor(\"" << input->tensor_name() << "\", "
<< EnumNameDataType(input->data_type()) << ", " << ArrayToString(input->shape()) << ");\n";
std::string shape_i = "in_shape_" + std::to_string(i);
array_tostring(input->shape(), shape_i);
ofs << " inputs_[" << i << "] = new (std::nothrow) MTensor(String(\"" << input->tensor_name() << "\"), "
<< EnumNameDataType(input->data_type()) << ", " << shape_i << ");\n";
ofs << " MS_ERROR_IF_NULL(inputs_[" << i << "]);\n";
}
ofs << " outputs_.resize(" << outputs_size << ");\n";
for (size_t i = 0; i < outputs_size; ++i) {
Tensor *output = outputs[i];
ofs << " outputs_[" << i << "] = new (std::nothrow) MTensor(\"" << output->tensor_name() << "\", "
<< EnumNameDataType(output->data_type()) << ", " << ArrayToString(output->shape()) << ");\n";
std::string shape_i = "out_shape_" + std::to_string(i);
array_tostring(output->shape(), shape_i);
ofs << " outputs_[" << i << "] = new (std::nothrow) MTensor(String(\"" << output->tensor_name() << "\"), "
<< EnumNameDataType(output->data_type()) << ", " << shape_i << ");\n";
ofs << " MS_ERROR_IF_NULL(outputs_[" << i << "]);\n";
}
ofs << " for (const auto &output: outputs_) {\n"
" output_tensor_map_[output->tensor_name()] = output;\n"
" }\n";
ofs << " return RET_OK;\n";
ofs << "}\n\n";
}
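For reference, the emitter above now writes shape construction into the generated session.cc roughly as follows (illustrative output for a single input named "input_0" with shape 1x28x28x1; real names, data types, and shapes come from the model being coded):

  inputs_.resize(1);
  Vector<int> in_shape_0;
  in_shape_0.resize(4);
  in_shape_0[0] = 1;
  in_shape_0[1] = 28;
  in_shape_0[2] = 28;
  in_shape_0[3] = 1;
  inputs_[0] = new (std::nothrow) MTensor(String("input_0"), kNumberTypeFloat32, in_shape_0);
  MS_ERROR_IF_NULL(inputs_[0]);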
@@ -72,10 +72,6 @@ void PrintData(void *data, size_t data_number) {
}

void TensorToString(tensor::MSTensor *tensor) {
uint8_t i = 0;
std::cout << "uint8: " << i << std::endl;

std::cout << "Name: " << tensor->tensor_name();
std::cout << ", DataType: " << tensor->data_type();
std::cout << ", Size: " << tensor->Size();
std::cout << ", Shape:";

@@ -129,7 +125,7 @@ int main(int argc, const char **argv) {
}

// set model inputs tensor data
std::vector<tensor::MSTensor *> inputs = session->GetInputs();
Vector<tensor::MSTensor *> inputs = session->GetInputs();
size_t inputs_num = inputs.size();
void *inputs_binbuf[inputs_num];
int inputs_size[inputs_num];

@@ -150,13 +146,6 @@ int main(int argc, const char **argv) {
return lite::RET_ERROR;
}

auto outputs = session->GetOutputs();
std::cout << "output size: " << outputs.size() << std::endl;
for (const auto &item : outputs) {
auto output = item.second;
TensorToString(output);
}

std::cout << "run benchmark success" << std::endl;
delete session;
for (size_t i = 0; i < inputs_num; ++i) {
@@ -34,6 +34,8 @@ set(HEADER_PATH ${PKG_PATH}/inference)
option(MICRO_BUILD_ARM64 "build android arm64" OFF)
option(MICRO_BUILD_ARM32A "build android arm32" OFF)

add_compile_definitions(NOT_USE_STL)

if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)

@@ -95,6 +97,8 @@ set(HEADER_PATH ${PKG_PATH}/inference)
message("operator lib path: ${OP_LIB}")
message("operator header path: ${OP_HEADER_PATH}")

add_compile_definitions(NOT_USE_STL)

include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
include_directories(${OP_HEADER_PATH})
include_directories(${HEADER_PATH})
@@ -63,31 +63,25 @@ class LiteSession : public session::LiteSession {

int CompileGraph(lite::Model *model) override;

std::vector<tensor::MSTensor *> GetInputs() const override;
Vector<tensor::MSTensor *> GetInputs() const override;

mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override { return nullptr; }
mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { return nullptr; }

int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;

std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const override;
Vector<tensor::MSTensor *> GetOutputsByNodeName(const String &node_name) const override;

std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const override;
Vector<String> GetOutputTensorNames() const override;

std::vector<std::string> GetOutputTensorNames() const override;
mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override;

mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override;

int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) override;
int Resize(const Vector<tensor::MSTensor *> &inputs, const Vector<Vector<int>> &dims) override { return RET_ERROR; }

int InitRuntimeBuffer();

private:
int SetInputsData(const std::vector<MTensor *> &inputs) const;
std::vector<MTensor *> inputs_;
std::vector<MTensor *> outputs_;
std::unordered_map<std::string, mindspore::tensor::MSTensor *> output_tensor_map_;
std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_node_map_;

Vector<MTensor *> inputs_;
Vector<MTensor *> outputs_;
void *runtime_buffer_;
};

@@ -95,7 +89,6 @@ class LiteSession : public session::LiteSession {
} // namespace mindspore

#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_

)RAW";

const char *session_source = R"RAW(

@@ -130,8 +123,7 @@ LiteSession::~LiteSession() {
delete input;
input = nullptr;
}
for (auto &item : output_tensor_map_) {
auto output = item.second;
for (auto &output : outputs_) {
if (output == nullptr) {
continue;
}

@@ -153,46 +145,28 @@ int LiteSession::InitRuntimeBuffer() {
return RET_OK;
}

std::vector<tensor::MSTensor *> LiteSession::GetInputs() const {
std::vector<tensor::MSTensor *> inputs;
inputs.insert(inputs.begin(), inputs_.begin(), inputs_.end());
Vector<tensor::MSTensor *> LiteSession::GetInputs() const {
Vector<tensor::MSTensor *> inputs;
for (const auto &input : inputs_) {
inputs.push_back(input);
}
return inputs;
}

std::vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const std::string &node_name) const {
auto iter = output_node_map_.find(node_name);
if (iter == output_node_map_.end()) {
std::vector<tensor::MSTensor *> empty;
return empty;
}
return iter->second;
Vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const String &node_name) const {
Vector<tensor::MSTensor *> outputs;
return outputs;
}

std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const {
return output_tensor_map_;
}

std::vector<std::string> LiteSession::GetOutputTensorNames() const {
std::vector<std::string> output_names;
for (const auto &item : output_node_map_) {
for (const auto &output : item.second) {
output_names.emplace_back(output->tensor_name());
}
Vector<String> LiteSession::GetOutputTensorNames() const {
Vector<String> output_names;
for (const auto &output : outputs_) {
output_names.push_back(output->tensor_name());
}
return output_names;
}

mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::string &tensor_name) const {
auto item = output_tensor_map_.find(tensor_name);
if (item == output_tensor_map_.end()) {
return nullptr;
}
return item->second;
}

int LiteSession::Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) {
return RET_OK;
}
mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { return nullptr; }

} // namespace lite

@@ -219,7 +193,6 @@ session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, s
return session;
}
} // namespace mindspore

)RAW";

} // namespace mindspore::lite::micro
@@ -19,7 +19,6 @@
namespace mindspore::lite::micro {

const char *tensor_header = R"RAW(

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*

@@ -40,8 +39,6 @@ const char *tensor_header = R"RAW(
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_

#include "include/ms_tensor.h"
#include <utility>
#include <vector>

namespace mindspore {
namespace lite {

@@ -51,7 +48,7 @@ struct QuantArg {
float var_corr{1};
float mean_corr{0};
bool inited;
std::vector<float> clusters{};
Vector<float> clusters{};
int bitNum;
int roundType;
int multiplier;

@@ -61,38 +58,35 @@ struct QuantArg {
class MTensor : public mindspore::tensor::MSTensor {
public:
MTensor() = default;
MTensor(std::string name, enum TypeId type, std::vector<int32_t> shape)
: tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)) {}
MTensor(String name, TypeId type, Vector<int32_t> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
~MTensor() override;

TypeId data_type() const override { return data_type_; }
std::vector<int> shape() const override { return shape_; }
int DimensionSize(size_t index) const override;
Vector<int> shape() const override { return shape_; }
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
int ElementsNum() const override;
size_t Size() const override;
String tensor_name() const override { return tensor_name_; }
void set_tensor_name(const String name) override { tensor_name_ = name; }
void *MutableData() override;
std::string tensor_name() const override { return tensor_name_; }
void set_tensor_name(const std::string name) override { tensor_name_ = name; }
void *data() override { return data_; }
void set_data(void *data) override { data_ = data; }

private:
std::string tensor_name_;
String tensor_name_;
TypeId data_type_;
std::vector<int> shape_;
Vector<int> shape_;
void *data_ = nullptr;
std::vector<QuantArg> quant_params_;
Vector<QuantArg> quant_params_;
};

} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_

)RAW";

const char *tensor_source = R"RAW(

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*

@@ -154,14 +148,6 @@ MTensor::~MTensor() {
}
}

int MTensor::DimensionSize(const size_t index) const {
int dim_size = -1;
if (index < shape_.size()) {
dim_size = shape_[index];
}
return dim_size;
}

int MTensor::ElementsNum() const {
int elements = 1;
for (int i : shape_) {

@@ -183,7 +169,6 @@ void *MTensor::MutableData() {
}
} // namespace lite
} // namespace mindspore

)RAW";

} // namespace mindspore::lite::micro
@@ -111,6 +111,7 @@ int Generator::CodeSessionImplement() {
ofs << g_hwLicense;
ofs << "#include \"session.h\"\n";
ofs << "#include \"net.h\"\n\n";
ofs << "#include <new>\n\n";
CodeSessionCompileGraph(ofs, ctx_);
ofs << session_source;
return RET_OK;
@@ -60,4 +60,5 @@ endif()

# generate static library
add_library(ops STATIC ${OP_FILES})
target_compile_definitions(ops PRIVATE NOT_USE_STL)
install(TARGETS ops ARCHIVE DESTINATION ${LIB_PATH})
@@ -0,0 +1,300 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifdef NOT_USE_STL
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <float.h>
#include <stdint.h>
#include "include/lite_utils.h"

namespace mindspore {
String::String() {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}

String::String(size_t count, char ch) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (count + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memset(buffer_, ch, count);
buffer_[count] = '\0';
size_ = count;
}
String::String(const char *s, size_t count) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_t size_s = strlen(s);
if (size_s <= count) {
size_ = size_s;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy(buffer_, s, size_);
buffer_[size_] = '\0';
}

String::String(const char *s) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_ = strlen(s);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(buffer_, s, size_ + 1);
}

String::String(const String &other) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (other.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = other.size_;
memcpy(buffer_, other.buffer_, size_ + 1);
}

String::String(const String &other, size_t pos, size_t count) {
if (pos >= other.size_) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
} else {
if (count == npos) {
count = other.size_ - pos;
}
if (pos + count > other.size_) {
size_ = other.size_ - pos;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy(buffer_, other.buffer_ + pos, size_);
buffer_[size_] = '\0';
}
}

String::~String() { free(buffer_); }

String &String::operator=(const String &str) {
if (this == &str) {
return *this;
}
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (str.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = str.size_;
memcpy(buffer_, str.buffer_, size_ + 1);
return *this;
}

String &String::operator=(const char *str) {
free(buffer_);
if (str == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return *this;
}
size_t size_s = strlen(str);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_s + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = size_s;
memcpy(buffer_, str, size_ + 1);
return *this;
}

char &String::at(size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
const char &String::at(size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
char &String::operator[](size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
const char &String::operator[](size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
char *String::data() noexcept { return buffer_; };
const char *String::data() const noexcept { return buffer_; }
const char *String::c_str() const noexcept { return buffer_; }

// capacity
bool String::empty() const noexcept { return size_ == 0; }
size_t String::size() const noexcept { return size_; }
size_t String::length() const noexcept { return size_; }

// operations
void String::clear() noexcept {
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}

String &String::operator+(const String &str) {
(*this) += str;
return *this;
}

String &String::operator+=(const String &str) {
size_t new_size = size_ + str.size_;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat(tmp, str.buffer_, str.size_);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}

String &String::operator+=(const char *str) {
if (str == nullptr) {
return *this;
}
size_t str_size = strlen(str);
size_t new_size = size_ + str_size;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat(tmp, str, str_size);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}

String &String::operator+=(const char ch) {
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 2)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
tmp[size_] = ch;
tmp[size_ + 1] = '\0';
free(buffer_);
buffer_ = tmp;
size_ += 1;
return *this;
}

String &String::append(size_t count, const char ch) {
(*this) += ch;
return *this;
}
String &String::append(const String &str) {
(*this) += str;
return *this;
}
String &String::append(const char *str) {
if (str == nullptr) {
return *this;
}
(*this) += str;
return *this;
}

int String::compare(const String &str) const { return strcmp(buffer_, str.buffer_); }
int String::compare(const char *str) const { return strcmp(buffer_, str); }

String String::substr(size_t pos, size_t count) const { return String(*this, pos, count); }

String operator+(const String &str1, const char *str2) {
String str = str1;
str += str2;
return str;
}

String operator+(const char *str1, const String &str2) {
String str = str2;
str += str1;
return str;
}

String to_string(int32_t value) {
char tmp[sizeof(int32_t) * 4];
snprintf(tmp, sizeof(int32_t) * 4, "%d", value);
return String(tmp, strlen(tmp));
}

String to_string(float value) {
char tmp[FLT_MAX_10_EXP + 20];
snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value);
return String(tmp, strlen(tmp));
}
} // namespace mindspore
#endif // NOT_USE_STL
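A minimal check of the helpers defined above (illustrative only, assumes a NOT_USE_STL build):

#include <stdio.h>
#include "include/lite_utils.h"

int main() {
  mindspore::String version("MindSpore Lite ");
  version += mindspore::to_string(1);   // int32_t overload -> "1"
  version += ".";
  version += mindspore::to_string(1);
  printf("%s, length %zu\n", version.c_str(), version.length());
  return version.compare("MindSpore Lite 1.1") == 0 ? 0 : 1;
}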