internal/model.h: add ReduceMode; add operator new and operator delete for internal MSTensor
parent ab75268187
commit af174ae3d0
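Summary of the change: an internal ReduceMode enum is added to internal/model.h (apparently mirroring the schema numbering so existing mode_ values keep their meaning, with ReduceMode_MIN/ReduceMode_MAX as range markers); the internal arithmetic and reduce kernels switch from the mindspore::schema enums to the internal ActivationType and ReduceMode, allowing the schema/ops_generated.h includes to be dropped; and the internal MSTensor gains class-specific operator new/new[]/delete/delete[] backed by malloc/free, together with a DestroyTensor helper. The lite test build picks up internal/src/ms_tensor.cc, and the infer test now destroys the tensors it creates.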
@@ -205,6 +205,18 @@ enum ActivationType {
   UNKNOW = 16
 };
 
+enum ReduceMode {
+  ReduceMode_ReduceMean = 0,
+  ReduceMode_ReduceMax = 1,
+  ReduceMode_ReduceMin = 2,
+  ReduceMode_ReduceProd = 3,
+  ReduceMode_ReduceSum = 4,
+  ReduceMode_ReduceSumSquare = 5,
+  ReduceMode_ReduceASum = 6,
+  ReduceMode_MIN = ReduceMode_ReduceMean,
+  ReduceMode_MAX = ReduceMode_ReduceASum
+};
+
 typedef struct Node {
   String name_;
   NodeType node_type_;
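Because ReduceMode is a plain (unscoped) enum, ReduceMode_MIN and ReduceMode_MAX can serve to range-check a raw mode value before use. A minimal sketch, assuming only the enum above; the helper name is illustrative and not part of this commit:

// Sketch: validate a raw integer before treating it as a ReduceMode.
inline bool IsValidReduceMode(int mode) {
  return mode >= ReduceMode_MIN && mode <= ReduceMode_MAX;
}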
@@ -136,7 +136,16 @@ typedef struct MSTensor {
   ///
   /// \return Byte size of data in MSTensor.
   size_t Size() const;
+
+  static void *operator new(std::size_t sz);
+
+  static void *operator new[](std::size_t sz);
+
+  static void operator delete(void *ptr, std::size_t sz);
+
+  static void operator delete[](void *ptr, std::size_t sz);
 } MSTensor;
 
 MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape);
+void DestroyTensor(MSTensor *ptr);
 #endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
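With these class-specific overloads declared, plain new/delete expressions on MSTensor dispatch to the definitions added later in ms_tensor.cc (malloc/free) rather than the global heap operators. A minimal usage sketch, assuming internal/include/ms_tensor.h is on the include path; the helper name is hypothetical, and the null check simply mirrors the one CreateTensor performs in this diff:

#include "internal/include/ms_tensor.h"

// Hypothetical helper, for illustration only.
MSTensor *MakeTensorOrNull() {
  MSTensor *t = new MSTensor();  // routed through MSTensor::operator new -> malloc
  if (t == NULL) {
    return NULL;                 // same failure handling as CreateTensor
  }
  return t;                      // caller releases it with delete (MSTensor::operator delete -> free)
}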
@@ -22,7 +22,6 @@
 #include "src/runtime/allocator.h"
 #include "nnacl/arithmetic_common.h"
 #include "nnacl/fp32/arithmetic.h"
-#include "schema/ops_generated.h"
 
 typedef int (*ArithmeticRun)(float *input0, float *input1, float *output, int element_size);
 typedef int (*ArithmeticOptRun)(float *input0, float *input1, float *output, int element_size,
@@ -167,9 +166,9 @@ int DoArithmeticInferShape(const TensorPtrVector &in_tensors, const TensorPtrVec
 
 int ChooseKernel(const int kernel_type, ArithmeticRun *arithmetic_run, ArithmeticParameter *params) {
   if (kernel_type == KernelType::Mul) {
-    if (params->activation_type_ == mindspore::schema::ActivationType_RELU) {
+    if (params->activation_type_ == ActivationType::RELU) {
       *arithmetic_run = ElementMulRelu;
-    } else if (params->activation_type_ == mindspore::schema::ActivationType_RELU6) {
+    } else if (params->activation_type_ == ActivationType::RELU6) {
       *arithmetic_run = ElementMulRelu6;
     } else {
       *arithmetic_run = ElementMul;
@@ -183,9 +182,9 @@ int ChooseKernel(const int kernel_type, ArithmeticRun *arithmetic_run, Arithmeti
 
 int ChooseOptKernel(const int kernel_type, ArithmeticOptRun *arithmetic_opt_run, ArithmeticParameter *params) {
   if (kernel_type == KernelType::Mul) {
-    if (params->activation_type_ == mindspore::schema::ActivationType_RELU) {
+    if (params->activation_type_ == ActivationType::RELU) {
       *arithmetic_opt_run = ElementOptMulRelu;
-    } else if (params->activation_type_ == mindspore::schema::ActivationType_RELU6) {
+    } else if (params->activation_type_ == ActivationType::RELU6) {
       *arithmetic_opt_run = ElementOptMulRelu6;
     } else {
       *arithmetic_opt_run = ElementOptMul;
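After this change, kernel selection depends only on the internal enums. A hedged sketch of the call pattern (the variable names, the node->primitive_ setup, and any return-value handling are assumptions, not taken from this diff):

// Sketch: pick a fused Mul kernel from the internal ActivationType.
ArithmeticParameter *param = reinterpret_cast<ArithmeticParameter *>(node->primitive_);  // assumed setup
ArithmeticRun arithmetic_run = NULL;
ChooseKernel(KernelType::Mul, &arithmetic_run, param);
// With param->activation_type_ == ActivationType::RELU this selects ElementMulRelu,
// with ActivationType::RELU6 it selects ElementMulRelu6, otherwise ElementMul.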
@@ -23,7 +23,6 @@
 #include "internal/include/errorcode.h"
 #include "nnacl/reduce_parameter.h"
 #include "nnacl/fp32/reduce.h"
-#include "schema/ops_generated.h"
 
 typedef int (*Reducer)(const int outer_size, const int inner_size, const int axis_size, const float *src_data,
                        float *dst_data, const int tid, const int thread_num);
@@ -202,9 +201,9 @@ int DoReduce(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tenso
 
   ReduceParameter *params = reinterpret_cast<ReduceParameter *>(node->primitive_);
   Reducer reducer = NULL;
-  if (params->mode_ == mindspore::schema::ReduceMode::ReduceMode_ReduceSum) {
+  if (params->mode_ == ReduceMode::ReduceMode_ReduceSum) {
     reducer = ReduceSum;
-  } else if (params->mode_ == mindspore::schema::ReduceMode::ReduceMode_ReduceMean) {
+  } else if (params->mode_ == ReduceMode::ReduceMode_ReduceMean) {
    reducer = ReduceMean;
   }
 
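DoReduce wires up only ReduceSum and ReduceMean here; any other ReduceMode value leaves reducer as NULL. A small sketch of a guard for that case, assuming only the code above; the helper is illustrative and not part of the commit:

// Sketch: only Sum and Mean are dispatched by DoReduce in this change.
static bool IsSupportedReduceMode(int mode) {
  return mode == ReduceMode::ReduceMode_ReduceSum || mode == ReduceMode::ReduceMode_ReduceMean;
}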
@@ -17,8 +17,9 @@
 #include <vector>
+#include <string>
 #include "internal/include/ms_tensor.h"
 
 MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape) {
-  MSTensor *tensor = new (std::nothrow) MSTensor();
+  MSTensor *tensor = new MSTensor();
   if (tensor == NULL) {
     return NULL;
   }
@@ -27,6 +28,8 @@ MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape) {
   return tensor;
 }
 
+void DestroyTensor(MSTensor *ptr) { delete ptr; }
+
 int MSTensor::ElementsNum() const {
   int result = 1;
   for (size_t i = 0; i < shape_.size(); ++i) {
@@ -200,3 +203,17 @@ int MSTensor::ElementsC4Num() const {
   }
   return result;
 }
+
+void *MSTensor::operator new(std::size_t sz) {
+  void *storage = malloc(sz);
+  return storage;
+}
+
+void *MSTensor::operator new[](std::size_t sz) {
+  void *storage = malloc(sz);
+  return storage;
+}
+
+void MSTensor::operator delete(void *ptr, std::size_t sz) { free(ptr); }
+
+void MSTensor::operator delete[](void *ptr, std::size_t sz) { free(ptr); }
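The sized delete overloads ignore the sz argument and simply free the pointer, and matching array forms are provided, so both scalar and array allocations of MSTensor stay on the malloc/free path. For illustration only, assuming the definitions above are linked in:

MSTensor *one  = new MSTensor();   // MSTensor::operator new      -> malloc
MSTensor *many = new MSTensor[4];  // MSTensor::operator new[]    -> malloc
delete one;                        // MSTensor::operator delete   -> free
delete[] many;                     // MSTensor::operator delete[] -> free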
@@ -40,6 +40,7 @@ set(TEST_LITE_SRC
     ${LITE_DIR}/internal/src/lite_session.cc
     ${LITE_DIR}/src/runtime/allocator.cc
+    ${LITE_DIR}/internal/src/ms_tensor.cc
     ${LITE_DIR}/internal/src/common/string.cc
     ${TOP_DIR}/mindspore/core/utils/log_adapter.cc
     ${TOP_DIR}/mindspore/core/gvar/logging_level.cc
 )
@@ -69,6 +69,8 @@ TEST_F(InferTest, TestSession) {
   }
   std::cout << "\n";
   CompareOutputData(reinterpret_cast<float *>(outvec.at(0)->data_), expect_out, kOutSize, 0.000001);
+  DestroyTensor(in);
+  DestroyTensor(out);
 }
 
 } // namespace mindspore
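The two DestroyTensor calls release the in and out tensors, presumably created with CreateTensor earlier in the test, pairing the test with the new DestroyTensor helper so those allocations are no longer leaked.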