!6193 [MS][LITE][Develop]lite/internal support log with C style

Merge pull request !6193 from chenjianping/lite_dev2
mindspore-ci-bot 2020-09-15 16:19:34 +08:00, committed by Gitee
commit 2421a06945
12 changed files with 109 additions and 54 deletions

View File

@@ -10,19 +10,25 @@ file(GLOB KERNEL_SRC
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/arithmetic_self.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/arithmetic.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/matmul.c
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32_grad/activation_grad.c
${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/fp32/*.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/fp32_grad/*.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/common/*.cc
)
if (SUPPORT_TRAIN)
file(GLOB TRAIN_KERNEL_SRC
${KERNEL_SRC}
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32_grad/activation_grad.c
${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/fp32_grad/*.cc
)
endif ()
list(REMOVE_ITEM KERNEL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/opt_op_handler.c)
set(CCSRC
${CMAKE_CURRENT_SOURCE_DIR}/src/lite_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/ms_tensor.cc
${TOP_DIR}/src/common/log_adapter.cc
${TOP_DIR}/src/runtime/allocator.cc
${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/logging_level.cc
${TOP_DIR}/src/runtime/allocator.cc
)
if (PLATFORM_ARM64)
@@ -32,7 +38,7 @@ if (PLATFORM_ARM64)
${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/MatmulFp32Opt.S)
set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C)
set(KERNEL_SRC ${KERNEL_SRC} ${ASSEMBLY_SRC})
add_library(mslite_internal SHARED ${CCSRC} ${KERNEL_SRC})
add_library(mslite_internal SHARED ${CCSRC} ${KERNEL_SRC} ${TRAIN_KERNEL_SRC})
target_link_libraries(mslite_internal log)
endif()

View File

@@ -27,8 +27,8 @@ enum NodeType {
NodeType_MAX = NodeType_CNode
};
enum KernelType {
Concat,
enum KernelType : int {
Concat = 0,
SoftMax,
Activation,
Conv2D,
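
A note on the enum change: giving KernelType an explicit underlying type and an explicit first value makes its representation well defined, which matters now that kernel types are printed through C varargs (%d in the new LITE_*_LOG macros) and used to index the kernel function tables. A minimal, self-contained sketch (the enum is truncated here; main() is illustrative only):

#include <cstdio>

// Truncated sketch of the patched enum: ": int" fixes the underlying type,
// and "Concat = 0" makes the table-index origin explicit.
enum KernelType : int { Concat = 0, SoftMax, Activation, Conv2D };

int main() {
  KernelType type = Activation;
  printf("kernel type: %d\n", type);  // unscoped enum promotes to int in varargs
  return 0;
}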

View File

@@ -17,7 +17,7 @@
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "utils/log_adapter.h"
#include "internal/src/lite_log.h"
int DoCommonInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors) {
TensorPtr input = in_tensors.at(0);

View File

@@ -19,7 +19,7 @@
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "nnacl/fp32/activation.h"
#include "utils/log_adapter.h"
#include "internal/src/lite_log.h"
#include "nnacl/errorcode.h"
int DoActivationInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
@@ -43,11 +43,11 @@ int DoActivation(const TensorPtrVector &in_tensors, const TensorPtrVector &out_t
float alpha = param->alpha_;
ret = LRelu(input_addr, length, output_addr, alpha);
} else {
MS_LOG(ERROR) << "Unsupport activation type " << param->type_;
LITE_ERROR_LOG("Unsupport activation type: %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
MS_LOG(ERROR) << "do activation(" << param->type_ << ") fail!ret: " << ret;
LITE_ERROR_LOG("do activation(%d) fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;

View File

@@ -18,7 +18,7 @@
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "utils/log_adapter.h"
#include "internal/src/lite_log.h"
#include "nnacl/fp32/arithmetic_self.h"
int DoArithmeticSelfInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
@@ -36,11 +36,11 @@ int DoArithmeticSelf(const TensorPtrVector &in_tensors, const TensorPtrVector &o
} else if (param->type_ == KernelType::Neg) {
ret = ElementNegative((float *)in_tensors[0]->data_, (float *)out_tensors[0]->data_, data_size);
} else {
MS_LOG(ERROR) << "Unsupport kernel type: " << param->type_;
LITE_ERROR_LOG("Unsupport kernel type: %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
MS_LOG(ERROR) << "do arithmetic " << param->type_ << " fail!ret: " << ret;
LITE_ERROR_LOG("do arithmetic %d fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;

View File

@@ -18,7 +18,7 @@
#include "nnacl/fp32/matmul.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "utils/log_adapter.h"
#include "internal/src/lite_log.h"
typedef struct MatMulCPUKernelData {
float *a_c12_ptr_;
@@ -92,12 +92,12 @@ int DoMatMulInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector
Int32Vector a_shape = input0->shape_;
Int32Vector b_shape = input1->shape_;
if (a_shape.size() < 2 || b_shape.size() < 2) {
MS_LOG(ERROR) << "inputs shape is invalid";
LITE_ERROR_LOG("inputs shape is invalid");
return RET_INPUT_TENSOR_ERROR;
}
for (size_t i = 0; i < a_shape.size() - 2; ++i) {
if (a_shape[i] != b_shape[i]) {
MS_LOG(ERROR) << "Op MatMul's dimensions must be equal";
LITE_ERROR_LOG("Op MatMul's dimensions must be equal");
return RET_INPUT_TENSOR_ERROR;
}
}
@@ -117,11 +117,11 @@ int DoMatMulInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector
int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
MS_LOG(ERROR) << "input data is NULL!";
LITE_LOG_ERROR("input data is NULL!");
return RET_PARAM_INVALID;
}
if (allocator == NULL) {
MS_LOG(ERROR) << "input allocator is NULL!";
LITE_LOG_ERROR("input allocator is NULL!");
return RET_PARAM_INVALID;
}
int batch = 1;
@@ -130,7 +130,8 @@ int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tenso
if (in_tensors.size() == 3) {
std::vector<int> bias_shape = in_tensors[2]->shape_;
if (bias_shape[bias_shape.size() - 1] != c_shape[c_shape.size() - 1]) {
MS_LOG(ERROR) << "The bias' dimension is not equal with column";
LITE_ERROR_LOG("The bias' dimension %d is not equal with column %d", bias_shape[bias_shape.size() - 1],
c_shape[c_shape.size() - 1]);
return RET_INPUT_TENSOR_ERROR;
}
}

View File

@@ -19,7 +19,7 @@
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "nnacl/fp32_grad/activation_grad.h"
#include "utils/log_adapter.h"
#include "internal/src/lite_log.h"
#include "nnacl/errorcode.h"
int DoActivationGradInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
@@ -45,11 +45,11 @@ int DoActivationGrad(const TensorPtrVector &in_tensors, const TensorPtrVector &o
float alpha = param->alpha_;
ret = LReluGrad(dy_data, x_data, length, dx_data, alpha);
} else {
MS_LOG(ERROR) << "Unsupport activation type " << param->type_;
LITE_ERROR_LOG("Unsupport activation type %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
MS_LOG(ERROR) << "do activation(" << param->type_ << ") fail!ret: " << ret;
LITE_ERROR_LOG("do activation(%d) fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;

View File

@@ -18,7 +18,7 @@
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "utils/log_adapter.h"
#include "internal/src/lite_log.h"
#include "nnacl/fp32/arithmetic_self.h"
#include "nnacl/fp32/arithmetic.h"
@@ -40,11 +40,11 @@ int DoArithmeticSelfGrad(const TensorPtrVector &in_tensors, const TensorPtrVecto
} else if (param->type_ == KernelType::NegGrad) {
ret = ElementNegative(dy_data, dx_data, data_size);
} else {
MS_LOG(ERROR) << "Unsupport kernel type: " << param->type_;
LITE_ERROR_LOG("Unsupport kernel type: %d", param->type_);
return RET_PARAM_INVALID;
}
if (ret != NNACL_OK) {
MS_LOG(ERROR) << "do arithmetic " << param->type_ << " fail!ret: " << ret;
LITE_ERROR_LOG("do arithmetic %d fail!ret: %d", param->type_, ret);
return RET_ERROR;
}
return RET_OK;

View File

@@ -0,0 +1,44 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_
#define MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_
#ifdef DEBUG
#include <assert.h>
#include <stdio.h>
#define LITE_DEBUG_LOG(format, ...) \
printf("[DEBUG] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_INFO_LOG(format, ...) \
printf("[INFO] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_WARNING_LOG(format, ...) \
printf("[WARNING] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_ERROR_LOG(format, ...) \
printf("[ERROR] [%s %s] [%s] [%d] " format "\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define LITE_LOG_ERROR(...) \
printf("[ERROR] [%s %s] [%s] [%d] %s\n", __DATE__, __TIME__, __FILE__, __LINE__, __VA_ARGS__)
#define MS_ASSERT(f) assert(f)
#else
#define LITE_DEBUG_LOG(...)
#define LITE_INFO_LOG(...)
#define LITE_WARNING_LOG(...)
#define LITE_ERROR_LOG(...)
#define LITE_LOG_ERROR(...)
#define MS_ASSERT(f) ((void)0)
#endif
#endif // MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_
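
For reference, a minimal usage sketch of the new macros (the function below is hypothetical; build with -DDEBUG so the logging branch is active). Note the division of labor: LITE_ERROR_LOG expects a printf-style format plus at least one argument, because its expansion ends in ", __VA_ARGS__"; LITE_LOG_ERROR covers fixed messages with no format arguments.

#include "internal/src/lite_log.h"

// Hypothetical call site exercising both macro flavors.
int CheckKernelType(int type, int max_type) {
  if (type < 0) {
    LITE_LOG_ERROR("kernel type is negative!");  // fixed string, no varargs
    return -1;
  }
  if (type >= max_type) {
    LITE_ERROR_LOG("Unsupported kernel type: %d", type);  // printf-style format
    return -1;
  }
  MS_ASSERT(type < max_type);  // assert() under DEBUG, a no-op otherwise
  LITE_DEBUG_LOG("kernel type %d accepted", type);
  return 0;
}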

View File

@@ -18,7 +18,7 @@
#include "internal/include/ms_tensor.h"
#include "src/runtime/allocator.h"
#include "internal/include/errorcode.h"
#include "utils/log_adapter.h"
#include "internal/src/lite_log.h"
#include "internal/src/kernel/fp32/activation.h"
#include "internal/src/kernel/fp32/arithmetic_self.h"
#include "internal/src/kernel/fp32/matmul.h"
@@ -42,8 +42,8 @@ static int ModelInferShape() {
size_t nodes_size = nodes.size();
for (size_t i = 0; i < nodes_size; ++i) {
auto node = nodes[i];
if (node->primitive_ == nullptr) {
MS_LOG(ERROR) << "node's primitive is NULL!";
if (node->primitive_ == NULL) {
LITE_ERROR_LOG("node's primitive is NULL!");
return RET_ERROR;
}
TensorPtrVector in_tensors;
@@ -57,16 +57,16 @@ static int ModelInferShape() {
int type = node->primitive_->type_;
InferShape infershape = g_infershape_funcs[type];
if (infershape == NULL) {
MS_LOG(ERROR) << "Unsupport kernel type: " << type;
LITE_ERROR_LOG("Unsupport kernel type: %d", type);
return RET_PARAM_INVALID;
}
int ret = (*infershape)(in_tensors, out_tensors, node->primitive_);
if (ret == RET_INFER_INVALID) {
g_infershape_interrupt = true;
MS_LOG(INFO) << node->name_ << "inferShape shouldn't be done before runtime, inferShape interrupt!";
LITE_INFO_LOG("%s inferShape shouldn't be done before runtime, inferShape interrupt!", node->name_.c_str());
}
if (ret != RET_OK) {
MS_LOG(ERROR) << "Infer shape fail!ret: " << ret;
LITE_ERROR_LOG("Infer shape fail!ret: %d", ret);
return ret;
}
}
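
The g_infershape_interrupt flag seen here implements a two-phase scheme: when a node's output shape cannot be inferred at CompileGraph time (RET_INFER_INVALID, typically because shapes depend on runtime data), a flag is set and inference is re-attempted per node inside RunGraph. A simplified stand-in for the flow (all names are illustrative, not the patch's real signatures):

#include <cstdio>

enum { kRetOk = 0, kRetInferInvalid = 1 };
static bool g_interrupt_demo = false;

int InferNodeDemo(bool shape_known) { return shape_known ? kRetOk : kRetInferInvalid; }

int CompileGraphDemo(bool shape_known) {
  int ret = InferNodeDemo(shape_known);
  if (ret == kRetInferInvalid) {
    g_interrupt_demo = true;  // shapes depend on runtime data; RunGraph retries
  }
  return ret;
}

int RunGraphNodeDemo(bool shape_known_now) {
  if (g_interrupt_demo && InferNodeDemo(shape_known_now) != kRetOk) {
    printf("InferShape fail at run time\n");
    return kRetInferInvalid;  // inference must succeed before the kernel runs
  }
  // ... allocate output tensors, then dispatch the kernel ...
  return kRetOk;
}

int main() {
  CompileGraphDemo(false);        // defers inference
  return RunGraphNodeDemo(true);  // succeeds once shapes are known
}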
@@ -79,15 +79,19 @@ static void InitFuncs() {
g_infershape_funcs[KernelType::Activation] = DoActivationInferShape;
g_infershape_funcs[KernelType::Log] = DoArithmeticSelfInferShape;
g_infershape_funcs[KernelType::Neg] = DoArithmeticSelfInferShape;
g_infershape_funcs[KernelType::ActivationGrad] = DoActivationGradInferShape;
g_runkernel_funcs[KernelType::MatMul] = DoMatMul;
g_runkernel_funcs[KernelType::Activation] = DoActivation;
g_runkernel_funcs[KernelType::Log] = DoArithmeticSelf;
g_runkernel_funcs[KernelType::LogGrad] = DoArithmeticSelfGrad;
g_runkernel_funcs[KernelType::Neg] = DoArithmeticSelf;
#ifdef SUPPORT_TRAIN
g_infershape_funcs[KernelType::ActivationGrad] = DoActivationGradInferShape;
g_runkernel_funcs[KernelType::NegGrad] = DoArithmeticSelfGrad;
g_runkernel_funcs[KernelType::ActivationGrad] = DoActivationGrad;
g_runkernel_funcs[KernelType::LogGrad] = DoArithmeticSelfGrad;
#endif
g_first_load = false;
}
}
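
The reshuffled registrations above gather every grad kernel under SUPPORT_TRAIN, mirroring the TRAIN_KERNEL_SRC split in the CMake change at the top of this commit. A self-contained demo of the registration pattern (demo names, not the patch's real tables):

typedef int (*RunKernelFn)(int);

static int RunNeg(int x) { return -x; }
#ifdef SUPPORT_TRAIN
static int RunNegGrad(int dy) { return -dy; }
#endif

enum DemoKernelType : int { kNeg = 0, kNegGrad, kDemoKernelMax };
static RunKernelFn g_demo_funcs[kDemoKernelMax] = {nullptr, nullptr};

static void InitDemoFuncs() {
  g_demo_funcs[kNeg] = RunNeg;
#ifdef SUPPORT_TRAIN
  g_demo_funcs[kNegGrad] = RunNegGrad;  // absent from inference-only builds
#endif
}

int main() {
  InitDemoFuncs();
  return g_demo_funcs[kNeg](5) == -5 ? 0 : 1;
}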
@@ -114,7 +118,7 @@ int LiteSession::CompileGraph(Model *model) {
TensorPtrVector LiteSession::GetInputs() const {
TensorPtrVector in(g_model->input_indices_.size());
for (size_t i = 0; i < g_model->input_indices_.size(); ++i) {
in.at(i) = g_model->all_tensors_[i];
in.at(i) = g_model->all_tensors_[g_model->input_indices_[i]];
}
return in;
}
@@ -126,7 +130,7 @@ TensorPtrVector LiteSession::GetOutputsByNodeName(const String &node_name) const
TensorPtrVector LiteSession::GetOutputs() const {
TensorPtrVector out(g_model->output_indices_.size());
for (size_t i = 0; i < g_model->output_indices_.size(); ++i) {
out.at(i) = g_model->all_tensors_[i];
out.at(i) = g_model->all_tensors_[g_model->output_indices_[i]];
}
return out;
}
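
The one-line changes to GetInputs() and GetOutputs() above are behavioral fixes, not logging cleanup: both previously indexed all_tensors_ by the loop counter instead of mapping through input_indices_ / output_indices_, so any model whose graph inputs or outputs are not the first tensors got the wrong ones. A minimal illustration with hypothetical values:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  // Suppose the model's only graph input is tensor #3 in all_tensors_.
  std::vector<int> all_tensors = {100, 101, 102, 103};  // stand-ins for tensor ptrs
  std::vector<size_t> input_indices = {3};

  int old_result = all_tensors[0];                 // buggy: indexed by i
  int new_result = all_tensors[input_indices[0]];  // fixed: mapped through indices

  assert(old_result == 100 && new_result == 103);
  return 0;
}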
@@ -137,7 +141,7 @@ int LiteSession::RunGraph() {
for (size_t i = 0; i < nodes_size; ++i) {
auto node = nodes[i];
if (node->primitive_ == nullptr) {
MS_LOG(ERROR) << "node's primitive is NULL!";
LITE_ERROR_LOG("node's primitive is NULL!");
return RET_ERROR;
}
TensorPtrVector in_tensors;
@@ -152,31 +156,31 @@ int LiteSession::RunGraph() {
if (g_infershape_interrupt) {
InferShape infershape = g_infershape_funcs[type];
if (infershape == NULL) {
MS_LOG(ERROR) << "Unsupport kernel type: " << type;
LITE_ERROR_LOG("Unsupport kernel type: %d", type);
return RET_PARAM_INVALID;
}
int ret = (*infershape)(in_tensors, out_tensors, node->primitive_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "InferShape fail!ret: " << ret;
LITE_ERROR_LOG("InferShape fail!ret: %d", ret);
return ret;
}
}
for (size_t j = 0; j < out_tensors.size(); ++j) {
out_tensors[j]->data_ = g_allocator.Malloc(out_tensors[j]->Size());
if (out_tensors[j]->data_ == NULL) {
MS_LOG(ERROR) << "Malloc data for out tensor fail!";
LITE_ERROR_LOG("Malloc data for out tensor fail!");
return RET_NULL_PTR;
}
}
RunKernel run_kernel = g_runkernel_funcs[type];
if (run_kernel == NULL) {
MS_LOG(ERROR) << "Unsupport kernel type: " << type;
LITE_ERROR_LOG("Unsupport kernel type: %d", type);
return RET_PARAM_INVALID;
}
int ret = (*run_kernel)(in_tensors, out_tensors, node, &g_allocator);
if (ret != RET_OK) {
MS_LOG(ERROR) << "run kernel fail!ret: " << ret;
LITE_ERROR_LOG("run kernel fail!ret: ", ret);
return ret;
}
}

View File

@@ -15,11 +15,10 @@
*/
#include <iostream>
#include <vector>
#include <numeric>
#include <string>
#include "internal/include/ms_tensor.h"
MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape) {
MSTensor *tensor = (MSTensor *)malloc(sizeof(MSTensor));
MSTensor *tensor = new (std::nothrow) MSTensor();
if (tensor == NULL) {
return NULL;
}
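
The switch from malloc to new (std::nothrow) in CreateTensor is also a correctness fix: MSTensor holds non-trivially-constructible members (its shape vector), and malloc returns raw storage in which those members were never constructed. A sketch of the hazard using a stand-in type (MSTensorDemo is hypothetical):

#include <new>
#include <vector>

// Stand-in for MSTensor: a struct with a std::vector member needs its
// constructor to run, or the vector's internal pointers are garbage.
struct MSTensorDemo {
  std::vector<int> shape_;
};

MSTensorDemo *CreateDemoTensor() {
  // malloc(sizeof(MSTensorDemo)) would skip the vector's constructor, so any
  // later use of shape_ would be undefined behavior.
  MSTensorDemo *t = new (std::nothrow) MSTensorDemo();  // NULL on failure, no throw
  if (t == nullptr) {
    return nullptr;
  }
  return t;
}

int main() {
  MSTensorDemo *t = CreateDemoTensor();
  if (t == nullptr) return 1;
  t->shape_.push_back(1);  // safe: the constructor ran
  delete t;
  return 0;
}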

View File

@@ -32,22 +32,23 @@ class InferTest : public mindspore::CommonTest {
TEST_F(InferTest, TestSession) {
Model model;
Node *node = reinterpret_cast<Node *>(malloc(sizeof(Node)));
Node node;
model.nodes_.push_back(&node);
node->name_ = "Neg";
node->node_type_ = NodeType::NodeType_CNode;
PrimitiveC *prim = reinterpret_cast<PrimitiveC *>(malloc(sizeof(PrimitiveC)));
prim->type_ = KernelType::Neg;
node->input_indices_.push_back(0);
node->output_indices_.push_back(1);
MSTensor *in = CreateTensor(kNumberTypeFloat32, {1, 1, 1, 10});
node.node_type_ = NodeType::NodeType_CNode;
PrimitiveC prim;
prim.type_ = KernelType::Neg;
node.primitive_ = &prim;
node.input_indices_.push_back(0);
node.output_indices_.push_back(1);
ShapeVector shape = {1, 1, 1, 10};
MSTensor *in = CreateTensor(kNumberTypeFloat32, shape);
model.all_tensors_.push_back(in);
model.input_indices_.push_back(0);
MSTensor *out = CreateTensor(kNumberTypeFloat32, {1, 1, 1, 10});
MSTensor *out = CreateTensor(kNumberTypeFloat32, shape);
model.all_tensors_.emplace_back(out);
node->output_indices_.push_back(1);
model.output_indices_.push_back(1);
LiteSession session;
session.CompileGraph(&model);