!20146 [lite] open test and demo

Merge pull request !20146 from 徐安越/r1.3
i-robot 2021-07-13 10:51:50 +00:00 committed by Gitee
commit ae6c95082f
11 changed files with 126 additions and 139 deletions

View File

@@ -28,16 +28,16 @@ class CustomAddInfer : public kernel::KernelInterface {
CustomAddInfer() = default;
~CustomAddInfer() = default;
int Infer(const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
int Infer(std::vector<mindspore::MSTensor> *inputs, std::vector<mindspore::MSTensor> *outputs,
const schema::Primitive *primitive) override {
outputs[0]->set_format(inputs[0]->format());
outputs[0]->set_data_type(inputs[0]->data_type());
auto ret = common::CheckInputs(inputs);
(*outputs)[0].SetFormat((*inputs)[0].format());
(*outputs)[0].SetDataType((*inputs)[0].DataType());
auto ret = common::CheckInputs(*inputs);
if (ret != lite::RET_OK) {
outputs[0]->set_shape({-1}); // shape{-1} indicates that the shape needs to be inferred at runtime.
(*outputs)[0].SetShape({-1}); // shape{-1} indicates that the shape needs to be inferred at runtime.
return ret;
}
outputs[0]->set_shape(inputs[0]->shape());
(*outputs)[0].SetShape((*inputs)[0].Shape());
return lite::RET_OK;
}
};
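
Because the diff view interleaves the removed tensor::MSTensor * lines with their replacements, the new-API version of this Infer override is collected below for readability. It is a sketch assembled from the added lines above (the surrounding includes and the example's common::CheckInputs helper are assumed), not extra code introduced by this commit.

class CustomAddInfer : public kernel::KernelInterface {
 public:
  CustomAddInfer() = default;
  ~CustomAddInfer() = default;
  // Tensors are now passed as pointers to std::vector<mindspore::MSTensor>.
  int Infer(std::vector<mindspore::MSTensor> *inputs, std::vector<mindspore::MSTensor> *outputs,
            const schema::Primitive *primitive) override {
    (*outputs)[0].SetFormat((*inputs)[0].format());
    (*outputs)[0].SetDataType((*inputs)[0].DataType());
    auto ret = common::CheckInputs(*inputs);
    if (ret != lite::RET_OK) {
      (*outputs)[0].SetShape({-1});  // shape{-1} defers shape inference to runtime
      return ret;
    }
    (*outputs)[0].SetShape((*inputs)[0].Shape());
    return lite::RET_OK;
  }
};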

View File

@@ -19,24 +19,14 @@
namespace mindspore {
namespace common {
int CheckInputs(const std::vector<tensor::MSTensor *> &inputs) {
int CheckInputs(const std::vector<mindspore::MSTensor> &inputs) {
for (auto &input : inputs) {
auto input_shape = input->shape();
auto input_shape = input.Shape();
if (std::find(input_shape.begin(), input_shape.end(), -1) != input_shape.end()) {
return lite::RET_INFER_INVALID;
}
}
return lite::RET_OK;
}
int CheckOutputs(const std::vector<tensor::MSTensor *> &outputs) {
for (auto &output : outputs) {
auto output_shape = output->shape();
if (std::find(output_shape.begin(), output_shape.end(), -1) != output_shape.end()) {
return lite::RET_INFER_INVALID;
}
}
return lite::RET_OK;
}
} // namespace common
} // namespace mindspore

View File

@@ -18,16 +18,14 @@
#define MINDSPORE_LITE_EXAMPLES_RUNTIME_REGISTRY_SRC_CUSTOM_COMMON_H
#include <vector>
#include "include/api/types.h"
#include "include/errorcode.h"
#include "include/ms_tensor.h"
namespace mindspore {
namespace common {
// verify that the inputs' shape is inferred successfully when inferring the current node.
int CheckInputs(const std::vector<tensor::MSTensor *> &inputs);
// verify that the outputs' shape is inferred successfully when running the current node.
int CheckOutputs(const std::vector<tensor::MSTensor *> &outputs);
int CheckInputs(const std::vector<mindspore::MSTensor> &inputs);
} // namespace common
} // namespace mindspore
#endif // MINDSPORE_LITE_EXAMPLES_RUNTIME_REGISTRY_SRC_CUSTOM_COMMON_H

View File

@@ -29,7 +29,6 @@ MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com
mkdir -p build
mkdir -p lib
mkdir -p include
mkdir -p model
if [ ! -e ${BASEPATH}/model/add_extend.ms ]; then
wget -c -O ${BASEPATH}/model/add_extend.ms --no-check-certificate ${MODEL_DOWNLOAD_URL}
@@ -39,11 +38,7 @@ if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
fi
tar -xzf ${BASEPATH}/build/${MINDSPORE_FILE}
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/lib/libmindspore-lite.a ${BASEPATH}/lib/
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/include/*.h ${BASEPATH}/include/
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/include/ir ${BASEPATH}/include/
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/include/registry ${BASEPATH}/include/
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/include/schema ${BASEPATH}/include/
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/include/third_party ${BASEPATH}/include/
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/include ${BASEPATH}/
cd ${BASEPATH}/build || exit
cmake ${BASEPATH}
make

View File

@@ -28,16 +28,16 @@ class CustomAddInfer : public kernel::KernelInterface {
CustomAddInfer() = default;
~CustomAddInfer() = default;
int Infer(const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
int Infer(std::vector<mindspore::MSTensor> *inputs, std::vector<mindspore::MSTensor> *outputs,
const schema::Primitive *primitive) override {
outputs[0]->set_format(inputs[0]->format());
outputs[0]->set_data_type(inputs[0]->data_type());
(*outputs)[0].SetFormat((*inputs)[0].format());
(*outputs)[0].SetDataType((*inputs)[0].DataType());
auto ret = common::CheckInputs(inputs);
if (ret != lite::RET_OK) {
outputs[0]->set_shape({-1}); // shape{-1} indicates that the shape needs to be inferred at runtime.
(*outputs)[0].SetShape({-1}); // shape{-1} indicates that the shape needs to be inferred at runtime.
return ret;
}
outputs[0]->set_shape(inputs[0]->shape());
(*outputs)[0].SetShape((*inputs)[0].Shape());
return lite::RET_OK;
}
};

View File

@@ -27,8 +27,8 @@ namespace mindspore {
namespace kernel {
class CustomAddKernel : public Kernel {
public:
CustomAddKernel(const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
const schema::Primitive *primitive, const lite::Context *ctx)
CustomAddKernel(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs,
const schema::Primitive *primitive, const mindspore::Context *ctx)
: Kernel(inputs, outputs, primitive, ctx) {}
// Prepare will be called during graph compilation
int Prepare() override { return lite::RET_OK; }
@@ -40,10 +40,10 @@ class CustomAddKernel : public Kernel {
}
PreProcess();
ParseAttrData();
float *in0 = static_cast<float *>(inputs_[0]->data());
float *in1 = static_cast<float *>(inputs_[1]->data());
float *out = static_cast<float *>(outputs_[0]->data());
auto num = outputs_[0]->ElementsNum();
const float *in0 = static_cast<const float *>(inputs_[0].Data().get());
const float *in1 = static_cast<const float *>(inputs_[1].Data().get());
float *out = static_cast<float *>(outputs_[0].MutableData());
auto num = outputs_[0].ElementNum();
for (int i = 0; i < num; ++i) {
out[i] = in0[i] + in1[i];
}
@@ -57,7 +57,7 @@ class CustomAddKernel : public Kernel {
// if the output shape contains -1, it needs to be inferred before allocating memory for the output tensor.
int PreProcess() {
if (common::CheckOutputs(outputs_) != lite::RET_OK) {
auto ret = RegisterKernelInterface::GetKernelInterface({}, primitive_)->Infer(inputs_, outputs_, primitive_);
auto ret = RegisterKernelInterface::GetKernelInterface({}, primitive_)->Infer(&inputs_, &outputs_, primitive_);
if (ret != lite::RET_OK) {
std::cerr << "infer failed." << std::endl;
return lite::RET_ERROR;
@@ -68,9 +68,9 @@ class CustomAddKernel : public Kernel {
return ret;
}
}
for (auto *output : outputs_) {
for (auto &output : outputs_) {
// malloc data for output tensor
auto data = output->MutableData();
auto data = output.MutableData();
if (data == nullptr) {
std::cerr << "Get data failed" << std::endl;
return lite::RET_ERROR;
@@ -101,9 +101,8 @@ class CustomAddKernel : public Kernel {
std::map<std::string, std::string> attrs_;
};
std::shared_ptr<Kernel> CustomAddCreator(const std::vector<tensor::MSTensor *> &inputs,
const std::vector<tensor::MSTensor *> &outputs,
const schema::Primitive *primitive, const lite::Context *ctx) {
std::shared_ptr<Kernel> CustomAddCreator(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs,
const schema::Primitive *primitive, const mindspore::Context *ctx) {
return std::make_shared<CustomAddKernel>(inputs, outputs, primitive, ctx);
}
REGISTER_CUSTOM_KERNEL(CPU, Tutorial, kNumberTypeFloat32, Custom_Add, CustomAddCreator)
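
The data-access pattern is the core of this kernel's migration: inputs are read through the const Data() accessor, the output buffer comes from MutableData(), the element count from ElementNum(), and the creator now takes MSTensor vectors plus a mindspore::Context. The new-API pieces are collected from the added lines above into one fragment (a readability sketch, not verbatim commit code):

// Element-wise add inside the kernel's Execute(), using the new MSTensor accessors.
const float *in0 = static_cast<const float *>(inputs_[0].Data().get());  // read-only view of input 0
const float *in1 = static_cast<const float *>(inputs_[1].Data().get());  // read-only view of input 1
float *out = static_cast<float *>(outputs_[0].MutableData());            // writable output buffer
auto num = outputs_[0].ElementNum();
for (int i = 0; i < num; ++i) {
  out[i] = in0[i] + in1[i];
}

// Creator with the new signature, registered the same way as before:
std::shared_ptr<Kernel> CustomAddCreator(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs,
                                         const schema::Primitive *primitive, const mindspore::Context *ctx) {
  return std::make_shared<CustomAddKernel>(inputs, outputs, primitive, ctx);
}
REGISTER_CUSTOM_KERNEL(CPU, Tutorial, kNumberTypeFloat32, Custom_Add, CustomAddCreator)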

View File

@@ -19,9 +19,9 @@
namespace mindspore {
namespace common {
int CheckInputs(const std::vector<tensor::MSTensor *> &inputs) {
int CheckInputs(const std::vector<mindspore::MSTensor> &inputs) {
for (auto &input : inputs) {
auto input_shape = input->shape();
auto input_shape = input.Shape();
if (std::find(input_shape.begin(), input_shape.end(), -1) != input_shape.end()) {
return lite::RET_INFER_INVALID;
}
@@ -29,9 +29,9 @@ int CheckInputs(const std::vector<tensor::MSTensor *> &inputs) {
return lite::RET_OK;
}
int CheckOutputs(const std::vector<tensor::MSTensor *> &outputs) {
int CheckOutputs(const std::vector<mindspore::MSTensor> &outputs) {
for (auto &output : outputs) {
auto output_shape = output->shape();
auto output_shape = output.Shape();
if (std::find(output_shape.begin(), output_shape.end(), -1) != output_shape.end()) {
return lite::RET_INFER_INVALID;
}

View File

@@ -18,16 +18,17 @@
#define MINDSPORE_LITE_EXAMPLES_RUNTIME_REGISTRY_SRC_CUSTOM_COMMON_H
#include <vector>
#include "include/api/types.h"
#include "include/errorcode.h"
#include "include/ms_tensor.h"
namespace mindspore {
namespace common {
// verify that the inputs' shape is inferred successfully when inferring the current node.
int CheckInputs(const std::vector<tensor::MSTensor *> &inputs);
int CheckInputs(const std::vector<mindspore::MSTensor> &inputs);
// verify that the outputs' shape is inferred successfully when running the current node.
int CheckOutputs(const std::vector<tensor::MSTensor *> &outputs);
int CheckOutputs(const std::vector<mindspore::MSTensor> &outputs);
} // namespace common
} // namespace mindspore
#endif // MINDSPORE_LITE_EXAMPLES_RUNTIME_REGISTRY_SRC_CUSTOM_COMMON_H

View File

@@ -56,6 +56,8 @@ echo 'run common ut tests'
./lite-test --gtest_filter="TestSwishOpenCLCI.Fp32CI"
./lite-test --gtest_filter="ModelParserRegistryTest.TestRegistry"
./lite-test --gtest_filter="PassRegistryTest.TestRegistry"
./lite-test --gtest_filter="TestRegistry.TestAdd"
./lite-test --gtest_filter="TestRegistry.TestCustomAdd"
# test cases specific for train

View File

@@ -17,8 +17,9 @@
#include <cstring>
#include <memory>
#include "schema/inner/model_generated.h"
#include "mindspore/lite/include/model.h"
#include "common/common_test.h"
#include "include/api/context.h"
#include "include/api/model.h"
#include "include/lite_session.h"
#include "include/context.h"
#include "include/errorcode.h"
@@ -51,8 +52,8 @@ class TestData {
class TestCustomOp : public Kernel {
public:
TestCustomOp(const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
const schema::Primitive *primitive, const lite::Context *ctx)
TestCustomOp(const std::vector<mindspore::MSTensor> &inputs, const std::vector<mindspore::MSTensor> &outputs,
const schema::Primitive *primitive, const mindspore::Context *ctx)
: Kernel(inputs, outputs, primitive, ctx) {}
int Prepare() override { return 0; }
@@ -62,9 +63,9 @@ class TestCustomOp : public Kernel {
private:
int PreProcess() {
for (auto *output : outputs_) {
for (auto &output : outputs_) {
// malloc data for output tensor
auto data = output->MutableData();
auto data = output.MutableData();
if (data == nullptr) {
MS_LOG(ERROR) << "Get data failed";
return RET_ERROR;
@@ -95,10 +96,10 @@ int TestCustomOp::Execute() {
}
PreProcess();
GetAttrData();
float *in0 = static_cast<float *>(inputs_[0]->data());
float *in1 = static_cast<float *>(inputs_[1]->data());
float *out = static_cast<float *>(outputs_[0]->data());
auto num = outputs_[0]->ElementsNum();
const float *in0 = static_cast<const float *>(inputs_[0].Data().get());
const float *in1 = static_cast<const float *>(inputs_[1].Data().get());
float *out = static_cast<float *>(outputs_[0].MutableData());
auto num = outputs_[0].ElementNum();
for (int i = 0; i < num; ++i) {
out[i] = in0[i] + in1[i];
}
@@ -109,19 +110,18 @@ class TestCustomOpInfer : public KernelInterface {
public:
TestCustomOpInfer() = default;
~TestCustomOpInfer() = default;
int Infer(const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
int Infer(std::vector<mindspore::MSTensor> *inputs, std::vector<mindspore::MSTensor> *outputs,
const schema::Primitive *primitive) override {
outputs[0]->set_format(inputs[0]->format());
outputs[0]->set_data_type(inputs[0]->data_type());
outputs[0]->set_shape(inputs[0]->shape());
(*outputs)[0].SetFormat((*inputs)[0].format());
(*outputs)[0].SetDataType((*inputs)[0].DataType());
(*outputs)[0].SetShape((*inputs)[0].Shape());
return RET_OK;
}
};
namespace {
std::shared_ptr<Kernel> TestCustomAddCreator(const std::vector<tensor::MSTensor *> &inputs,
const std::vector<tensor::MSTensor *> &outputs,
const schema::Primitive *primitive, const lite::Context *ctx) {
std::shared_ptr<Kernel> TestCustomAddCreator(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs,
const schema::Primitive *primitive, const mindspore::Context *ctx) {
return std::make_shared<TestCustomOp>(inputs, outputs, primitive, ctx);
}
@@ -190,40 +190,41 @@ TEST_F(TestRegistryCustomOp, TestCustomAdd) {
size_t size = builder.GetSize();
const char *content = reinterpret_cast<char *>(builder.GetBufferPointer());
auto model = lite::Model::Import(content, size);
ASSERT_NE(nullptr, model);
meta_graph.reset();
content = nullptr;
auto context = new lite::InnerContext;
auto &device_list = context->device_list_;
std::shared_ptr<DefaultAllocator> allocator = std::make_shared<DefaultAllocator>();
lite::DeviceContext device_ctx = {lite::DT_CPU, {false, lite::NO_BIND}, "BuiltInTest", "CPU", allocator};
device_list.push_back(device_ctx);
context->thread_num_ = 1;
ASSERT_EQ(lite::RET_OK, context->Init());
auto session = session::LiteSession::CreateSession(context);
ASSERT_NE(nullptr, session);
auto ret = session->CompileGraph(model);
ASSERT_EQ(lite::RET_OK, ret);
auto inputs = session->GetInputs();
// create a context
auto context = std::make_shared<mindspore::Context>();
context->SetThreadNum(1);
context->SetEnableParallel(false);
context->SetThreadAffinity(1);
auto &device_list = context->MutableDeviceInfo();
std::shared_ptr<CPUDeviceInfo> device_info = std::make_shared<CPUDeviceInfo>();
device_info->SetEnableFP16(false);
device_list.push_back(device_info);
// build a model
auto model = std::make_shared<mindspore::Model>();
auto ret = model->Build(content, size, kFlatBuffer, context);
ASSERT_EQ(kSuccess, ret);
auto inputs = model->GetInputs();
ASSERT_EQ(inputs.size(), 2);
auto inTensor = inputs.front();
ASSERT_NE(nullptr, inTensor);
float *in0_data = static_cast<float *>(inTensor->MutableData());
auto impl = inTensor.impl();
ASSERT_NE(nullptr, impl);
float *in0_data = static_cast<float *>(inTensor.MutableData());
in0_data[0] = 10.0f;
auto inTensor1 = inputs.back();
ASSERT_NE(nullptr, inTensor1);
float *in1_data = static_cast<float *>(inTensor1->MutableData());
impl = inTensor1.impl();
ASSERT_NE(nullptr, impl);
float *in1_data = static_cast<float *>(inTensor1.MutableData());
in1_data[0] = 20.0f;
ret = session->RunGraph();
ASSERT_EQ(lite::RET_OK, ret);
auto outputs = session->GetOutputs();
std::vector<mindspore::MSTensor> outputs;
ret = model->Predict(inputs, &outputs);
ASSERT_EQ(kSuccess, ret);
ASSERT_EQ(outputs.size(), 1);
auto outTensor = outputs.begin()->second;
ASSERT_NE(nullptr, outTensor);
ASSERT_EQ(28 * 28 * 3, outTensor->ElementsNum());
ASSERT_EQ(TypeId::kNumberTypeFloat32, outTensor->data_type());
auto *outData = reinterpret_cast<float *>(outTensor->MutableData());
impl = outputs.front().impl();
ASSERT_NE(nullptr, impl);
ASSERT_EQ(28 * 28 * 3, outputs.front().ElementNum());
ASSERT_EQ(DataType::kNumberTypeFloat32, outputs.front().DataType());
auto *outData = reinterpret_cast<const float *>(outputs.front().Data().get());
ASSERT_NE(nullptr, outData);
ASSERT_EQ(30.0f, outData[0]);
ASSERT_EQ(TestData::GetInstance()->data_, kTestData);
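
This test now drives the graph through the unified C++ API (Context / Model::Build / Model::Predict) instead of lite::InnerContext and LiteSession; the TestRegistry.TestAdd case below follows the same pattern. A condensed sketch of the new flow, assuming content and size hold the flatbuffer model serialized earlier in the test:

// Condensed sketch of the migrated test flow (assembled from the added lines above).
auto context = std::make_shared<mindspore::Context>();
context->SetThreadNum(1);
std::shared_ptr<mindspore::CPUDeviceInfo> device_info = std::make_shared<mindspore::CPUDeviceInfo>();
device_info->SetEnableFP16(false);
context->MutableDeviceInfo().push_back(device_info);

auto model = std::make_shared<mindspore::Model>();
ASSERT_EQ(kSuccess, model->Build(content, size, kFlatBuffer, context));  // content/size: serialized flatbuffer

auto inputs = model->GetInputs();  // MSTensor values instead of tensor::MSTensor * pointers
static_cast<float *>(inputs[0].MutableData())[0] = 10.0f;
static_cast<float *>(inputs[1].MutableData())[0] = 20.0f;

std::vector<mindspore::MSTensor> outputs;
ASSERT_EQ(kSuccess, model->Predict(inputs, &outputs));  // Predict fills the output vector
auto *out_data = static_cast<const float *>(outputs.front().Data().get());
ASSERT_EQ(30.0f, out_data[0]);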

View File

@@ -16,8 +16,9 @@
#include <cmath>
#include <memory>
#include "schema/inner/model_generated.h"
#include "mindspore/lite/include/model.h"
#include "common/common_test.h"
#include "include/api/context.h"
#include "include/api/model.h"
#include "include/lite_session.h"
#include "include/context.h"
#include "include/errorcode.h"
@@ -37,8 +38,8 @@ using mindspore::schema::PrimitiveType_AddFusion;
namespace mindspore {
class TestCustomAdd : public Kernel {
public:
TestCustomAdd(const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
const schema::Primitive *primitive, const lite::Context *ctx)
TestCustomAdd(const std::vector<mindspore::MSTensor> &inputs, const std::vector<mindspore::MSTensor> &outputs,
const schema::Primitive *primitive, const mindspore::Context *ctx)
: Kernel(inputs, outputs, primitive, ctx) {}
int Prepare() override { return 0; }
@@ -48,9 +49,9 @@ class TestCustomAdd : public Kernel {
private:
int PreProcess() {
for (auto *output : outputs_) {
for (auto &output : outputs_) {
// malloc data for output tensor
auto data = output->MutableData();
auto data = output.MutableData();
if (data == nullptr) {
MS_LOG(ERROR) << "Get data failed";
return RET_ERROR;
@@ -65,10 +66,10 @@ int TestCustomAdd::Execute() {
return RET_PARAM_INVALID;
}
PreProcess();
float *in0 = static_cast<float *>(inputs_[0]->data());
float *in1 = static_cast<float *>(inputs_[1]->data());
float *out = static_cast<float *>(outputs_[0]->data());
auto num = outputs_[0]->ElementsNum();
auto *in0 = static_cast<const float *>(inputs_[0].Data().get());
auto *in1 = static_cast<const float *>(inputs_[1].Data().get());
float *out = static_cast<float *>(outputs_[0].MutableData());
auto num = outputs_[0].ElementNum();
for (int i = 0; i < num; ++i) {
out[i] = in0[i] + in1[i];
}
@@ -79,19 +80,18 @@ class TestCustomAddInfer : public KernelInterface {
public:
TestCustomAddInfer() = default;
~TestCustomAddInfer() = default;
int Infer(const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
int Infer(std::vector<mindspore::MSTensor> *inputs, std::vector<mindspore::MSTensor> *outputs,
const schema::Primitive *primitive) override {
outputs[0]->set_format(inputs[0]->format());
outputs[0]->set_data_type(inputs[0]->data_type());
outputs[0]->set_shape(inputs[0]->shape());
(*outputs)[0].SetFormat((*inputs)[0].format());
(*outputs)[0].SetDataType((*inputs)[0].DataType());
(*outputs)[0].SetShape((*inputs)[0].Shape());
return RET_OK;
}
};
namespace {
std::shared_ptr<Kernel> TestCustomAddCreator(const std::vector<tensor::MSTensor *> &inputs,
const std::vector<tensor::MSTensor *> &outputs,
const schema::Primitive *primitive, const lite::Context *ctx) {
std::shared_ptr<Kernel> TestCustomAddCreator(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs,
const schema::Primitive *primitive, const mindspore::Context *ctx) {
return std::make_shared<TestCustomAdd>(inputs, outputs, primitive, ctx);
}
@@ -153,40 +153,41 @@ TEST_F(TestRegistry, TestAdd) {
size_t size = builder.GetSize();
const char *content = reinterpret_cast<char *>(builder.GetBufferPointer());
auto model = lite::Model::Import(content, size);
ASSERT_NE(nullptr, model);
meta_graph.reset();
content = nullptr;
auto context = new lite::InnerContext;
auto &device_list = context->device_list_;
std::shared_ptr<DefaultAllocator> allocator = std::make_shared<DefaultAllocator>();
lite::DeviceContext device_ctx = {lite::DT_CPU, {false, lite::NO_BIND}, "BuiltInTest", "CPU", allocator};
device_list.push_back(device_ctx);
context->thread_num_ = 1;
ASSERT_EQ(lite::RET_OK, context->Init());
auto session = session::LiteSession::CreateSession(context);
ASSERT_NE(nullptr, session);
auto ret = session->CompileGraph(model);
ASSERT_EQ(lite::RET_OK, ret);
auto inputs = session->GetInputs();
// create a context
auto context = std::make_shared<mindspore::Context>();
context->SetThreadNum(1);
context->SetEnableParallel(false);
context->SetThreadAffinity(1);
auto &device_list = context->MutableDeviceInfo();
std::shared_ptr<CPUDeviceInfo> device_info = std::make_shared<CPUDeviceInfo>();
device_info->SetEnableFP16(false);
device_list.push_back(device_info);
// build a model
auto model = std::make_shared<mindspore::Model>();
auto ret = model->Build(content, size, kFlatBuffer, context);
ASSERT_EQ(kSuccess, ret);
auto inputs = model->GetInputs();
ASSERT_EQ(inputs.size(), 2);
auto inTensor = inputs.front();
ASSERT_NE(nullptr, inTensor);
float *in0_data = static_cast<float *>(inTensor->MutableData());
auto impl = inTensor.impl();
ASSERT_NE(nullptr, impl);
float *in0_data = static_cast<float *>(inTensor.MutableData());
in0_data[0] = 10.0f;
auto inTensor1 = inputs.back();
ASSERT_NE(nullptr, inTensor1);
float *in1_data = static_cast<float *>(inTensor1->MutableData());
impl = inTensor1.impl();
ASSERT_NE(nullptr, impl);
float *in1_data = static_cast<float *>(inTensor1.MutableData());
in1_data[0] = 20.0f;
ret = session->RunGraph();
ASSERT_EQ(lite::RET_OK, ret);
auto outputs = session->GetOutputs();
std::vector<mindspore::MSTensor> outputs;
ret = model->Predict(inputs, &outputs);
ASSERT_EQ(kSuccess, ret);
ASSERT_EQ(outputs.size(), 1);
auto outTensor = outputs.begin()->second;
ASSERT_NE(nullptr, outTensor);
ASSERT_EQ(28 * 28 * 3, outTensor->ElementsNum());
ASSERT_EQ(TypeId::kNumberTypeFloat32, outTensor->data_type());
auto *outData = reinterpret_cast<float *>(outTensor->MutableData());
impl = outputs.front().impl();
ASSERT_NE(nullptr, impl);
ASSERT_EQ(28 * 28 * 3, outputs.front().ElementNum());
ASSERT_EQ(DataType::kNumberTypeFloat32, outputs.front().DataType());
auto *outData = reinterpret_cast<const float *>(outputs.front().Data().get());
ASSERT_NE(nullptr, outData);
ASSERT_EQ(30.0f, outData[0]);
MS_LOG(INFO) << "Register add op test pass.";