!15212 update example files of micro

From: @yangjie159
Reviewed-by: @wangchengyuan,@zhanghaibo5
Signed-off-by: @wangchengyuan
This commit is contained in:
mindspore-ci-bot 2021-04-15 15:41:09 +08:00 committed by Gitee
commit 9346fa3484
8 changed files with 494 additions and 12 deletions

View File

@ -24,6 +24,7 @@
#include "include/errorcode.h"
#include "load_input.h"
#include "calib_output.h"
using namespace mindspore;
@ -34,8 +35,9 @@ void usage() {
"args[1]: inputs binary file\n"
"args[2]: model weight binary file\n"
"args[3]: loop count for performance test\n"
"args[4]: runtime thread num\n"
"args[5]: runtime thread bind mode\n\n");
"args[4]: calibration file\n"
"args[5]: runtime thread num\n"
"args[6]: runtime thread bind mode\n\n");
}
uint64_t GetTimeUs() {
@ -111,15 +113,15 @@ int main(int argc, const char **argv) {
}
lite::Context *context = nullptr;
if (argc >= 6) {
if (argc >= 7) {
// config benchmark context
context = new (std::nothrow) lite::Context();
if (context == nullptr) {
return lite::RET_ERROR;
}
context->thread_num_ = atoi(argv[4]);
context->thread_num_ = atoi(argv[5]);
context->device_list_.resize(1);
context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[5]))}}};
context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[6]))}}};
printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
}
@ -167,12 +169,29 @@ int main(int argc, const char **argv) {
return lite::RET_ERROR;
}
Vector<String> outputs_name = session->GetOutputTensorNames();
printf("\noutputs: \n");
Vector<String> outputs_name = session->GetOutputTensorNames();
Vector<tensor::MSTensor *> outputs;
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
outputs.push_back(output);
TensorToString(output);
}
if (argc >= 5) {
lite::Calibrator *calibrator = new (std::nothrow) lite::Calibrator();
if (calibrator == nullptr) {
return lite::RET_NULL_PTR;
}
ret = calibrator->ReadCalibData(argv[4]);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
ret = calibrator->CompareOutputs(outputs);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
delete calibrator;
}
printf("========run success=======\n");
delete session;
session = nullptr;

View File

@ -0,0 +1,148 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "calib_output.h"
#include <fstream>
#include <sstream>
#include <iostream>
#include <stdio.h>
#include <cmath>
namespace mindspore {
namespace lite {
constexpr float kToleranceVal = 0.0001;
#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
int Calibrator::ReadCalibData(const char *calib_data_path) {
std::ifstream in_file(calib_data_path);
if (!in_file.good()) {
printf("file is not exist, %s\n", calib_data_path);
return RET_ERROR;
}
if (!in_file.is_open()) {
printf("open file failed, %s\n", calib_data_path);
in_file.close();
return RET_ERROR;
}
while (!in_file.eof()) {
std::string line;
getline(in_file, line);
if (line.empty()) {
continue;
}
std::stringstream name_line(line);
std::string tensor_name;
size_t dim = 0;
name_line >> tensor_name >> dim;
size_t elements = 1;
for (size_t i = 0; i < dim; i++) {
size_t tmp_dim;
name_line >> tmp_dim;
elements *= tmp_dim;
}
getline(in_file, line);
std::stringstream data_line(line);
String name(tensor_name.c_str());
CalibTensor *output = new (std::nothrow) CalibTensor(name, elements);
MS_ERROR_IF_NULL(output);
float *data = output->MutableData();
MS_ERROR_IF_NULL(data);
for (size_t i = 0; i < elements; i++) {
data_line >> data[i];
}
calib_outputs_.push_back(output);
}
in_file.close();
return RET_OK;
}
// Accumulates the element-wise absolute error between an output tensor and
// its float calibration reference.
// Fixed: on a null pointer or NaN/Inf value the original returned 0, which
// the caller's tolerance check read as a perfect match. Return HUGE_VALF
// instead so the comparison is guaranteed to fail.
template <typename T>
float CompareData(const T *output, const float *calib, size_t elements_num) {
  if (output == nullptr || calib == nullptr) {
    printf("output or calib is nullptr\n");
    return HUGE_VALF;
  }
  float error = 0.f;
  for (size_t i = 0; i < elements_num; ++i) {
    // std::isnan/isinf have integral overloads, so this also compiles for
    // int8/uint8/uint32 instantiations (always false there).
    if (std::isnan(output[i]) || std::isinf(output[i]) || std::isnan(calib[i]) || std::isinf(calib[i])) {
      printf("error, output data is nan or inf\n");
      return HUGE_VALF;
    }
    error += std::abs(output[i] - calib[i]);
  }
  return error;
}
// Compares the session's output tensors against the calibration references
// loaded by ReadCalibData(). Order, names, element counts and data types
// must line up pairwise.
// Returns RET_OK when the accumulated absolute error is within
// kToleranceVal, RET_ERROR otherwise.
int Calibrator::CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const {
  if (outputs.size() != calib_outputs_.size()) {
    printf("error, outputs and calibs size is mismatch\n");
    return RET_ERROR;
  }
  float total_error = 0;
  size_t outputs_num = outputs.size();
  for (size_t i = 0; i < outputs_num; ++i) {
    tensor::MSTensor *output = outputs[i];
    MS_ERROR_IF_NULL(output);
    CalibTensor *calib = calib_outputs_[i];
    MS_ERROR_IF_NULL(calib);
    if (output->tensor_name() != calib->tensor_name()) {
      printf("error, output tensor name is not equal to calib\n");
      return RET_ERROR;
    }
    if (output->ElementsNum() != calib->ElementsNum()) {
      printf("error, output elements num is not equal to calib\n");
      return RET_ERROR;
    }
    switch (output->data_type()) {
      case TypeId::kNumberTypeFloat:
      case TypeId::kNumberTypeFloat32: {
        total_error += CompareData(static_cast<float *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      case TypeId::kNumberTypeInt8: {
        total_error += CompareData(static_cast<int8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      case TypeId::kNumberTypeUInt8: {
        total_error += CompareData(static_cast<uint8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      case TypeId::kNumberTypeUInt:
      case TypeId::kNumberTypeUInt32: {
        // Fixed: was static_cast<int32_t *>, which misinterprets unsigned
        // values above INT32_MAX as negative.
        total_error += CompareData(static_cast<uint32_t *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      default: {
        // Fixed: the original only printed and fell through, so an
        // unsupported tensor was never compared yet could report success.
        printf("unsupported tensor data type\n");
        return RET_ERROR;
      }
    }
  }
  if (total_error > kToleranceVal) {
    printf("compare outputs failed, total error: %f\n", total_error);
    return RET_ERROR;
  }
  printf("compare outputs success, total error: %f\n", total_error);
  return RET_OK;
}
} // namespace lite
} // namespace mindspore

View File

@ -0,0 +1,73 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#define MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#include "include/lite_utils.h"
#include "include/ms_tensor.h"
#include "include/errorcode.h"
namespace mindspore {
namespace lite {
// Holds one reference (calibration) tensor parsed from the calib data file:
// a tensor name plus a lazily malloc'ed float buffer of elements_num values.
class CalibTensor {
 public:
  CalibTensor(String name, size_t elements_num) : tensor_name_(name), elements_num_(static_cast<int>(elements_num)) {}
  ~CalibTensor() {
    free(data_);
    data_ = nullptr;
  }
  // Owns a raw malloc'ed buffer: copying would double-free it, so copies
  // are forbidden (the original class was implicitly copyable).
  CalibTensor(const CalibTensor &) = delete;
  CalibTensor &operator=(const CalibTensor &) = delete;
  String tensor_name() const { return tensor_name_; }
  int ElementsNum() const { return elements_num_; }
  // Lazily allocates the float buffer on first use.
  // Returns nullptr when the element count is 0 or exceeds INT16_MAX
  // (a sanity bound for micro targets) or when malloc fails.
  float *MutableData() {
    if (data_ == nullptr) {
      if (elements_num_ == 0 || elements_num_ > INT16_MAX) {
        return nullptr;
      }
      data_ = static_cast<float *>(malloc(elements_num_ * sizeof(float)));
    }
    return data_;
  }

 private:
  String tensor_name_;
  int elements_num_{0};  // int to match tensor::MSTensor::ElementsNum()
  float *data_{nullptr};
};
// Loads calibration tensors from a file (ReadCalibData) and checks the
// session outputs against them (CompareOutputs). Owns the CalibTensor
// objects it creates.
class Calibrator {
 public:
  Calibrator() = default;
  ~Calibrator() {
    for (auto &calib : calib_outputs_) {
      delete calib;
      calib = nullptr;
    }
    calib_outputs_.clear();
  }
  // Owns raw CalibTensor pointers: copying would double-delete them, so
  // copies are forbidden (the original class was implicitly copyable).
  Calibrator(const Calibrator &) = delete;
  Calibrator &operator=(const Calibrator &) = delete;
  // Parses the calibration file (header line + data line per tensor).
  int ReadCalibData(const char *calib_data_path);
  // Returns RET_OK when every output matches its reference within tolerance.
  int CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const;

 private:
  Vector<CalibTensor *> calib_outputs_;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_

View File

@ -287,6 +287,7 @@ String operator+(const char *lhs, const String &rhs) {
return str;
}
// Comparison operators for the micro String class, all delegated to
// String::compare() (0 means equal, matching std::string semantics).
bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; }
bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; }

View File

@ -24,6 +24,7 @@
#include "include/errorcode.h"
#include "load_input.h"
#include "calib_output.h"
using namespace mindspore;
@ -34,8 +35,9 @@ void usage() {
"args[1]: inputs binary file\n"
"args[2]: model weight binary file\n"
"args[3]: loop count for performance test\n"
"args[4]: runtime thread num\n"
"args[5]: runtime thread bind mode\n\n");
"args[4]: calibration file\n"
"args[5]: runtime thread num\n"
"args[6]: runtime thread bind mode\n\n");
}
uint64_t GetTimeUs() {
@ -111,15 +113,15 @@ int main(int argc, const char **argv) {
}
lite::Context *context = nullptr;
if (argc >= 6) {
if (argc >= 7) {
// config benchmark context
context = new (std::nothrow) lite::Context();
if (context == nullptr) {
return lite::RET_ERROR;
}
context->thread_num_ = atoi(argv[4]);
context->thread_num_ = atoi(argv[5]);
context->device_list_.resize(1);
context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[5]))}}};
context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[6]))}}};
printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
}
@ -167,12 +169,29 @@ int main(int argc, const char **argv) {
return lite::RET_ERROR;
}
Vector<String> outputs_name = session->GetOutputTensorNames();
printf("\noutputs: \n");
Vector<String> outputs_name = session->GetOutputTensorNames();
Vector<tensor::MSTensor *> outputs;
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
outputs.push_back(output);
TensorToString(output);
}
if (argc >= 5) {
lite::Calibrator *calibrator = new (std::nothrow) lite::Calibrator();
if (calibrator == nullptr) {
return lite::RET_NULL_PTR;
}
ret = calibrator->ReadCalibData(argv[4]);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
ret = calibrator->CompareOutputs(outputs);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
delete calibrator;
}
printf("========run success=======\n");
delete session;
session = nullptr;

View File

@ -0,0 +1,148 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "calib_output.h"
#include <fstream>
#include <sstream>
#include <iostream>
#include <stdio.h>
#include <cmath>
namespace mindspore {
namespace lite {
constexpr float kToleranceVal = 0.0001;
#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
int Calibrator::ReadCalibData(const char *calib_data_path) {
std::ifstream in_file(calib_data_path);
if (!in_file.good()) {
printf("file is not exist, %s\n", calib_data_path);
return RET_ERROR;
}
if (!in_file.is_open()) {
printf("open file failed, %s\n", calib_data_path);
in_file.close();
return RET_ERROR;
}
while (!in_file.eof()) {
std::string line;
getline(in_file, line);
if (line.empty()) {
continue;
}
std::stringstream name_line(line);
std::string tensor_name;
size_t dim = 0;
name_line >> tensor_name >> dim;
size_t elements = 1;
for (size_t i = 0; i < dim; i++) {
size_t tmp_dim;
name_line >> tmp_dim;
elements *= tmp_dim;
}
getline(in_file, line);
std::stringstream data_line(line);
String name(tensor_name.c_str());
CalibTensor *output = new (std::nothrow) CalibTensor(name, elements);
MS_ERROR_IF_NULL(output);
float *data = output->MutableData();
MS_ERROR_IF_NULL(data);
for (size_t i = 0; i < elements; i++) {
data_line >> data[i];
}
calib_outputs_.push_back(output);
}
in_file.close();
return RET_OK;
}
// Accumulates the element-wise absolute error between an output tensor and
// its float calibration reference.
// Fixed: on a null pointer or NaN/Inf value the original returned 0, which
// the caller's tolerance check read as a perfect match. Return HUGE_VALF
// instead so the comparison is guaranteed to fail.
template <typename T>
float CompareData(const T *output, const float *calib, size_t elements_num) {
  if (output == nullptr || calib == nullptr) {
    printf("output or calib is nullptr\n");
    return HUGE_VALF;
  }
  float error = 0.f;
  for (size_t i = 0; i < elements_num; ++i) {
    // std::isnan/isinf have integral overloads, so this also compiles for
    // int8/uint8/uint32 instantiations (always false there).
    if (std::isnan(output[i]) || std::isinf(output[i]) || std::isnan(calib[i]) || std::isinf(calib[i])) {
      printf("error, output data is nan or inf\n");
      return HUGE_VALF;
    }
    error += std::abs(output[i] - calib[i]);
  }
  return error;
}
// Compares the session's output tensors against the calibration references
// loaded by ReadCalibData(). Order, names, element counts and data types
// must line up pairwise.
// Returns RET_OK when the accumulated absolute error is within
// kToleranceVal, RET_ERROR otherwise.
int Calibrator::CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const {
  if (outputs.size() != calib_outputs_.size()) {
    printf("error, outputs and calibs size is mismatch\n");
    return RET_ERROR;
  }
  float total_error = 0;
  size_t outputs_num = outputs.size();
  for (size_t i = 0; i < outputs_num; ++i) {
    tensor::MSTensor *output = outputs[i];
    MS_ERROR_IF_NULL(output);
    CalibTensor *calib = calib_outputs_[i];
    MS_ERROR_IF_NULL(calib);
    if (output->tensor_name() != calib->tensor_name()) {
      printf("error, output tensor name is not equal to calib\n");
      return RET_ERROR;
    }
    if (output->ElementsNum() != calib->ElementsNum()) {
      printf("error, output elements num is not equal to calib\n");
      return RET_ERROR;
    }
    switch (output->data_type()) {
      case TypeId::kNumberTypeFloat:
      case TypeId::kNumberTypeFloat32: {
        total_error += CompareData(static_cast<float *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      case TypeId::kNumberTypeInt8: {
        total_error += CompareData(static_cast<int8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      case TypeId::kNumberTypeUInt8: {
        total_error += CompareData(static_cast<uint8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      case TypeId::kNumberTypeUInt:
      case TypeId::kNumberTypeUInt32: {
        // Fixed: was static_cast<int32_t *>, which misinterprets unsigned
        // values above INT32_MAX as negative.
        total_error += CompareData(static_cast<uint32_t *>(output->data()), calib->MutableData(), output->ElementsNum());
        break;
      }
      default: {
        // Fixed: the original only printed and fell through, so an
        // unsupported tensor was never compared yet could report success.
        printf("unsupported tensor data type\n");
        return RET_ERROR;
      }
    }
  }
  if (total_error > kToleranceVal) {
    printf("compare outputs failed, total error: %f\n", total_error);
    return RET_ERROR;
  }
  printf("compare outputs success, total error: %f\n", total_error);
  return RET_OK;
}
} // namespace lite
} // namespace mindspore

View File

@ -0,0 +1,73 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#define MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#include "include/lite_utils.h"
#include "include/ms_tensor.h"
#include "include/errorcode.h"
namespace mindspore {
namespace lite {
// Holds one reference (calibration) tensor parsed from the calib data file:
// a tensor name plus a lazily malloc'ed float buffer of elements_num values.
class CalibTensor {
 public:
  CalibTensor(String name, size_t elements_num) : tensor_name_(name), elements_num_(static_cast<int>(elements_num)) {}
  ~CalibTensor() {
    free(data_);
    data_ = nullptr;
  }
  // Owns a raw malloc'ed buffer: copying would double-free it, so copies
  // are forbidden (the original class was implicitly copyable).
  CalibTensor(const CalibTensor &) = delete;
  CalibTensor &operator=(const CalibTensor &) = delete;
  String tensor_name() const { return tensor_name_; }
  int ElementsNum() const { return elements_num_; }
  // Lazily allocates the float buffer on first use.
  // Returns nullptr when the element count is 0 or exceeds INT16_MAX
  // (a sanity bound for micro targets) or when malloc fails.
  float *MutableData() {
    if (data_ == nullptr) {
      if (elements_num_ == 0 || elements_num_ > INT16_MAX) {
        return nullptr;
      }
      data_ = static_cast<float *>(malloc(elements_num_ * sizeof(float)));
    }
    return data_;
  }

 private:
  String tensor_name_;
  int elements_num_{0};  // int to match tensor::MSTensor::ElementsNum()
  float *data_{nullptr};
};
// Loads calibration tensors from a file (ReadCalibData) and checks the
// session outputs against them (CompareOutputs). Owns the CalibTensor
// objects it creates.
class Calibrator {
 public:
  Calibrator() = default;
  ~Calibrator() {
    for (auto &calib : calib_outputs_) {
      delete calib;
      calib = nullptr;
    }
    calib_outputs_.clear();
  }
  // Owns raw CalibTensor pointers: copying would double-delete them, so
  // copies are forbidden (the original class was implicitly copyable).
  Calibrator(const Calibrator &) = delete;
  Calibrator &operator=(const Calibrator &) = delete;
  // Parses the calibration file (header line + data line per tensor).
  int ReadCalibData(const char *calib_data_path);
  // Returns RET_OK when every output matches its reference within tolerance.
  int CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const;

 private:
  Vector<CalibTensor *> calib_outputs_;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_

View File

@ -287,6 +287,7 @@ String operator+(const char *lhs, const String &rhs) {
return str;
}
// Comparison operators for the micro String class, all delegated to
// String::compare() (0 means equal, matching std::string semantics).
bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; }
bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; }