forked from mindspore-Ecosystem/mindspore
!3879 benchmark print model name
Merge pull request !3879 from cjh9368/fix_benchmark_inf_bug
commit cdc5131869

@@ -21,6 +21,7 @@
 #include <cmath>
 #include <algorithm>
 #include <utility>
+#include <cfloat>
 #include "src/common/common.h"
 #include "include/ms_tensor.h"
 #include "include/context.h"

@@ -191,11 +192,16 @@ float Benchmark::CompareData(const std::string &nodeName, std::vector<int> msSha
         std::cout << msTensorData[j] << " ";
       }
 
+      if (std::isnan(msTensorData[j]) || std::isinf(msTensorData[j])) {
+        MS_LOG(ERROR) << "Output tensor has nan or inf data, compare fail";
+        return RET_ERROR;
+      }
+
       auto tolerance = absoluteTolerance + relativeTolerance * fabs(calibTensor->data.at(j));
       auto absoluteError = std::fabs(msTensorData[j] - calibTensor->data.at(j));
       if (absoluteError > tolerance) {
         // just assume that atol = rtol
-        meanError += absoluteError / (fabs(calibTensor->data.at(j)) + 1);
+        meanError += absoluteError / (fabs(calibTensor->data.at(j)) + FLT_MIN);
         errorCount++;
       }
     }

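The check added above mirrors the numpy.isclose-style predicate |actual - expected| <= atol + rtol * |expected|, and FLT_MIN (the smallest positive normalized float) keeps the relative-error denominator nonzero when the expected value is exactly 0; the old "+ 1" term skewed the ratio for small expected values. A minimal standalone sketch of the same logic, where CompareBuffers and the sample data are illustrative rather than part of the patch:

#include <cfloat>
#include <cmath>
#include <cstdio>

// Reject NaN/Inf outputs outright, then accumulate a mean relative error
// over the elements that fall outside the atol/rtol tolerance band.
static bool CompareBuffers(const float *actual, const float *expected, int n,
                           float atol = 1e-8f, float rtol = 1e-5f) {
  double meanError = 0.0;
  int errorCount = 0;
  for (int j = 0; j < n; ++j) {
    if (std::isnan(actual[j]) || std::isinf(actual[j])) {
      std::fprintf(stderr, "Output tensor has nan or inf data, compare fail\n");
      return false;
    }
    float tolerance = atol + rtol * std::fabs(expected[j]);
    float absoluteError = std::fabs(actual[j] - expected[j]);
    if (absoluteError > tolerance) {
      // FLT_MIN keeps the division well-defined when expected[j] == 0.
      meanError += absoluteError / (std::fabs(expected[j]) + FLT_MIN);
      errorCount++;
    }
  }
  if (errorCount > 0) {
    std::printf("mean relative error over %d mismatches: %f\n", errorCount,
                meanError / errorCount);
  }
  return true;
}

int main() {
  const float actual[] = {1.0f, 2.0f, 3.1f};
  const float expected[] = {1.0f, 2.0f, 3.0f};
  return CompareBuffers(actual, expected, 3) ? 0 : 1;
}
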
@@ -296,16 +302,10 @@ int Benchmark::MarkPerformance() {
   }
   if (_flags->loopCount > 0) {
     timeAvg /= _flags->loopCount;
-    // MS_LOG(INFO) << "CSV:%s:%d:%f:%f:%f\n", _flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) +
-    // 1).c_str(),
-    // _flags->numThreads, timeMin / 1000.0f, timeMax / 1000.0f, timeAvg / 1000.0f);
-    // MS_LOG(INFO) <<"Modle = %s, numThreads = %d, MinRunTime = %f ms, MaxRuntime = %f ms, AvgRunTime = %f ms",
-    // _flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) + 1).c_str(), _flags->numThreads,
-    // timeMin / 1000.0f, timeMax / 1000.0f, timeAvg / 1000.0f);
-
-    printf("CSV:%s:%d:%f:%f:%f\n", _flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) + 1).c_str(),
-           _flags->numThreads, timeMin / 1000.0f, timeMax / 1000.0f, timeAvg / 1000.0f);
-    printf("Modle = %s, numThreads = %d, MinRunTime = %f ms, MaxRuntime = %f ms, AvgRunTime = %f ms\n",
+    MS_LOG(INFO) << "Model = " << _flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
+                 << ", NumThreads = " << _flags->numThreads << ", MinRunTime = " << timeMin / 1000.0f
+                 << ", MaxRuntime = " << timeMax / 1000.0f << ", AvgRunTime = " << timeAvg / 1000.0f;
+    printf("Model = %s, NumThreads = %d, MinRunTime = %f ms, MaxRuntime = %f ms, AvgRunTime = %f ms\n",
            _flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) + 1).c_str(), _flags->numThreads,
            timeMin / 1000.0f, timeMax / 1000.0f, timeAvg / 1000.0f);
   }

@@ -325,13 +325,22 @@ int Benchmark::MarkAccuracy() {
     std::cout << std::endl;
   }
   auto status = session->RunGraph();
-  if (status != 0) {
-    MS_LOG(ERROR) << "Inference error %d" << status;
+  if (status != RET_OK) {
+    MS_LOG(ERROR) << "Inference error " << status;
     return status;
   }
 
-  ReadCalibData();
-  CompareOutput();
+  status = ReadCalibData();
+  if (status != RET_OK) {
+    MS_LOG(ERROR) << "Read calib data error " << status;
+    return status;
+  }
+
+  status = CompareOutput();
+  if (status != RET_OK) {
+    MS_LOG(ERROR) << "Compare output error " << status;
+    return status;
+  }
   return 0;
 }

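The new check-log-return blocks follow the standard C++ status-propagation idiom; many codebases factor the repetition into a macro. A sketch under that assumption — RETURN_IF_ERROR and the stub functions are illustrative, not something this patch adds:

#include <cstdio>

constexpr int RET_OK = 0;  // local stand-in for the lite return code

// Evaluate an expression once; log and propagate any non-OK status.
#define RETURN_IF_ERROR(expr, msg)                     \
  do {                                                 \
    int status_ = (expr);                              \
    if (status_ != RET_OK) {                           \
      std::fprintf(stderr, "%s %d\n", (msg), status_); \
      return status_;                                  \
    }                                                  \
  } while (0)

static int ReadCalibData() { return RET_OK; }  // stub
static int CompareOutput() { return RET_OK; }  // stub

int MarkAccuracy() {
  RETURN_IF_ERROR(ReadCalibData(), "Read calib data error");
  RETURN_IF_ERROR(CompareOutput(), "Compare output error");
  return RET_OK;
}

int main() { return MarkAccuracy(); }
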
@@ -373,10 +382,10 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   msInputs = session->GetInputs();
   auto endPrepareTime = GetTimeUs();
 #if defined(__arm__)
-  MS_LOG(INFO) << "PrepareTime = %lld ms, " << (endPrepareTime - startPrepareTime) / 1000;
+  MS_LOG(INFO) << "PrepareTime = " << (endPrepareTime - startPrepareTime) / 1000 << " ms";
   printf("PrepareTime = %lld ms, ", (endPrepareTime - startPrepareTime) / 1000);
 #else
-  MS_LOG(INFO) << "PrepareTime = %ld ms, " << (endPrepareTime - startPrepareTime) / 1000;
+  MS_LOG(INFO) << "PrepareTime = " << (endPrepareTime - startPrepareTime) / 1000 << " ms ";
   printf("PrepareTime = %ld ms, ", (endPrepareTime - startPrepareTime) / 1000);
 #endif

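The old MS_LOG lines mixed printf-style conversion specifiers into a C++ stream, so the literal text "%lld ms, " landed in the log; a stream formats the inserted value itself. A small illustration, with std::ostringstream standing in for the MS_LOG sink:

#include <iostream>
#include <sstream>

int main() {
  long long elapsedUs = 123456;
  std::ostringstream before, after;
  // Streams do not interpret printf specifiers: "%lld" is just text.
  before << "PrepareTime = %lld ms, " << elapsedUs / 1000;
  after << "PrepareTime = " << elapsedUs / 1000 << " ms";
  std::cout << before.str() << '\n';  // PrepareTime = %lld ms, 123
  std::cout << after.str() << '\n';   // PrepareTime = 123 ms
  return 0;
}
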
@@ -385,18 +394,21 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   auto status = LoadInput();
   if (status != 0) {
     MS_LOG(ERROR) << "Generate input data error";
+    delete graphBuf;
     return status;
   }
   if (!_flags->calibDataPath.empty()) {
     status = MarkAccuracy();
     if (status != 0) {
       MS_LOG(ERROR) << "Run MarkAccuracy error: %d" << status;
+      delete graphBuf;
       return status;
     }
   } else {
     status = MarkPerformance();
     if (status != 0) {
       MS_LOG(ERROR) << "Run MarkPerformance error: %d" << status;
+      delete graphBuf;
       return status;
     }
   }

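The three added delete graphBuf; statements free the model buffer on the early-return paths, which previously leaked. Assuming graphBuf is a heap-allocated byte buffer owned by RunBenchmark (its allocation is outside this diff), a std::unique_ptr would cover every exit without per-branch cleanup; a sketch under that assumption:

#include <cstdio>
#include <memory>

static int LoadInput() { return 0; }        // stub for the real call
static int MarkPerformance() { return 0; }  // stub

int RunBenchmark() {
  // Hypothetical: the real graphBuf holds the model file contents.
  std::unique_ptr<char[]> graphBuf(new char[1024]);
  if (LoadInput() != 0) {
    std::fprintf(stderr, "Generate input data error\n");
    return -1;  // graphBuf is freed automatically, no explicit delete
  }
  if (MarkPerformance() != 0) {
    std::fprintf(stderr, "Run MarkPerformance error\n");
    return -1;  // freed here too
  }
  return 0;  // and on the success path
}

int main() { return RunBenchmark(); }
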
@@ -511,13 +523,14 @@ int RunBenchmark(int argc, const char **argv) {
   }
 
   if (status != 0) {
-    MS_LOG(ERROR) << "Run Benchmark Error : " << status;
+    MS_LOG(ERROR) << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
+                  << " Failed : " << status;
     return 1;
   }
 
-  MS_LOG(INFO) << "end of benchmark";
+  MS_LOG(INFO) << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
+               << " Success.";
   return 0;
 }
 }  // namespace lite
 }  // namespace mindspore

@@ -37,8 +37,8 @@
 namespace mindspore::lite {
 enum MS_API InDataType { kImage = 0, kBinary = 1 };
 
-constexpr float relativeTolerance = 0.01;
-constexpr float absoluteTolerance = 0.01;
+constexpr float relativeTolerance = 1e-5;
+constexpr float absoluteTolerance = 1e-8;
 
 struct MS_API CheckTensor {
   CheckTensor(const std::vector<size_t> &shape, const std::vector<float> &data) {

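The new values equal the defaults of numpy.isclose (rtol=1e-5, atol=1e-8), so the benchmark's pass/fail band lines up with a NumPy-based reference check; the old 0.01/0.01 pair was far looser. The shared predicate, with isClose as an illustrative helper rather than part of the header:

#include <cmath>

constexpr float relativeTolerance = 1e-5;
constexpr float absoluteTolerance = 1e-8;

// Same test numpy.isclose applies element-wise: |a - b| <= atol + rtol * |b|.
inline bool isClose(float a, float b) {
  return std::fabs(a - b) <= absoluteTolerance + relativeTolerance * std::fabs(b);
}
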
@@ -143,4 +143,3 @@ class MS_API Benchmark {
 int MS_API RunBenchmark(int argc, const char **argv);
 }  // namespace mindspore::lite
 #endif  // MINNIE_BENCHMARK_BENCHMARK_H_
-