From 5ad3ebbde1aa48774f4a81fcc6007993e479ab73 Mon Sep 17 00:00:00 2001
From: cjh9368
Date: Mon, 24 Aug 2020 15:20:57 +0800
Subject: [PATCH] ci error fix

---
 mindspore/lite/tools/benchmark/benchmark.cc   | 56 +++++++++++++++----
 .../lite/tools/time_profile/time_profile.cc   | 23 +++++++-
 2 files changed, 67 insertions(+), 12 deletions(-)

diff --git a/mindspore/lite/tools/benchmark/benchmark.cc b/mindspore/lite/tools/benchmark/benchmark.cc
index 11485b71df6..0990d0d35a6 100644
--- a/mindspore/lite/tools/benchmark/benchmark.cc
+++ b/mindspore/lite/tools/benchmark/benchmark.cc
@@ -190,7 +190,7 @@ float Benchmark::CompareData(const std::string &nodeName, std::vector msSha
     }
     oss << ") are different";
     std::cerr << oss.str() << std::endl;
-    MS_LOG(ERROR) << "%s", oss.str().c_str();
+    MS_LOG(ERROR) << oss.str().c_str();
     return RET_ERROR;
   }
   size_t errorCount = 0;
@@ -242,11 +242,13 @@ int Benchmark::CompareOutput() {
     auto tensors = session->GetOutputsByName(nodeName);
     if (tensors.empty()) {
       MS_LOG(ERROR) << "Cannot find output node: " << nodeName.c_str() << " , compare output data fail.";
+      std::cerr << "Cannot find output node: " << nodeName.c_str() << " , compare output data fail." << std::endl;
       return RET_ERROR;
     }
     // make sure tensor size is 1
     if (tensors.size() != 1) {
       MS_LOG(ERROR) << "Only support 1 tensor with a name now.";
+      std::cerr << "Only support 1 tensor with a name now." << std::endl;
       return RET_ERROR;
     }
     auto &tensor = tensors.front();
@@ -274,13 +276,15 @@ int Benchmark::CompareOutput() {
     std::cout << "=======================================================" << std::endl << std::endl;
     if (meanBias > this->_flags->accuracyThreshold) {
-      MS_LOG(ERROR) << "Mean bias of all nodes is too big: " << meanBias << "%%";
+      MS_LOG(ERROR) << "Mean bias of all nodes is too big: " << meanBias << "%";
+      std::cerr << "Mean bias of all nodes is too big: " << meanBias << "%" << std::endl;
       return RET_ERROR;
     } else {
       return RET_OK;
     }
   } else {
     MS_LOG(ERROR) << "Error in CompareData";
+    std::cerr << "Error in CompareData" << std::endl;
     std::cout << "=======================================================" << std::endl << std::endl;
     return RET_ERROR;
   }
 }
@@ -288,15 +292,18 @@ int Benchmark::MarkPerformance() {
   MS_LOG(INFO) << "Running warm up loops...";
+  std::cout << "Running warm up loops..." << std::endl;
   for (int i = 0; i < _flags->warmUpLoopCount; i++) {
     auto status = session->RunGraph();
     if (status != 0) {
-      MS_LOG(ERROR) << "Inference error %d" << status;
+      MS_LOG(ERROR) << "Inference error " << status;
+      std::cerr << "Inference error " << status << std::endl;
       return status;
     }
   }
 
   MS_LOG(INFO) << "Running benchmark loops...";
+  std::cout << "Running benchmark loops..." << std::endl;
   uint64_t timeMin = 1000000;
   uint64_t timeMax = 0;
   uint64_t timeAvg = 0;
@@ -306,7 +313,8 @@ int Benchmark::MarkPerformance() {
     auto start = GetTimeUs();
     auto status = session->RunGraph();
     if (status != 0) {
-      MS_LOG(ERROR) << "Inference error %d" << status;
+      MS_LOG(ERROR) << "Inference error " << status;
+      std::cerr << "Inference error " << status;
       return status;
     }
@@ -332,6 +340,7 @@ int Benchmark::MarkAccuracy() {
   MS_LOG(INFO) << "MarkAccuracy";
+  std::cout << "MarkAccuracy" << std::endl;
   for (size_t i = 0; i < msInputs.size(); i++) {
     MS_ASSERT(msInputs.at(i) != nullptr);
     MS_ASSERT(msInputs.at(i)->data_type() == TypeId::kNumberTypeFloat32);
@@ -345,18 +354,21 @@ int Benchmark::MarkAccuracy() {
   auto status = session->RunGraph();
   if (status != RET_OK) {
     MS_LOG(ERROR) << "Inference error " << status;
+    std::cerr << "Inference error " << status << std::endl;
     return status;
   }
 
   status = ReadCalibData();
   if (status != RET_OK) {
     MS_LOG(ERROR) << "Read calib data error " << status;
+    std::cerr << "Read calib data error " << status << std::endl;
     return status;
   }
 
   status = CompareOutput();
   if (status != RET_OK) {
     MS_LOG(ERROR) << "Compare output error " << status;
+    std::cerr << "Compare output error " << status << std::endl;
     return status;
   }
   return RET_OK;
@@ -368,22 +380,26 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   std::string modelName = _flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) + 1);
 
   MS_LOG(INFO) << "start reading model file";
+  std::cout << "start reading model file" << std::endl;
   size_t size = 0;
   char *graphBuf = ReadFile(_flags->modelPath.c_str(), &size);
   if (graphBuf == nullptr) {
-    MS_LOG(ERROR) << "Read model file failed while running %s", modelName.c_str();
+    MS_LOG(ERROR) << "Read model file failed while running " << modelName.c_str();
+    std::cerr << "Read model file failed while running " << modelName.c_str() << std::endl;
     return RET_ERROR;
   }
   auto model = lite::Model::Import(graphBuf, size);
   if (model == nullptr) {
-    MS_LOG(ERROR) << "Import model file failed while running %s", modelName.c_str();
+    MS_LOG(ERROR) << "Import model file failed while running " << modelName.c_str();
+    std::cerr << "Import model file failed while running " << modelName.c_str() << std::endl;
     delete[](graphBuf);
     return RET_ERROR;
   }
   delete[](graphBuf);
   auto context = new (std::nothrow) lite::Context;
   if (context == nullptr) {
-    MS_LOG(ERROR) << "New context failed while running %s", modelName.c_str();
+    MS_LOG(ERROR) << "New context failed while running " << modelName.c_str();
+    std::cerr << "New context failed while running " << modelName.c_str() << std::endl;
     return RET_ERROR;
   }
   if (_flags->device == "CPU") {
@@ -406,12 +422,14 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   session = session::LiteSession::CreateSession(context);
   delete (context);
   if (session == nullptr) {
-    MS_LOG(ERROR) << "CreateSession failed while running %s", modelName.c_str();
+    MS_LOG(ERROR) << "CreateSession failed while running " << modelName.c_str();
+    std::cout << "CreateSession failed while running " << modelName.c_str() << std::endl;
     return RET_ERROR;
   }
   auto ret = session->CompileGraph(model);
   if (ret != RET_OK) {
-    MS_LOG(ERROR) << "CompileGraph failed while running %s", modelName.c_str();
+    MS_LOG(ERROR) << "CompileGraph failed while running " << modelName.c_str();
+    std::cout << "CompileGraph failed while running " << modelName.c_str() << std::endl;
     delete (session);
     delete (model);
     return ret;
@@ -438,7 +456,8 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   if (!_flags->calibDataPath.empty()) {
     status = MarkAccuracy();
     if (status != 0) {
-      MS_LOG(ERROR) << "Run MarkAccuracy error: %d" << status;
+      MS_LOG(ERROR) << "Run MarkAccuracy error: " << status;
+      std::cout << "Run MarkAccuracy error: " << status << std::endl;
       delete (session);
       delete (model);
       return status;
@@ -446,7 +465,8 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   } else {
     status = MarkPerformance();
     if (status != 0) {
-      MS_LOG(ERROR) << "Run MarkPerformance error: %d" << status;
+      MS_LOG(ERROR) << "Run MarkPerformance error: " << status;
+      std::cout << "Run MarkPerformance error: " << status << std::endl;
       delete (session);
       delete (model);
       return status;
@@ -515,37 +535,45 @@ int Benchmark::Init() {
   if (this->_flags->loopCount < 1) {
     MS_LOG(ERROR) << "LoopCount:" << this->_flags->loopCount << " must be greater than 0";
+    std::cerr << "LoopCount:" << this->_flags->loopCount << " must be greater than 0" << std::endl;
     return RET_ERROR;
   }
 
   if (this->_flags->numThreads < 1) {
     MS_LOG(ERROR) << "numThreads:" << this->_flags->numThreads << " must be greater than 0";
+    std::cerr << "numThreads:" << this->_flags->numThreads << " must be greater than 0" << std::endl;
     return RET_ERROR;
   }
 
   if (this->_flags->cpuBindMode == -1) {
     MS_LOG(INFO) << "cpuBindMode = MID_CPU";
+    std::cout << "cpuBindMode = MID_CPU" << std::endl;
   } else if (this->_flags->cpuBindMode == 1) {
     MS_LOG(INFO) << "cpuBindMode = HIGHER_CPU";
+    std::cout << "cpuBindMode = HIGHER_CPU" << std::endl;
   } else {
     MS_LOG(INFO) << "cpuBindMode = NO_BIND";
+    std::cout << "cpuBindMode = NO_BIND" << std::endl;
   }
 
   this->_flags->inDataType = this->_flags->inDataTypeIn == "img" ? kImage : kBinary;
 
   if (_flags->modelPath.empty()) {
     MS_LOG(ERROR) << "modelPath is required";
+    std::cerr << "modelPath is required" << std::endl;
     return 1;
   }
   _flags->InitInputDataList();
   _flags->InitResizeDimsList();
   if (!_flags->resizeDims.empty() && _flags->resizeDims.size() != _flags->input_data_list.size()) {
     MS_LOG(ERROR) << "Size of input resizeDims should be equal to size of input inDataPath";
+    std::cerr << "Size of input resizeDims should be equal to size of input inDataPath" << std::endl;
     return RET_ERROR;
   }
 
   if (_flags->device != "CPU" && _flags->device != "GPU") {
     MS_LOG(ERROR) << "Device type:" << _flags->device << " is not supported.";
+    std::cerr << "Device type:" << _flags->device << " is not supported." << std::endl;
     return RET_ERROR;
   }
@@ -578,6 +606,7 @@ int RunBenchmark(int argc, const char **argv) {
   auto status = mBenchmark.Init();
   if (status != 0) {
     MS_LOG(ERROR) << "Benchmark init Error : " << status;
+    std::cerr << "Benchmark init Error : " << status << std::endl;
     return RET_ERROR;
   }
@@ -587,17 +616,22 @@ int RunBenchmark(int argc, const char **argv) {
     status = mBenchmark.RunBenchmark("CPU");
   } else {
     MS_LOG(ERROR) << "Device type" << flags.device << " not support.";
+    std::cerr << "Device type" << flags.device << " not support." << std::endl;
     return RET_ERROR;
   }
 
   if (status != 0) {
     MS_LOG(ERROR) << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
                   << " Failed : " << status;
+    std::cerr << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
+              << " Failed : " << status << std::endl;
     return RET_ERROR;
   }
 
   MS_LOG(INFO) << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
                << " Success.";
+  std::cout << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
+            << " Success." << std::endl;
   return RET_OK;
 }
 }  // namespace lite
diff --git a/mindspore/lite/tools/time_profile/time_profile.cc b/mindspore/lite/tools/time_profile/time_profile.cc
index 09bec1f340e..4508bb73d8f 100644
--- a/mindspore/lite/tools/time_profile/time_profile.cc
+++ b/mindspore/lite/tools/time_profile/time_profile.cc
@@ -42,6 +42,7 @@ int TimeProfile::GenerateInputData() {
     auto input_data = tensor->MutableData();
     if (input_data == nullptr) {
       MS_LOG(ERROR) << "MallocData for inTensor failed";
+      std::cerr << "MallocData for inTensor failed" << std::endl;
       return RET_ERROR;
     }
     MS_ASSERT(tensor->GetData() != nullptr);
@@ -49,6 +50,7 @@ int TimeProfile::GenerateInputData() {
     auto status = GenerateRandomData(tensor_byte_size, input_data);
     if (status != RET_OK) {
       MS_LOG(ERROR) << "Generate RandomData for inTensor failed " << status;
+      std::cerr << "Generate RandomData for inTensor failed " << status << std::endl;
       return RET_ERROR;
     }
   }
@@ -66,12 +68,14 @@ int TimeProfile::ReadInputFile() {
   size_t size;
   char *bin_buf = ReadFile(_flags->in_data_path_.c_str(), &size);
   if (bin_buf == nullptr) {
-    MS_LOG(ERROR) << "Input data file error, required: ";
+    MS_LOG(ERROR) << "Read input data failed.";
+    std::cerr << "Read input data failed." << std::endl;
     return RET_ERROR;
   }
   auto tensor_data_size = inTensor->Size();
   if (size != tensor_data_size) {
     MS_LOG(ERROR) << "Input binary file size error, required: " << tensor_data_size << " in fact: " << size;
+    std::cerr << "Input binary file size error, required: " << tensor_data_size << " in fact: " << size << std::endl;
     return RET_ERROR;
   }
   auto input_data = inTensor->MutableData();
@@ -85,12 +89,14 @@ int TimeProfile::LoadInput() {
     auto status = GenerateInputData();
     if (status != RET_OK) {
       MS_LOG(ERROR) << "Generate input data error " << status;
+      std::cerr << "Generate input data error " << status << std::endl;
       return RET_ERROR;
     }
   } else {
     auto status = ReadInputFile();
     if (status != RET_OK) {
       MS_LOG(ERROR) << "ReadInputFile error " << status;
+      std::cerr << "ReadInputFile error " << status << std::endl;
       return RET_ERROR;
     }
   }
@@ -102,6 +108,7 @@ int TimeProfile::InitSession() {
   char *graph_buf = ReadFile(_flags->model_path_.c_str(), &size);
   if (graph_buf == nullptr) {
     MS_LOG(ERROR) << "Load graph failed, path " << _flags->model_path_;
+    std::cerr << "Load graph failed, path " << _flags->model_path_ << std::endl;
     return RET_ERROR;
   }
@@ -113,6 +120,7 @@ int TimeProfile::InitSession() {
   session_ = session::LiteSession::CreateSession(ctx);
   if (session_ == nullptr) {
     MS_LOG(ERROR) << "New session failed while running.";
+    std::cerr << "New session failed while running." << std::endl;
     return RET_ERROR;
   }
@@ -179,11 +187,13 @@ int TimeProfile::Init() {
   if (_flags->num_threads_ < 1) {
     MS_LOG(ERROR) << "NumThreads: " << _flags->num_threads_ << " must greater than or equal 1";
+    std::cerr << "NumThreads: " << _flags->num_threads_ << " must greater than or equal 1" << std::endl;
     return RET_ERROR;
   }
 
   if (_flags->loop_count_ < 1) {
     MS_LOG(ERROR) << "LoopCount: " << _flags->loop_count_ << " must greater than or equal 1";
+    std::cerr << "LoopCount: " << _flags->loop_count_ << " must greater than or equal 1" << std::endl;
     return RET_ERROR;
   }
@@ -200,24 +210,28 @@ int TimeProfile::Init() {
 
   if (_flags->model_path_.empty()) {
     MS_LOG(ERROR) << "modelPath is required";
+    std::cerr << "modelPath is required" << std::endl;
     return RET_ERROR;
   }
 
   auto status = InitSession();
   if (status != RET_OK) {
     MS_LOG(ERROR) << "Init session failed.";
+    std::cerr << "Init session failed." << std::endl;
     return RET_ERROR;
   }
 
   status = this->LoadInput();
   if (status != RET_OK) {
     MS_LOG(ERROR) << "Load input failed.";
+    std::cerr << "Load input failed." << std::endl;
     return RET_ERROR;
   }
 
   status = InitCallbackParameter();
   if (status != RET_OK) {
     MS_LOG(ERROR) << "Init callback Parameter failed.";
+    std::cerr << "Init callback Parameter failed." << std::endl;
     return RET_ERROR;
   }
@@ -299,6 +313,7 @@ int TimeProfile::RunTimeProfile() {
   char *graphBuf = ReadFile(_flags->model_path_.c_str(), &size);
   if (graphBuf == nullptr) {
     MS_LOG(ERROR) << "Load graph failed while running " << modelName.c_str();
+    std::cerr << "Load graph failed while running " << modelName.c_str() << std::endl;
     delete graphBuf;
     delete session_;
     return RET_ERROR;
@@ -307,6 +322,7 @@ int TimeProfile::RunTimeProfile() {
   delete graphBuf;
   if (model == nullptr) {
     MS_LOG(ERROR) << "Import model file failed while running " << modelName.c_str();
+    std::cerr << "Import model file failed while running " << modelName.c_str() << std::endl;
     delete session_;
     delete model;
     return RET_ERROR;
@@ -314,6 +330,7 @@ int TimeProfile::RunTimeProfile() {
   auto ret = session_->CompileGraph(model);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Compile graph failed.";
+    std::cerr << "Compile graph failed." << std::endl;
     delete session_;
     delete model;
     return RET_ERROR;
@@ -324,6 +341,7 @@ int TimeProfile::RunTimeProfile() {
   auto status = LoadInput();
   if (status != RET_OK) {
     MS_LOG(ERROR) << "Generate input data error";
+    std::cerr << "Generate input data error" << std::endl;
     delete session_;
     delete model;
     return status;
@@ -337,6 +355,7 @@ int TimeProfile::RunTimeProfile() {
     ret = session_->RunGraph(before_call_back_, after_call_back_);
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Run graph failed.";
+      std::cerr << "Run graph failed." << std::endl;
       delete session_;
       delete model;
       return RET_ERROR;
@@ -384,12 +403,14 @@ int RunTimeProfile(int argc, const char **argv) {
   auto ret = time_profile.Init();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init TimeProfile failed.";
+    std::cerr << "Init TimeProfile failed." << std::endl;
     return RET_ERROR;
   }
 
   ret = time_profile.RunTimeProfile();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Run TimeProfile failed.";
+    std::cerr << "Run TimeProfile failed." << std::endl;
     return RET_ERROR;
   }
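
Context on the recurring fix above: the old code mixed printf-style formatting into C++ stream logging. In a statement such as `MS_LOG(ERROR) << "%s", oss.str().c_str();` the comma operator evaluates the stream expression, discards the right-hand operand, and leaves the literal "%s" in the log, so the intended value is never printed; `MS_LOG(ERROR) << "Inference error %d" << status;` similarly prints a literal "%d". Below is a minimal standalone sketch of the pitfall and of the dual log/console pattern the patch adopts; LogError is a hypothetical stand-in for MS_LOG(ERROR), not part of the MindSpore API, and CI output capture is assumed to read only the process's standard streams.

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical stand-in for MS_LOG(ERROR); the real macro writes to the
// framework log, which a CI harness may not capture.
static void LogError(const std::string &msg) { std::cerr << "[ERROR] " << msg << std::endl; }

int main() {
  int status = -1;

  // Broken pattern removed by the patch: "%d" is not a stream placeholder,
  // and the comma operator evaluates `status` but discards it, so only the
  // literal string (including "%d") is printed.
  std::cout << "Inference error %d", status;
  std::cout << std::endl;

  // Fixed pattern: chain operator<< so every operand reaches the stream.
  std::cout << "Inference error " << status << std::endl;

  // Dual-logging pattern used throughout the patch: build the message once,
  // then send it to both the framework log and the console.
  std::ostringstream oss;
  oss << "Inference error " << status;
  LogError(oss.str());
  std::cout << oss.str() << std::endl;
  return 0;
}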