!29451 [MS][LITE] check train data type

Merge pull request !29451 from yefeng/207-check_train_data_type
i-robot 2022-01-24 09:34:30 +00:00 committed by Gitee
commit e0fcbdc2c7
11 changed files with 21 additions and 16 deletions


@@ -34,8 +34,9 @@ void AccuracyMonitor::Begin(const session::TrainLoopCallBackData &cb_data) {
 }
 
 int AccuracyMonitor::EpochEnd(const session::TrainLoopCallBackData &cb_data) {
-  if ((cb_data.epoch_ + 1) % check_every_n_ == 0) cb_data.loop_->Eval(ds_, {}, nullptr, max_steps_);
+  if ((static_cast<int>(cb_data.epoch_) + 1) % check_every_n_ == 0) {
+    cb_data.loop_->Eval(ds_, {}, nullptr, max_steps_);
+  }
   accuracies_.push_back(std::make_pair(cb_data.epoch_, 0.0));
   return mindspore::session::RET_CONTINUE;
 }
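
The cast in this hunk (and in the three that follow) addresses the same implicit signed/unsigned conversion: cb_data.epoch_ and cb_data.step_ appear to be unsigned counters, while check_every_n_, print_every_n_, and step_ are plain ints, so the signed operand used to be converted to unsigned before the modulo. A minimal sketch of the pitfall, assuming those types:

#include <iostream>

int main() {
  unsigned int epoch = 4;  // stand-in for cb_data.epoch_ (assumed unsigned, as the cast suggests)
  int every_n = -5;        // contrived negative interval to make the pitfall visible
  // In (epoch + 1) % every_n the usual arithmetic conversions turn the signed
  // operand into unsigned: -5 becomes 4294967291, so the remainder is 5, not 0.
  std::cout << (epoch + 1) % static_cast<unsigned int>(every_n) << "\n";  // prints 5
  // Casting the counter instead keeps the arithmetic signed, as the patch does.
  std::cout << (static_cast<int>(epoch) + 1) % every_n << "\n";           // prints 0
  return 0;
}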


@@ -45,7 +45,7 @@ void ClassificationTrainAccuracyMonitor::EpochBegin(const session::TrainLoopCallBackData &cb_data) {
 
 int ClassificationTrainAccuracyMonitor::EpochEnd(const session::TrainLoopCallBackData &cb_data) {
   if (cb_data.step_ > 0) accuracies_.at(cb_data.epoch_).second /= static_cast<float>(cb_data.step_ + 1);
-  if ((cb_data.epoch_ + 1) % print_every_n_ == 0) {
+  if ((static_cast<int>(cb_data.epoch_) + 1) % print_every_n_ == 0) {
     std::cout << "Epoch (" << (cb_data.epoch_ + 1) << "):\tTraining Accuracy is "
               << accuracies_.at(cb_data.epoch_).second << std::endl;
   }


@@ -52,7 +52,7 @@ void LossMonitor::StepEnd(const session::TrainLoopCallBackData &cb_data) {
     if (it->second->ElementsNum() == 1) {
       auto loss = reinterpret_cast<float *>(it->second->MutableData());
       losses_.at(cb_data.epoch_).second += loss[0];
-      if ((cb_data.step_ + 1) % print_every_n_ == 0)
+      if ((static_cast<int>(cb_data.step_) + 1) % print_every_n_ == 0)
         std::cout << (cb_data.epoch_ + 1) << "." << (cb_data.step_ + 1) << ":\tLoss is " << loss[0] << std::endl;
       return;
     }


@@ -56,7 +56,7 @@ LRScheduler::LRScheduler(LR_Lambda lambda_func, void *lr_cb_data, int step)
     : lambda_func_(lambda_func), lr_data_(lr_cb_data), step_(step) {}
 
 int LRScheduler::EpochEnd(const session::TrainLoopCallBackData &cb_data) {
-  if (((cb_data.epoch_ + 1) % step_) == 0) {
+  if (((static_cast<int>(cb_data.epoch_) + 1) % step_) == 0) {
     float lr = cb_data.session_->GetLearningRate();
     int update = lambda_func_(&lr, cb_data.epoch_, lr_data_);
     if (update == UPDATE_LR) {


@@ -350,7 +350,7 @@ int TrainExport::ExportTensor(const Model *model, const std::vector<mindspore::l
                               const std::vector<std::string> &output_names, const std::set<size_t> &out_set) {
   for (auto index : map_index) {
     auto id = index.first;
-    size_t pid = id - offset;
+    size_t pid = id - static_cast<size_t>(offset);
     mindspore::lite::Tensor *tensor = tensors.at(pid);
     schema::Tensor *scTensor = model->all_tensors_.at(pid);
     auto preferred_dim =
@@ -404,7 +404,7 @@ int TrainExport::ExportNet(const std::vector<mindspore::kernel::LiteKernel *> &k
     std::vector<uint32_t> in_idx, out_idx;
     size_t input_index = 0;
     for (const auto tensor : kernel->in_tensors()) {
-      size_t id = TSFindTensor(tensors, tensor) + offset;
+      size_t id = TSFindTensor(tensors, tensor) + static_cast<size_t>(offset);
       if (id == tensors.size()) {
         MS_LOG(ERROR) << "cannot find tensor " + tensor->ToString() + " in model";
         return RET_ERROR;
@@ -543,7 +543,7 @@ int TrainExport::ExportInit(const std::string model_name, std::string version) {
 
 int TrainExport::SaveToFile() { return MetaGraphSerializer::Save(*meta_graph_, file_name_); }
 
-int TrainExport::IsInputTensor(const schema::TensorT &t) {
+bool TrainExport::IsInputTensor(const schema::TensorT &t) {
   int total_dims = std::accumulate(t.dims.begin(), t.dims.end(), 1, std::multiplies<int>());
   return ((t.data.size() == 0) && (total_dims != 0));
 }
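
The hunk above changes IsInputTensor's return type from int to bool: the function computes a boolean predicate, so the int return only obscured the contract. A simplified, self-contained sketch of the same predicate, assuming schema::TensorT exposes its shape as a vector of ints and its constant data as a byte vector:

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Simplified predicate: a graph input has a non-empty shape but no constant
// data attached. schema::TensorT's fields are approximated by plain vectors.
bool IsInputTensor(const std::vector<int32_t> &dims, const std::vector<uint8_t> &data) {
  int total_dims = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
  return data.empty() && (total_dims != 0);
}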


@@ -72,7 +72,7 @@ class TrainExport {
   std::unique_ptr<schema::CNodeT> CreateCNode(const mindspore::kernel::LiteKernel *kernel,
                                               std::vector<uint32_t> inputIndex, std::vector<uint32_t> outputIndex,
                                               const Model *model);
-  int IsInputTensor(const schema::TensorT &t);
+  bool IsInputTensor(const schema::TensorT &t);
   int CreateAndAddCNode(const mindspore::kernel::LiteKernel *kernel, std::vector<uint32_t> inputIndex,
                         std::vector<uint32_t> outputIndex, const Model *model);
   std::unique_ptr<schema::CNodeT> CreateTransformNode(std::vector<uint32_t> inputIndex,


@@ -171,7 +171,7 @@ OpParameter *PopulateSoftmaxCrossEntropyParameter(const void *prim) {
   memset(sce_param, 0, sizeof(SoftmaxCrossEntropyParameter));
   auto primitive = static_cast<const schema::Primitive *>(prim);
   sce_param->op_parameter_.type_ = primitive->value_type();
-  sce_param->is_grad_ = 0;
+  sce_param->is_grad_ = false;
   return reinterpret_cast<OpParameter *>(sce_param);
 }


@@ -234,7 +234,7 @@ OpParameter *PopulateSparseSoftmaxCrossEntropyParameter(const void *primitive) {
   memset(sce_param, 0, sizeof(SoftmaxCrossEntropyParameter));
   auto sparseSoftmaxCrossEntropy_prim = prim->value_as_SparseSoftmaxCrossEntropy();
   MS_ASSERT(sparseSoftmaxCrossEntropy_prim != nullptr);
-  sce_param->is_grad_ = sparseSoftmaxCrossEntropy_prim->isGrad();
+  sce_param->is_grad_ = static_cast<bool>(sparseSoftmaxCrossEntropy_prim->isGrad());
   sce_param->op_parameter_.type_ = schema::PrimitiveType_SparseSoftmaxCrossEntropyWithLogits;
   return reinterpret_cast<OpParameter *>(sce_param);
 }
@@ -252,7 +252,7 @@ OpParameter *PopulateSoftmaxCrossEntropyParameter(const void *primitive) {
     return nullptr;
   }
   memset(sce_param, 0, sizeof(SoftmaxCrossEntropyParameter));
-  sce_param->is_grad_ = 0;
+  sce_param->is_grad_ = false;
   sce_param->op_parameter_.type_ = schema::PrimitiveType_SoftmaxCrossEntropyWithLogits;
   return reinterpret_cast<OpParameter *>(sce_param);
 }
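
Both populate hunks replace the integer literal 0 (and a raw flatbuffers getter result) with a proper bool, matching what appears to be a bool is_grad_ field. A self-contained sketch of the allocate/zero/assign pattern with stand-in structs (the real OpParameter and SoftmaxCrossEntropyParameter in MindSpore Lite carry more members):

#include <cstdlib>
#include <cstring>

// Stand-in structs; this is a sketch of the populate pattern only.
struct OpParameter {
  int type_;
};
struct SoftmaxCrossEntropyParameter {
  OpParameter op_parameter_;
  bool is_grad_;
};

OpParameter *PopulateSoftmaxCrossEntropyParameter() {
  auto *sce_param = static_cast<SoftmaxCrossEntropyParameter *>(malloc(sizeof(SoftmaxCrossEntropyParameter)));
  if (sce_param == nullptr) {
    return nullptr;
  }
  memset(sce_param, 0, sizeof(SoftmaxCrossEntropyParameter));
  sce_param->is_grad_ = false;  // bool literal now that the field is bool
  return reinterpret_cast<OpParameter *>(sce_param);
}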


@@ -65,7 +65,9 @@ float CalcSparseClassificationAccuracy(T *predictions, int *labels, int batch_si
         max_idx = c;
       }
     }
-    if (labels[b] == max_idx) accuracy += 1.0;
+    if (labels[b] == max_idx) {
+      accuracy += 1.0;
+    }
   }
   return accuracy / (static_cast<float>(batch_size));
 }
@@ -109,7 +111,9 @@ float CalcOneHotClassificationAccuracy(T *predictions, float *labels, int batch_
         label = c;
       }
     }
-    if (label == max_idx) accuracy += 1.0;
+    if (label == max_idx) {
+      accuracy += 1.0;
+    }
   }
   return accuracy / (static_cast<float>(batch_size));
 }
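
For context, a non-template, float-only sketch of the sparse-label accuracy these hunks touch (the braces added by the patch do not change behavior): per row, take the argmax over the class scores and compare it with the integer label.

#include <vector>

// Minimal sketch; the real function is a template over the prediction type T.
float CalcSparseClassificationAccuracy(const float *predictions, const int *labels,
                                       int batch_size, int num_classes) {
  float accuracy = 0.0f;
  for (int b = 0; b < batch_size; b++) {
    int max_idx = 0;
    float max_score = predictions[b * num_classes];
    for (int c = 1; c < num_classes; c++) {
      if (predictions[b * num_classes + c] > max_score) {
        max_score = predictions[b * num_classes + c];
        max_idx = c;
      }
    }
    if (labels[b] == max_idx) {
      accuracy += 1.0f;
    }
  }
  return accuracy / static_cast<float>(batch_size);
}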


@@ -271,7 +271,7 @@ int NetTrain::MarkPerformance(const std::unique_ptr<session::LiteSession> &session) {
   }
 
   if (flags_->epochs_ > 0) {
-    time_avg /= flags_->epochs_;
+    time_avg /= static_cast<size_t>(flags_->epochs_);
     MS_LOG(INFO) << "Model = " << flags_->model_file_.substr(flags_->model_file_.find_last_of(DELIM_SLASH) + 1).c_str()
                  << ", NumThreads = " << flags_->num_threads_ << ", MinRunTime = " << time_min / 1000.0f
                  << ", MaxRuntime = " << time_max / 1000.0f << ", AvgRunTime = " << time_avg / 1000.0f;


@@ -223,7 +223,7 @@ class MS_API NetTrain {
   int CompareOutput(const session::LiteSession &lite_session);
   int SaveModels(const std::unique_ptr<session::LiteSession> &session);
   int CheckExecutionOfSavedModels();
-  void TensorNan(float *data, int size) {
+  void TensorNan(const float *data, int size) {
     for (int i = 0; i < size; i++) {
      if (std::isnan(data[i])) {
        std::cout << "nan value of index=" << i << ", " << data[i] << std::endl;
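
The added const qualifier documents that the NaN scan only reads the buffer and lets it accept pointers to const data. A minimal usage sketch (main and the vector are illustrative, not part of the tool):

#include <cmath>
#include <iostream>
#include <vector>

// Same shape as the patched helper: a read-only scan, so the pointer is const.
void TensorNan(const float *data, int size) {
  for (int i = 0; i < size; i++) {
    if (std::isnan(data[i])) {
      std::cout << "nan value of index=" << i << ", " << data[i] << std::endl;
    }
  }
}

int main() {
  const std::vector<float> output = {1.0f, std::nanf(""), 3.0f};
  // output.data() is const float *; the pre-patch signature could not accept it.
  TensorNan(output.data(), static_cast<int>(output.size()));
  return 0;
}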