mindspore/include/api/cfg.h

/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_CFG_H
#define MINDSPORE_INCLUDE_API_CFG_H
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include <memory>
#include "include/api/data_type.h"
#include "include/api/dual_abi_helper.h"
#include "include/api/types.h"
namespace mindspore {
constexpr int iter_th = 1000; /**< Default value of MixPrecisionCfg::num_of_not_nan_iter_th_ */
class MixPrecisionCfg {
 public:
  MixPrecisionCfg() {
    this->dynamic_loss_scale_ = false;
    this->loss_scale_ = 128.0f;
    this->keep_batchnorm_fp32_ = true;
    this->num_of_not_nan_iter_th_ = iter_th;
  }
  MixPrecisionCfg(const MixPrecisionCfg &rhs) {
    this->dynamic_loss_scale_ = rhs.dynamic_loss_scale_;
    this->loss_scale_ = rhs.loss_scale_;
    this->keep_batchnorm_fp32_ = rhs.keep_batchnorm_fp32_;
    this->num_of_not_nan_iter_th_ = rhs.num_of_not_nan_iter_th_;
    this->is_raw_mix_precision_ = rhs.is_raw_mix_precision_;
  }
  ~MixPrecisionCfg() = default;
  bool dynamic_loss_scale_ = false;   /**< Enable/disable dynamic loss scaling during mixed-precision training */
  float loss_scale_;                  /**< Initial loss scale factor */
  bool keep_batchnorm_fp32_ = true;   /**< Keep batch norm in FP32 while training */
  uint32_t num_of_not_nan_iter_th_;   /**< Number of consecutive not-NaN iterations after which the loss scale is adjusted when dynamic loss scaling is enabled */
  bool is_raw_mix_precision_ = false; /**< Whether the mixed-precision model was exported from MindSpore */
};
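/**
 * Usage sketch (illustrative only, not part of this header's API): enabling
 * dynamic loss scaling for mixed-precision training. The concrete values
 * below are assumptions chosen for demonstration.
 * @code
 * mindspore::MixPrecisionCfg mp_cfg;
 * mp_cfg.dynamic_loss_scale_ = true;     // let the loss scale adapt during training
 * mp_cfg.loss_scale_ = 1024.0f;          // larger initial scale than the 128.0f default
 * mp_cfg.keep_batchnorm_fp32_ = true;    // keep batch norm in FP32 for numerical stability
 * mp_cfg.num_of_not_nan_iter_th_ = 500;  // adjust the scale sooner than the default 1000
 * @endcode
 */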
class TrainCfg {
 public:
  TrainCfg() = default;
  TrainCfg(const TrainCfg &rhs) {
    this->optimization_level_ = rhs.optimization_level_;
    this->loss_name_ = rhs.loss_name_;
    this->mix_precision_cfg_ = rhs.mix_precision_cfg_;
    this->accumulate_gradients_ = rhs.accumulate_gradients_;
  }
  ~TrainCfg() = default;
  OptimizationLevel optimization_level_ = kO0; /**< Optimization level for the trained network */
  std::vector<std::string> loss_name_ = {
    "loss_fct", "_loss_fn", "SigmoidCrossEntropy"}; /**< Name substrings that identify a loss kernel */
  MixPrecisionCfg mix_precision_cfg_; /**< Mixed-precision configuration */
  bool accumulate_gradients_ = false; /**< Accumulate gradients over multiple iterations */
};
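/**
 * Usage sketch (illustrative only): a training configuration that enables
 * mixed precision and gradient accumulation. "my_loss" is a hypothetical
 * loss-kernel name substring used purely for demonstration.
 * @code
 * mindspore::TrainCfg train_cfg;
 * train_cfg.mix_precision_cfg_.dynamic_loss_scale_ = true;  // train with dynamic loss scaling
 * train_cfg.accumulate_gradients_ = true;                   // accumulate gradients over iterations
 * train_cfg.loss_name_.push_back("my_loss");                // also match a custom loss kernel
 * @endcode
 */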
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_CFG_H