Fix I5IZU7 optimize log && clean pclint-plus warnings

Add loss to cfg instead of train_cfg
zhangzhaoju 2022-07-27 14:14:12 +08:00
parent 229ed3b7a6
commit 4ba1af2f2c
8 changed files with 38 additions and 28 deletions

View File

@@ -60,9 +60,9 @@ class TrainCfg {
~TrainCfg() = default;
OptimizationLevel optimization_level_ = kO0;
std::vector<std::string> loss_name_ = {"loss_fct",
"_loss_fn"}; /**< Set part of the name that identify a loss kernel */
MixPrecisionCfg mix_precision_cfg_; /**< Mix precision configuration */
std::vector<std::string> loss_name_ = {
"loss_fct", "_loss_fn", "SigmoidCrossEntropy"}; /**< Set part of the name that identify a loss kernel */
MixPrecisionCfg mix_precision_cfg_; /**< Mix precision configuration */
bool accumulate_gradients_ = false;
};
} // namespace mindspore
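A hedged usage sketch, not part of the commit: loss_name_ appears to be a public member of TrainCfg, so a caller can extend the substring list that identifies loss kernels. The "MyCustomLoss" name below is made up for illustration, and the include path is assumed.

#include "include/api/cfg.h"  // assumed header for TrainCfg

int main() {
  mindspore::TrainCfg cfg;
  // Per the comment above, any kernel whose name contains this substring
  // is treated as a loss node.
  cfg.loss_name_.emplace_back("MyCustomLoss");
  return 0;
}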

View File

@@ -20,11 +20,11 @@
#include <vector>
#include <memory>
#include <unordered_map>
#include "pybind11/pytypes.h"
#include "utils/hash_map.h"
#include "utils/ms_utils.h"
#include "ir/anf.h"
#include "ir/signature.h"
#include "pybind11/pytypes.h"
namespace mindspore::pynative {
struct AbsCacheKey {

View File

@@ -17,6 +17,7 @@
#include <algorithm>
#include <utility>
#include <limits>
+#include <cmath>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
namespace mindspore {
@@ -27,11 +28,7 @@ static constexpr int MAX_DIMS = 7;
template <typename T>
T GetDivZeroVal(const T &v) {
auto zero = static_cast<T>(0.0);
-  if (std::numeric_limits<T>::has_infinity) {
-    return v > zero ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
-  } else {
-    return v > zero ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
-  }
+  return v > zero ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
}
template <>
@@ -40,28 +37,42 @@ complex128 GetDivZeroVal(const complex128 &v) {
}
template <>
-complex64 GetDivZeroVal(const complex64 &v) {
+complex64 GetDivZeroVal(const complex64 &) {
return std::numeric_limits<complex64>::quiet_NaN();
}
+template <class T>
+bool isZero(T val) {
+  return val == T(0.0f);
+}
+template <>
+bool isZero(float val) {
+  return std::fpclassify(val) == FP_ZERO;
+}
+template <>
+bool isZero(double val) {
+  return std::fpclassify(val) == FP_ZERO;
+}
template <typename T>
bool XdivyCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), INPUT_NUM, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), OUTPUT_NUM, kernel_name_);
-  auto x_addr = reinterpret_cast<T *>(inputs[0]->addr);
-  auto y_addr = reinterpret_cast<T *>(inputs[1]->addr);
-  auto output_addr = reinterpret_cast<T *>(outputs[0]->addr);
+  auto x_addr = static_cast<T *>(inputs[0]->addr);
+  auto y_addr = static_cast<T *>(inputs[1]->addr);
+  auto output_addr = static_cast<T *>(outputs[0]->addr);
size_t output_size = outputs[0]->size / sizeof(T);
auto sameShapeTask = [&x_addr, &y_addr, &output_addr](size_t start, size_t end) {
for (size_t i = start; i < end; i++) {
auto dividend = x_addr[i];
auto divisor = y_addr[i];
-      auto zero = (T)0;
-      if (divisor == zero) {
-        if (dividend == zero) {
-          output_addr[i] = zero;
+      if (isZero(divisor)) {
+        if (isZero(dividend)) {
+          output_addr[i] = static_cast<T>(0.0);
continue;
}
output_addr[i] = GetDivZeroVal(dividend);
@@ -217,7 +228,7 @@ int XdivyCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::v
GetBroadCastIndex(x_shape, out_shape, &index_listx_);
GetBroadCastIndex(y_shape, out_shape, &index_listy_);
}
-  return KRET_OK;
+  return 0;
}
std::vector<KernelAttr> XdivyCpuKernelMod::GetOpSupport() { return support_ops_; }
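A minimal scalar sketch, not from the commit, of the zero-handling that LaunchKernel implements above, using the same std::fpclassify test the commit introduces; fpclassify sidesteps the direct floating-point equality comparison that lint tools such as pclint-plus flag. XdivyRef is a hypothetical name, and the non-zero-divisor path is assumed to be a plain x / y.

#include <cmath>
#include <limits>

// 0/0 yields 0; x/0 yields +/-infinity (mirroring GetDivZeroVal); else x/y.
double XdivyRef(double x, double y) {
  if (std::fpclassify(y) == FP_ZERO) {
    if (std::fpclassify(x) == FP_ZERO) {
      return 0.0;  // both operands are zero
    }
    return x > 0.0 ? std::numeric_limits<double>::infinity()
                   : -std::numeric_limits<double>::infinity();
  }
  return x / y;
}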

View File

@@ -18,7 +18,6 @@
#include <complex>
#include <vector>
#include <map>
-#include <utility>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
@@ -41,9 +40,9 @@ class XdivyCpuKernelMod : public NativeCpuKernelMod {
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &) override;
- protected:
std::vector<KernelAttr> GetOpSupport() override;
+ protected:
void ResetResource() noexcept {
input_size_list_.clear();
output_size_list_.clear();

View File

@@ -781,8 +781,8 @@ void FuncGraphManager::CommitChanges(std::vector<change::ChangePtr> &&changes) {
void FuncGraphManager::EraseOneGraph(const FuncGraphPtr &fg) {
MS_EXCEPTION_IF_NULL(fg);
-  size_t erase_cnt = func_graphs_.erase(fg->shared_from_base<FuncGraph>());
-  if (!erase_cnt) {
+  bool erase_ret = func_graphs_.erase(fg->shared_from_base<FuncGraph>());
+  if (!erase_ret) {
return;
}
fg->DecAttachedMngCnt();
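A hedged sketch of the pattern behind this hunk, with the container and names invented for illustration: set-style erase(key) returns the number of erased elements as size_t, and applying ! to an unsigned count is the kind of construct pclint-plus reports, so the result goes through a bool first.

#include <set>

int main() {
  std::set<int> graphs = {1, 2, 3};
  // Convert the size_t count to bool before the logical test.
  bool erased = graphs.erase(2) != 0;
  return erased ? 0 : 1;
}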

View File

@@ -617,7 +617,7 @@ class PConstant : public PBase<PConstant<T> > {
MS_EXCEPTION_IF_NULL(source_data);
if (x_tensor_ptr->DataSize() == 1) {
auto tensor_type_byte = GetTypeByte(tensor_type_ptr);
-      char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
+      char *data = static_cast<char *>(new_tensor_ptr->data_c());
for (int i = 0; i < new_tensor_ptr->ElementsNum(); i++) {
ret = memcpy_s(data + IntToSize(i) * tensor_type_byte, tensor_type_byte, source_data, tensor_type_byte);
if (ret != 0) {
@@ -626,7 +626,7 @@ class PConstant : public PBase<PConstant<T> > {
}
}
} else {
-      char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
+      char *data = static_cast<char *>(new_tensor_ptr->data_c());
ret = memcpy_s(data, new_tensor_ptr->Size(), source_data, new_tensor_ptr->Size());
if (ret != 0) {
MS_LOG(INFO) << "memcpy_s error, error no " << ret << ", source size " << new_tensor_ptr->Size()
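A minimal sketch of the cast cleanup in this file (malloc stands in for new_tensor_ptr->data_c(), which presumably returns void *): converting void * to an object pointer is an ordinary static_cast, while reinterpret_cast is the stronger conversion that pclint-plus warns about.

#include <cstdlib>

int main() {
  void *raw = std::malloc(16);            // stand-in for data_c()
  char *data = static_cast<char *>(raw);  // preferred over reinterpret_cast
  data[0] = 0;
  std::free(raw);
  return 0;
}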

View File

@@ -49,10 +49,10 @@ abstract::ShapePtr ExtractVolumePatchesInferShape(const PrimitivePtr &primitive,
primitive->name());
auto padding = GetValue<std::string>(primitive->GetAttr(kPadding));
for (auto &item : strides) {
-    (void)CheckAndConvertUtils::Check("strides", item, kGreaterThan, 0, primitive->name());
+    CheckAndConvertUtils::Check("strides", item, kGreaterThan, 0, primitive->name());
}
for (auto &item : kernel_size) {
-    (void)CheckAndConvertUtils::Check("kernel_size", item, kGreaterThan, 0, primitive->name());
+    CheckAndConvertUtils::Check("kernel_size", item, kGreaterThan, 0, primitive->name());
}
std::vector<int64_t> y_shape(5);
int64_t padding_needed = 0;
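A hedged reading of why the (void) casts go away, assumed rather than stated by the commit: if CheckAndConvertUtils::Check returns void, casting the call to void is redundant, and redundant casts are exactly what pclint-plus reports. The Check below is a stand-in with an assumed void return.

void Check(int) {}  // stand-in for CheckAndConvertUtils::Check (assumed void return)

int main() {
  (void)Check(1);  // old style: redundant cast on a void call, flagged by lint
  Check(1);        // cleaned-up form
  return 0;
}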

View File

@@ -291,9 +291,9 @@ def check_type_valid(dtype, target_type, op_name):
if dtype != target_type and (isinstance(target_type, (list, tuple)) and dtype not in target_type):
if op_name in (TENSOR_GETITEM, TENSOR_SETITEM):
raise IndexError(
f"The '{op_name}' doesn't support {dtype}' and expect to receive {target_type}.")
f"The '{op_name}' doesn't support '{dtype}' and expect to receive {target_type}.")
raise TypeError(
f"The '{op_name}' doesn't support {dtype}' and expect to receive {target_type}.")
f"The '{op_name}' doesn't support '{dtype}' and expect to receive {target_type}.")
@constexpr