diff --git a/include/api/cfg.h b/include/api/cfg.h
index bb0a7aa33b3..db915cac944 100644
--- a/include/api/cfg.h
+++ b/include/api/cfg.h
@@ -60,9 +60,9 @@ class TrainCfg {
   ~TrainCfg() = default;
 
   OptimizationLevel optimization_level_ = kO0;
-  std::vector<std::string> loss_name_ = {"loss_fct",
-                                         "_loss_fn"}; /**< Set part of the name that identify a loss kernel */
-  MixPrecisionCfg mix_precision_cfg_;                 /**< Mix precision configuration */
+  std::vector<std::string> loss_name_ = {
+    "loss_fct", "_loss_fn", "SigmoidCrossEntropy"};   /**< Set part of the name that identify a loss kernel */
+  MixPrecisionCfg mix_precision_cfg_;                 /**< Mix precision configuration */
   bool accumulate_gradients_ = false;
 };
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_cache.h b/mindspore/ccsrc/pipeline/pynative/pynative_cache.h
index c0bc1db04d1..21335bcac3f 100644
--- a/mindspore/ccsrc/pipeline/pynative/pynative_cache.h
+++ b/mindspore/ccsrc/pipeline/pynative/pynative_cache.h
@@ -20,11 +20,11 @@
 #include
 #include
 #include
+#include "pybind11/pytypes.h"
 #include "utils/hash_map.h"
 #include "utils/ms_utils.h"
 #include "ir/anf.h"
 #include "ir/signature.h"
-#include "pybind11/pytypes.h"
 
 namespace mindspore::pynative {
 struct AbsCacheKey {
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.cc
index 2da1f7d5bbd..a54a7b05e82 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.cc
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <cmath>
 #include "plugin/device/cpu/hal/device/cpu_device_address.h"
 
 namespace mindspore {
@@ -27,11 +28,7 @@ static constexpr int MAX_DIMS = 7;
 template <typename T>
 T GetDivZeroVal(const T &v) {
   auto zero = static_cast<T>(0.0);
-  if (std::numeric_limits<T>::has_infinity) {
-    return v > zero ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
-  } else {
-    return v > zero ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
-  }
+  return v > zero ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
 }
 
 template <>
@@ -40,28 +37,42 @@ complex128 GetDivZeroVal(const complex128 &v) {
 }
 
 template <>
-complex64 GetDivZeroVal(const complex64 &v) {
+complex64 GetDivZeroVal(const complex64 &) {
   return std::numeric_limits<float>::quiet_NaN();
 }
 
+template <typename T>
+bool isZero(T val) {
+  return val == T(0.0f);
+}
+
+template <>
+bool isZero(float val) {
+  return std::fpclassify(val) == FP_ZERO;
+}
+
+template <>
+bool isZero(double val) {
+  return std::fpclassify(val) == FP_ZERO;
+}
+
 template <typename T>
 bool XdivyCpuKernelMod::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                                      const std::vector<AddressPtr> &outputs) {
   CHECK_KERNEL_INPUTS_NUM(inputs.size(), INPUT_NUM, kernel_name_);
   CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), OUTPUT_NUM, kernel_name_);
-  auto x_addr = reinterpret_cast<T *>(inputs[0]->addr);
-  auto y_addr = reinterpret_cast<T *>(inputs[1]->addr);
-  auto output_addr = reinterpret_cast<T *>(outputs[0]->addr);
+  auto x_addr = static_cast<T *>(inputs[0]->addr);
+  auto y_addr = static_cast<T *>(inputs[1]->addr);
+  auto output_addr = static_cast<T *>(outputs[0]->addr);
   size_t output_size = outputs[0]->size / sizeof(T);
   auto sameShapeTask = [&x_addr, &y_addr, &output_addr](size_t start, size_t end) {
     for (size_t i = start; i < end; i++) {
       auto dividend = x_addr[i];
       auto divisor = y_addr[i];
-      auto zero = (T)0;
-      if (divisor == zero) {
-        if (dividend == zero) {
-          output_addr[i] = zero;
+      if (isZero(divisor)) {
+        if (isZero(dividend)) {
+          output_addr[i] = static_cast<T>(0.0);
           continue;
         }
         output_addr[i] = GetDivZeroVal(dividend);
@@ -217,7 +228,7 @@ int XdivyCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::v
     GetBroadCastIndex(x_shape, out_shape, &index_listx_);
     GetBroadCastIndex(y_shape, out_shape, &index_listy_);
   }
-  return KRET_OK;
+  return 0;
 }
 
 std::vector<KernelAttr> XdivyCpuKernelMod::GetOpSupport() { return support_ops_; }
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.h b/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.h
index 0569f585a8f..8c65d4cffb3 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.h
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/xdivy_cpu_kernel.h
@@ -18,7 +18,6 @@
 #include
 #include
 #include
-#include
 #include "plugin/device/cpu/kernel/cpu_kernel.h"
 #include "plugin/factory/ms_factory.h"
 
@@ -41,9 +40,9 @@ class XdivyCpuKernelMod : public NativeCpuKernelMod {
   int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
              const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &) override;
 
- protected:
   std::vector<KernelAttr> GetOpSupport() override;
 
+ protected:
   void ResetResource() noexcept {
     input_size_list_.clear();
     output_size_list_.clear();
diff --git a/mindspore/core/ir/manager.cc b/mindspore/core/ir/manager.cc
index 88f1d546adc..2887df7c4d4 100644
--- a/mindspore/core/ir/manager.cc
+++ b/mindspore/core/ir/manager.cc
@@ -781,8 +781,8 @@ void FuncGraphManager::CommitChanges(std::vector<Change> &&changes) {
 
 void FuncGraphManager::EraseOneGraph(const FuncGraphPtr &fg) {
   MS_EXCEPTION_IF_NULL(fg);
-  size_t erase_cnt = func_graphs_.erase(fg->shared_from_base<FuncGraph>());
-  if (!erase_cnt) {
+  bool erase_ret = func_graphs_.erase(fg->shared_from_base<FuncGraph>());
+  if (!erase_ret) {
     return;
   }
   fg->DecAttachedMngCnt();
diff --git a/mindspore/core/ir/pattern_matcher.h b/mindspore/core/ir/pattern_matcher.h
index 2113be26754..de110db7a6e 100644
--- a/mindspore/core/ir/pattern_matcher.h
+++ b/mindspore/core/ir/pattern_matcher.h
@@ -617,7 +617,7 @@ class PConstant : public PBase<PConstant<T> > {
     MS_EXCEPTION_IF_NULL(source_data);
     if (x_tensor_ptr->DataSize() == 1) {
       auto tensor_type_byte = GetTypeByte(tensor_type_ptr);
-      char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
+      char *data = static_cast<char *>(new_tensor_ptr->data_c());
       for (int i = 0; i < new_tensor_ptr->ElementsNum(); i++) {
         ret = memcpy_s(data + IntToSize(i) * tensor_type_byte, tensor_type_byte, source_data, tensor_type_byte);
         if (ret != 0) {
@@ -626,7 +626,7 @@ class PConstant : public PBase<PConstant<T> > {
         }
       }
     } else {
-      char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
+      char *data = static_cast<char *>(new_tensor_ptr->data_c());
       ret = memcpy_s(data, new_tensor_ptr->Size(), source_data, new_tensor_ptr->Size());
       if (ret != 0) {
         MS_LOG(INFO) << "memcpy_s error, error no " << ret << ", source size " << new_tensor_ptr->Size()
diff --git a/mindspore/core/ops/extract_volume_patches.cc b/mindspore/core/ops/extract_volume_patches.cc
index fdeda8a6fb4..ed8e2a393a5 100644
--- a/mindspore/core/ops/extract_volume_patches.cc
+++ b/mindspore/core/ops/extract_volume_patches.cc
@@ -49,10 +49,10 @@ abstract::ShapePtr ExtractVolumePatchesInferShape(const PrimitivePtr &primitive,
                                     primitive->name());
   auto padding = GetValue<std::string>(primitive->GetAttr(kPadding));
   for (auto &item : strides) {
-    (void)CheckAndConvertUtils::Check("strides", item, kGreaterThan, 0, primitive->name());
+    CheckAndConvertUtils::Check("strides", item, kGreaterThan, 0, primitive->name());
   }
   for (auto &item : kernel_size) {
-    (void)CheckAndConvertUtils::Check("kernel_size", item, kGreaterThan, 0, primitive->name());
+    CheckAndConvertUtils::Check("kernel_size", item, kGreaterThan, 0, primitive->name());
   }
   std::vector<int64_t> y_shape(5);
   int64_t padding_needed = 0;
diff --git a/mindspore/python/mindspore/ops/composite/multitype_ops/_constexpr_utils.py b/mindspore/python/mindspore/ops/composite/multitype_ops/_constexpr_utils.py
index 141d5ef4f8b..a4eccad99d4 100644
--- a/mindspore/python/mindspore/ops/composite/multitype_ops/_constexpr_utils.py
+++ b/mindspore/python/mindspore/ops/composite/multitype_ops/_constexpr_utils.py
@@ -291,9 +291,9 @@ def check_type_valid(dtype, target_type, op_name):
     if dtype != target_type and (isinstance(target_type, (list, tuple)) and dtype not in target_type):
         if op_name in (TENSOR_GETITEM, TENSOR_SETITEM):
             raise IndexError(
-                f"The '{op_name}' doesn't support {dtype}' and expect to receive {target_type}.")
+                f"The '{op_name}' doesn't support '{dtype}' and expect to receive {target_type}.")
         raise TypeError(
-            f"The '{op_name}' doesn't support {dtype}' and expect to receive {target_type}.")
+            f"The '{op_name}' doesn't support '{dtype}' and expect to receive {target_type}.")
 
 
 @constexpr
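
Note: below is a minimal, standalone sketch (not part of the patch) of the element-wise zero handling that the xdivy_cpu_kernel.cc change implements: a zero divisor with a zero dividend yields 0, and a zero divisor with a non-zero dividend yields a signed infinity. The helper names IsZero, DivZeroVal, and XdivyScalar are illustrative stand-ins for the kernel's isZero/GetDivZeroVal logic, assuming IEEE floating-point types.

#include <cmath>
#include <iostream>
#include <limits>

// Hypothetical helpers mirroring the patch's zero handling; not MindSpore code.
template <typename T>
bool IsZero(T v) {
  // std::fpclassify treats both +0.0 and -0.0 as FP_ZERO.
  return std::fpclassify(v) == FP_ZERO;
}

template <typename T>
T DivZeroVal(T dividend) {
  // The sign of the returned infinity follows the sign of the dividend.
  return dividend > T(0) ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
}

template <typename T>
T XdivyScalar(T x, T y) {
  if (IsZero(y)) {
    return IsZero(x) ? T(0) : DivZeroVal(x);  // 0/0 -> 0, x/0 -> +/-inf
  }
  return x / y;
}

int main() {
  std::cout << XdivyScalar(0.0, 0.0) << "\n";   // 0
  std::cout << XdivyScalar(2.0, 0.0) << "\n";   // inf
  std::cout << XdivyScalar(-2.0, 0.0) << "\n";  // -inf
  std::cout << XdivyScalar(3.0, 2.0) << "\n";   // 1.5
  return 0;
}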