From 8f55d79bc90a95a1970c0459f6d8920ccc846741 Mon Sep 17 00:00:00 2001
From: xulei
Date: Tue, 17 Jan 2023 18:37:19 +0800
Subject: [PATCH] clean code

---
 mindspore/ccsrc/backend/common/somas/somas.cc | 3 ++-
 .../backend/common/somas/somas_solver_alg.h | 4 +++-
 .../backend/common/somas/somas_solver_pre.h | 3 ++-
 mindspore/ccsrc/cxx_api/types.cc | 4 +++-
 mindspore/ccsrc/kernel/common_utils.h | 3 ++-
 .../ascend/hal/device/ascend_device_address.cc | 17 +++++++++--------
 .../ascend/hal/device/ascend_stream_assign.cc | 3 ++-
 .../ascend/hal/device/kernel_select_ascend.cc | 3 ++-
 .../device/ascend/kernel/hccl/hccl_kernel.h | 12 ++++++------
 .../device/ascend/kernel/host/host_kernel_mod.h | 8 ++++----
 .../plugin/device/ascend/kernel/rts/assign.cc | 7 +++++--
 .../plugin/device/ascend/kernel/rts/rt_kernel.h | 12 ++++++------
 .../device/ascend/kernel/rts/rt_kernel_info.h | 12 ++++++------
 .../ascend/kernel/rts/tensor_copy_slices.cc | 6 +++---
 .../ascend/kernel/tbe/tbe_kernel_compile.cc | 3 ++-
 .../ir_fission/dynamic_rnn_grad_fission_v2.cc | 3 ++-
 .../mindir/avg_pool_grad_unify_mindir.cc | 5 ++++-
 .../optimizer/mindir/dropout_unify_mindir.cc | 5 +++--
 .../mindir/neighbor_exchange_v2_unify_mindir.cc | 3 ++-
 19 files changed, 68 insertions(+), 48 deletions(-)

diff --git a/mindspore/ccsrc/backend/common/somas/somas.cc b/mindspore/ccsrc/backend/common/somas/somas.cc
index d489a45bc17..845a10fb2e4 100644
--- a/mindspore/ccsrc/backend/common/somas/somas.cc
+++ b/mindspore/ccsrc/backend/common/somas/somas.cc
@@ -2019,7 +2019,8 @@ size_t Somas::CalcLowerBound() const {
     lifetime_lb[time] = 0;
   }
 
-  size_t lower, upper;
+  size_t lower;
+  size_t upper;
   for (const auto &tensor : tensors_list_) {
     MS_EXCEPTION_IF_NULL(tensor);
     if (tensor->lifelong_value_ == kLifeLongGraphAll) {
diff --git a/mindspore/ccsrc/backend/common/somas/somas_solver_alg.h b/mindspore/ccsrc/backend/common/somas/somas_solver_alg.h
index 3dbf8bf5268..4097c0433da 100644
--- a/mindspore/ccsrc/backend/common/somas/somas_solver_alg.h
+++ b/mindspore/ccsrc/backend/common/somas/somas_solver_alg.h
@@ -41,6 +41,8 @@ using std::vector;
 
 namespace mindspore {
 namespace somas {
+constexpr auto kDefaultAlignmentSize = 512;
+
 class Interval {
  public:
   Interval() : m_a_(0), m_b_(0) {}
@@ -180,7 +182,7 @@ class FootPrint : public std::enable_shared_from_this<FootPrint> {
 
 class FastHeuristic {
  public:
-  FastHeuristic() : m_alignment_(512), m_tensors_allocated_(0) {}
+  FastHeuristic() : m_alignment_(kDefaultAlignmentSize), m_tensors_allocated_(0) {}
   ~FastHeuristic() = default;
 
   void setAlignment(const size_t &a) { m_alignment_ = a; }
diff --git a/mindspore/ccsrc/backend/common/somas/somas_solver_pre.h b/mindspore/ccsrc/backend/common/somas/somas_solver_pre.h
index 594cd16e2f1..ff1d2629031 100644
--- a/mindspore/ccsrc/backend/common/somas/somas_solver_pre.h
+++ b/mindspore/ccsrc/backend/common/somas/somas_solver_pre.h
@@ -43,6 +43,7 @@ constexpr char const *sortingNames[6] = {"size(>), index(<)",
 constexpr char const *branchingNames[4] = {"bestfit", "smallest", "largest", "worstfit"};
 constexpr char const *algorithmTypeNames[2] = {"Shared Objects", "Single Object"};
 constexpr auto kParallelComputeSizeThreshold = 2000;
+constexpr auto kHalfByteSize = 4;
 enum Status { FAILED, SUCCESS };
 enum AlgorithmType { kManyObjects = 0, kSingleObject, kNumAlgorithmTypes };
 enum SortingType {
@@ -117,7 +118,7 @@ class DynamicBitSet {
       auto *char_value = reinterpret_cast<uint8_t *>(&value);
       for (size_t j = 0; j < bit_width_ / CHAR_BIT; j++) {
         ret += ones_num_in_hex[static_cast<size_t>(char_value[j] & 0xF)];
-        char_value[j] >>= 4;
+        char_value[j] >>= kHalfByteSize;
         ret += ones_num_in_hex[static_cast<size_t>(char_value[j] & 0xF)];
       }
     }
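Note: the DynamicBitSet hunk above counts set bits with a 16-entry per-nibble lookup table, and kHalfByteSize now names the 4-bit shift that moves the high nibble of each byte into the low position. A minimal standalone sketch of the same technique (names and types are illustrative, not the MindSpore code):

    #include <cstdint>
    #include <cstdio>

    // Popcount by summing a 16-entry table over every 4-bit nibble.
    constexpr int kHalfByteSize = 4;  // bits per nibble
    constexpr uint8_t kOnesNumInHex[16] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};

    size_t CountOnes(uint64_t value) {
      size_t ret = 0;
      auto *bytes = reinterpret_cast<uint8_t *>(&value);
      for (size_t j = 0; j < sizeof(value); ++j) {
        ret += kOnesNumInHex[bytes[j] & 0xF];                      // low nibble
        ret += kOnesNumInHex[(bytes[j] >> kHalfByteSize) & 0xF];   // high nibble
      }
      return ret;
    }

    int main() {
      printf("%zu\n", CountOnes(0xF0F0u));  // prints 8
      return 0;
    }

Unlike the sketch, the patched code shifts char_value[j] in place between the two table lookups; the result is the same.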
diff --git a/mindspore/ccsrc/cxx_api/types.cc b/mindspore/ccsrc/cxx_api/types.cc
index 28350702f16..1ba7211f36f 100644
--- a/mindspore/ccsrc/cxx_api/types.cc
+++ b/mindspore/ccsrc/cxx_api/types.cc
@@ -312,7 +312,9 @@ MSTensor *MSTensor::CharStringsToTensor(const std::vector<char> &name, const std
 }
 
 std::vector<std::vector<char>> MSTensor::TensorToStringChars(const MSTensor &tensor) {
-  if (tensor == nullptr || tensor.DataType() != DataType::kObjectTypeString || tensor.DataSize() < 4) {
+  constexpr auto minimum_tensor_size = 4;
+  if (tensor == nullptr || tensor.DataType() != DataType::kObjectTypeString ||
+      tensor.DataSize() < minimum_tensor_size) {
     MS_LOG(ERROR) << "Invalid tensor.";
     return {};
   }
diff --git a/mindspore/ccsrc/kernel/common_utils.h b/mindspore/ccsrc/kernel/common_utils.h
index c34d1212fd8..c4b8e3ea0d2 100644
--- a/mindspore/ccsrc/kernel/common_utils.h
+++ b/mindspore/ccsrc/kernel/common_utils.h
@@ -210,7 +210,8 @@ struct AsymmetricFunc {
 template <typename T>
 struct HalfPixelFunc {
   T operator()(const T &new_x, const int &old_length, const int &new_length) const {
-    return new_length > 1 ? (new_x + 0.5) * old_length / new_length - 0.5 : 0;
+    constexpr auto half_pixel = 0.5;
+    return new_length > 1 ? (new_x + half_pixel) * old_length / new_length - half_pixel : 0;
   }
 };
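Note: the HalfPixelFunc change only names the 0.5 literal; the expression itself is the standard half_pixel coordinate transform used by resize kernels, mapping a destination index new_x back to source coordinate (new_x + 0.5) * old_length / new_length - 0.5 so that pixel centers, not corners, align. A small sketch, assuming plain doubles instead of the templated functor:

    #include <cstdio>

    // Half-pixel mapping: align pixel centers between the two resolutions.
    double SourceCoord(double new_x, int old_length, int new_length) {
      constexpr double half_pixel = 0.5;
      return new_length > 1 ? (new_x + half_pixel) * old_length / new_length - half_pixel : 0;
    }

    int main() {
      // Upscaling a 2-pixel axis to 4 pixels: destinations 0..3 map to
      // source coordinates -0.25, 0.25, 0.75, 1.25.
      for (int x = 0; x < 4; ++x) {
        printf("dst %d -> src %.2f\n", x, SourceCoord(x, 2, 4));
      }
      return 0;
    }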
diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.cc
index fde235d0344..2f0d6146fb0 100644
--- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.cc
@@ -45,8 +45,9 @@
 namespace mindspore {
 namespace device {
 namespace ascend {
-const int FLOAT_LEN = sizeof(float);
-const int FLOAT16_LEN = 2;
+const auto kFloat16Bytes = 2;
+const auto kFloatBytes = sizeof(float);
+const auto kFloat64Bytes = 8;
 
 bool IsUseTransDataTypeFormat(const std::pair<std::string, std::string> &type_format) {
   static const std::set<std::pair<std::string, std::string>> use_trans_data = {
@@ -107,8 +108,8 @@ void SyncMemory(void *dst, const void *src, uint64_t size, aclrtMemcpyKind kind)
 }
 
 bool FloatToHalfAndSyncHostToDevice(void *dst, size_t dst_size, const void *src, size_t src_size) {
-  auto elem_num = src_size / FLOAT_LEN;
-  if (elem_num != (dst_size / FLOAT16_LEN)) {
+  auto elem_num = src_size / kFloatBytes;
+  if (elem_num != (dst_size / kFloat16Bytes)) {
     MS_EXCEPTION(ArgumentError) << "FloatToHalf failed. size not match src_size[" << src_size << "], dst_size["
                                 << dst_size << "]";
   }
@@ -119,7 +120,7 @@ bool FloatToHalfAndSyncHostToDevice(void *dst, size_t dst_size, const void *src,
 }
 
 bool Float64ToFloatAndSyncHostToDevice(void *dst, size_t dst_size, const void *src, size_t src_size) {
-  if (src_size / 2 != dst_size) {
+  if (src_size / kFloat64Bytes != dst_size / kFloatBytes) {
     MS_EXCEPTION(ArgumentError) << "src_size[" << src_size << "], dst_size[" << dst_size << "]";
   }
   size_t elem_num = dst_size / sizeof(float);
@@ -130,8 +131,8 @@ bool Float64ToFloatAndSyncHostToDevice(void *dst, size_t dst_size, const void *s
 }
 
 bool SyncDeviceToHostAndHalfToFloat(void *dst, size_t dst_size, const void *src, size_t src_size) {
-  auto elem_num = src_size / FLOAT16_LEN;
-  if (elem_num != (dst_size / FLOAT_LEN)) {
+  auto elem_num = src_size / kFloat16Bytes;
+  if (elem_num != (dst_size / kFloatBytes)) {
     MS_EXCEPTION(ArgumentError) << "HalfToFloat failed. size not match src_size[" << src_size << "], dst_size["
                                 << dst_size << "]";
   }
@@ -142,7 +143,7 @@ bool SyncDeviceToHostAndHalfToFloat(void *dst, size_t dst_size, const void *src,
 }
 
 bool SyncDeviceToHostAndFloatToFloat64(void *dst, size_t dst_size, const void *src, size_t src_size) {
-  if (src_size != dst_size / 2) {
+  if (src_size / kFloatBytes != dst_size / kFloat64Bytes) {
     MS_EXCEPTION(ArgumentError) << "src_size[" << src_size << "], dst_size[" << dst_size << "]";
   }
   size_t elem_num = src_size / sizeof(float);
diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_stream_assign.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_stream_assign.cc
index 480e0547e8b..cb964f52760 100644
--- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_stream_assign.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_stream_assign.cc
@@ -2411,7 +2411,8 @@ void AscendStreamAssign::CheckEventAssign(const NotNull<KernelGraphPtr> &graph_p
                       << ", max event id:" << max_event_id << ", event map is:" << event_map;
   }
   for (const auto &item : std::as_const(event_map)) {
-    if (item.second.size() != 2) {
+    constexpr auto pair_size = 2;
+    if (item.second.size() != pair_size) {
       MS_LOG(EXCEPTION) << "Send/recv should be in pair and share one event id, invalid event id is:" << item.first
                         << ", event size is:" << item.second.size();
     }
diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/kernel_select_ascend.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/kernel_select_ascend.cc
index 1140204d8ed..1de4a5c44bb 100644
--- a/mindspore/ccsrc/plugin/device/ascend/hal/device/kernel_select_ascend.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/kernel_select_ascend.cc
@@ -859,7 +859,8 @@ std::tuple<KernelSelectStatus, std::string, ExceptionType> SelectKernelInfoWithM
                                                                               KernelType kernel_type) {
   std::vector<std::shared_ptr<kernel::KernelBuildInfo>> kernel_info_list;
   std::vector<std::shared_ptr<kernel::KernelBuildInfo>> aicpu_kernel_info_list;
-  std::ostringstream aicore_in_out_info, aicpu_in_out_info;
+  std::ostringstream aicore_in_out_info;
+  std::ostringstream aicpu_in_out_info;
   std::tuple<KernelSelectStatus, std::string, ExceptionType> result =
     std::make_tuple(kStatusAllMatched, "", NoExceptionType);
   MS_EXCEPTION_IF_NULL(kernel_node);
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/hccl/hccl_kernel.h b/mindspore/ccsrc/plugin/device/ascend/kernel/hccl/hccl_kernel.h
index d99015c3190..709ebb81054 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/hccl/hccl_kernel.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/hccl/hccl_kernel.h
@@ -93,24 +93,24 @@ class HcclKernelFactory {
   std::map<string, HcclKernelCreater> hccl_kernel_map_;
 };
 
-class _HcclKernelRegister {
+class HcclKernelRegister {
  public:
-  _HcclKernelRegister(const string &name, HcclKernelCreater &&fun) {
+  HcclKernelRegister(const string &name, HcclKernelCreater &&fun) {
     HcclKernelFactory::Get().Register(name, std::move(fun));
   }
-  ~_HcclKernelRegister() = default;
+  ~HcclKernelRegister() = default;
 };
 
-#define _MS_HCCL_REG_KERNEL_REG(KNAME, clazz)                                               \
+#define MS_HCCL_REG_KERNEL_REG(KNAME, clazz)                                                \
   static_assert(std::is_base_of<HcclKernel, clazz>::value, " must be base of HcclKernel");  \
-  static const _HcclKernelRegister g_##KNAME##_##_kernel_reg(#KNAME, []() {                 \
+  static const HcclKernelRegister g_##KNAME##_##_kernel_reg(#KNAME, []() {                  \
     std::shared_ptr<clazz> ptr = nullptr;                                                   \
     ptr = std::make_shared<clazz>();                                                        \
     MS_EXCEPTION_IF_NULL(ptr);                                                              \
     return ptr;                                                                             \
   });
 
-#define MS_HCCL_REG_KERNEL(KNAME, clazz) _MS_HCCL_REG_KERNEL_REG(KNAME, clazz)
+#define MS_HCCL_REG_KERNEL(KNAME, clazz) MS_HCCL_REG_KERNEL_REG(KNAME, clazz)
 } // namespace kernel
 } // namespace mindspore
 #endif
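Note: the registrar classes renamed above all follow the usual C++ self-registration pattern: a file-scope static object whose constructor runs during static initialization and inserts a creator lambda into a factory map, so kernels become constructible by name without any central list. A generic sketch of the pattern (simplified names, not the MindSpore classes):

    #include <functional>
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    struct Kernel {
      virtual ~Kernel() = default;
    };
    using KernelCreater = std::function<std::shared_ptr<Kernel>()>;

    class KernelFactory {
     public:
      static KernelFactory &Get() {
        static KernelFactory instance;
        return instance;
      }
      void Register(const std::string &name, KernelCreater &&fun) { map_.emplace(name, std::move(fun)); }
      std::shared_ptr<Kernel> Create(const std::string &name) {
        auto it = map_.find(name);
        return it == map_.end() ? nullptr : it->second();
      }

     private:
      std::map<std::string, KernelCreater> map_;
    };

    class KernelRegister {
     public:
      KernelRegister(const std::string &name, KernelCreater &&fun) {
        KernelFactory::Get().Register(name, std::move(fun));
      }
    };

    // The static object's constructor runs before main and fills the map.
    #define REG_KERNEL(KNAME, clazz) \
      static const KernelRegister g_##KNAME##_reg(#KNAME, []() { return std::make_shared<clazz>(); })

    struct AllReduceKernel : Kernel {};
    REG_KERNEL(AllReduce, AllReduceKernel);

    // Usage: KernelFactory::Get().Create("AllReduce") yields a new AllReduceKernel.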
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/host/host_kernel_mod.h b/mindspore/ccsrc/plugin/device/ascend/kernel/host/host_kernel_mod.h
index 61ee1c2203d..e57266e34ff 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/host/host_kernel_mod.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/host/host_kernel_mod.h
@@ -58,17 +58,17 @@ class HostKernelFactory {
   std::map<string, HostKernelCreater> hostKernelMap_;
 };
 
-class _HostKernelRegister {
+class HostKernelRegister {
  public:
-  _HostKernelRegister(const string &name, HostKernelCreater &&fun) {
+  HostKernelRegister(const string &name, HostKernelCreater &&fun) {
     HostKernelFactory::Get().Register(name, std::move(fun));
   }
-  ~_HostKernelRegister() = default;
+  ~HostKernelRegister() = default;
 };
 
 #define MS_HOST_REG_KERNEL_REG(KNAME, clazz)                                                      \
   static_assert(std::is_base_of<HostKernelMod, clazz>::value, " must be base of HostKernelMod");  \
-  static const _HostKernelRegister g_##KNAME##_##_kernel_reg(#KNAME, []() {                       \
+  static const HostKernelRegister g_##KNAME##_##_kernel_reg(#KNAME, []() {                        \
     std::shared_ptr<clazz> ptr = nullptr;                                                         \
     ptr = std::make_shared<clazz>();                                                              \
     MS_EXCEPTION_IF_NULL(ptr);                                                                    \
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/assign.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/assign.cc
index 87024b8f7da..d3a307aced0 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/assign.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/assign.cc
@@ -24,13 +24,16 @@ using MemcpyAsyncTaskInfoPtr = std::shared_ptr<MemcpyAsyncTaskInfo>;
 
 namespace mindspore {
 namespace kernel {
+namespace {
+constexpr auto kAssignInputSize = 2;
+}
 AssignKernel::AssignKernel() {}
 
 AssignKernel::~AssignKernel() {}
 
 bool AssignKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                           const std::vector<AddressPtr> &, void *stream_ptr) {
-  if (inputs.size() != 2) {
+  if (inputs.size() != kAssignInputSize) {
     MS_LOG(ERROR) << "inputs size is not two";
     return false;
   }
@@ -52,7 +55,7 @@ bool AssignKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vect
 
 std::vector<TaskInfoPtr> AssignKernel::GenTask(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                                                const std::vector<AddressPtr> &, uint32_t stream_id) {
-  if (inputs.size() != 2) {
+  if (inputs.size() != kAssignInputSize) {
     MS_LOG(EXCEPTION) << "Inputs size should be 2, but got " << inputs.size();
   }
   stream_id_ = stream_id;
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel.h b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel.h
index 347bcadfd9a..fc9287f399d 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel.h
@@ -61,19 +61,19 @@ class RtKernelFactory {
   std::map<std::string, RtKernelCreater> fmap_;
 };
 
-class _RtKernelRegister {
+class RtKernelRegister {
  public:
-  _RtKernelRegister(const std::string &name, RtKernelCreater &&fun) {
+  RtKernelRegister(const std::string &name, RtKernelCreater &&fun) {
     RtKernelFactory::Get().Register(name, std::move(fun));
   }
-  ~_RtKernelRegister() = default;
+  ~RtKernelRegister() = default;
 };
 
-#define _MS_REG_RTKERNEL_REG(KNAME, clazz)                                              \
+#define MS_REG_RTKERNEL_REG(KNAME, clazz)                                               \
   static_assert(std::is_base_of<RtKernel, clazz>::value, " must be base of RtKernel");  \
-  static const _RtKernelRegister g_##KNAME##_##_RtKernel_reg(#KNAME, []() { return std::make_shared<clazz>(); });
+  static const RtKernelRegister g_##KNAME##_##_RtKernel_reg(#KNAME, []() { return std::make_shared<clazz>(); });
 
-#define MS_REG_RTKERNEL(KNAME, clazz) _MS_REG_RTKERNEL_REG(KNAME, clazz)
+#define MS_REG_RTKERNEL(KNAME, clazz) MS_REG_RTKERNEL_REG(KNAME, clazz)
 
 } // namespace kernel
 } // namespace mindspore
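Note: every rename in these headers drops a leading underscore followed by an uppercase letter. Such identifiers, like any identifier containing a double underscore, are reserved to the implementation by the C++ standard ([lex.name]), for macro names as well as classes, so defining them is undefined behavior even when it happens to compile. Illustrative examples of the rule (names are hypothetical):

    // Reserved for the implementation -- never define these yourself:
    //   class _HcclKernelRegister {};  // underscore + uppercase, in any scope
    //   #define _MS_REG_RTKERNEL(x)    // the same rule applies to macro names
    //   int my__value;                 // adjacent double underscore, anywhere

    class HcclKernelRegister {};  // fine: the renamed form this patch adopts

    void f() {
      int _count = 0;  // underscore + lowercase is only reserved at global scope
      (void)_count;
    }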
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel_info.h b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel_info.h
index c99b33fe784..b403b1fb604 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel_info.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/rt_kernel_info.h
@@ -54,19 +54,19 @@ class RtKerDescFactory {
   std::map<std::string, RtKerDescCreater> fmap_;
 };
 
-class _RtKerDescRegister {
+class RtKerDescRegister {
 public:
-  _RtKerDescRegister(const std::string &name, RtKerDescCreater &&fun) {
+  RtKerDescRegister(const std::string &name, RtKerDescCreater &&fun) {
     RtKerDescFactory::Get().Register(name, std::move(fun));
   }
-  ~_RtKerDescRegister() = default;
+  ~RtKerDescRegister() = default;
 };
 
-#define _MS_REG_RTKERNEL_DESC_REG(KNAME, clazz)                                           \
+#define MS_REG_RTKERNEL_DESC_REG(KNAME, clazz)                                            \
   static_assert(std::is_base_of<RtKerDesc, clazz>::value, " must be base of RtKerDesc");  \
-  static const _RtKerDescRegister g_##KNAME##_##_rtkernel_desc_reg(#KNAME, []() { return std::make_shared<clazz>(); });
+  static const RtKerDescRegister g_##KNAME##_##_rtkernel_desc_reg(#KNAME, []() { return std::make_shared<clazz>(); });
 
-#define MS_REG_RTKERNEL_DESC(KNAME, clazz) _MS_REG_RTKERNEL_DESC_REG(KNAME, clazz)
+#define MS_REG_RTKERNEL_DESC(KNAME, clazz) MS_REG_RTKERNEL_DESC_REG(KNAME, clazz)
 
 void GetRtKelInfo(const CNodePtr &kernel_node, std::vector<std::shared_ptr<kernel::KernelBuildInfo>> *kernel_info_list);
 } // namespace kernel
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/tensor_copy_slices.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/tensor_copy_slices.cc
index 00552f519f2..ee2c6fd7e86 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/rts/tensor_copy_slices.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/rts/tensor_copy_slices.cc
@@ -38,7 +38,7 @@ TensorCopySlices::~TensorCopySlices() {}
 
 bool TensorCopySlices::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                               const std::vector<AddressPtr> &outputs, void *stream_ptr) {
-  if (inputs.size() != 2) {
+  if (inputs.size() != kTensorCopySlicesInputSize) {
     MS_LOG(ERROR) << "inputs size is not 2";
     return false;
   }
@@ -88,7 +88,7 @@ bool TensorCopySlices::Init(const mindspore::AnfNodePtr &anf_node) {
 void TensorCopySlices::GetInputOutputInfo(const AnfNodePtr &anf_node) {
   MS_EXCEPTION_IF_NULL(anf_node);
   size_t input_size = common::AnfAlgo::GetInputTensorNum(anf_node);
-  if (input_size != 2) {
+  if (input_size != kTensorCopySlicesInputSize) {
     MS_LOG(EXCEPTION) << "TensorCopySlices input size is not 2, got " << input_size;
   }
   input_type_id_ = AnfAlgo::GetPrevNodeOutputDeviceDataType(anf_node, 0);
@@ -134,7 +134,7 @@ void TensorCopySlices::GetInputOutputTotalCount(const AnfNodePtr &anf_node) {
 
 std::vector<TaskInfoPtr> TensorCopySlices::GenTask(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                                                    const std::vector<AddressPtr> &outputs, uint32_t stream_id) {
-  if (inputs.size() != 2) {
+  if (inputs.size() != kTensorCopySlicesInputSize) {
     MS_LOG(EXCEPTION) << "inputs size is not 2.";
   }
   if (outputs.size() != 1) {
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_compile.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_compile.cc
index c5e687b7826..11ea817a949 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_compile.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_compile.cc
@@ -781,7 +781,8 @@ void TbeKernelCompileManager::DistributeCompileTask(const std::vector<CNodePtr>
 void TbeKernelCompileManager::TbePreBuild(const KernelGraphPtr &kernel_graph) {
   MS_EXCEPTION_IF_NULL(kernel_graph);
   MS_LOG(INFO) << "Single op pre build start.";
-  struct timeval start_time, end_time;
+  struct timeval start_time;
+  struct timeval end_time;
   (void)gettimeofday(&start_time, nullptr);
   std::vector<CNodePtr> node_list;
   GetAllTbeNodes(kernel_graph, &node_list);
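Note: the TbePreBuild hunk splits the two timeval declarations but keeps the gettimeofday pair that brackets the pre-build phase. For reference, a sketch of how such a pair is typically reduced to an elapsed time; the cost computation itself lies outside the quoted hunk, so this is an assumption, not the patched code:

    #include <sys/time.h>

    #include <cstdint>
    #include <cstdio>

    int main() {
      struct timeval start_time;
      struct timeval end_time;
      (void)gettimeofday(&start_time, nullptr);
      // ... the work being measured goes here ...
      (void)gettimeofday(&end_time, nullptr);

      // A negative tv_usec delta is compensated by the seconds term.
      int64_t cost_us = static_cast<int64_t>(end_time.tv_sec - start_time.tv_sec) * 1000000 +
                        (end_time.tv_usec - start_time.tv_usec);
      printf("cost %lld us\n", static_cast<long long>(cost_us));
      return 0;
    }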
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/dynamic_rnn_grad_fission_v2.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/dynamic_rnn_grad_fission_v2.cc
index 56b00e5f29a..1be048558f2 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/dynamic_rnn_grad_fission_v2.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/dynamic_rnn_grad_fission_v2.cc
@@ -418,7 +418,8 @@ AnfNodePtr DynamicRnnGradFissionV2::CreateSplitV(const FuncGraphPtr &func_graph,
   }
   auto split_v = NewCNode(splitv_input, func_graph);
   // Set infer data type and shape
-  ShapeVector shape1, shape2;
+  ShapeVector shape1;
+  ShapeVector shape2;
   if (specs.batch_size % kCubeSize == 0 && !specs.shape_need_align) {
     shape1 = {(origin_input6_shape[kDim0] - 1) * origin_input6_shape[kDim1], origin_input6_shape[kDim2]};
     shape2 = {origin_input6_shape[kDim1], origin_input6_shape[kDim2]};
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/avg_pool_grad_unify_mindir.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/avg_pool_grad_unify_mindir.cc
index 63b614f17ab..4b70d14be41 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/avg_pool_grad_unify_mindir.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/avg_pool_grad_unify_mindir.cc
@@ -111,7 +111,10 @@ ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const Anf
                       << x_shape << ", kernel_size:" << k_size << ", strides:" << stride << trace::DumpSourceLines(node);
   }
 
-  int64_t pad_top, pad_bottom, pad_left, pad_right;
+  int64_t pad_top;
+  int64_t pad_bottom;
+  int64_t pad_left;
+  int64_t pad_right;
   int64_t h_output =
     windowed_output_size(node, x_shape[kDim2], k_size[kDim2], stride[kDim2], pad_mode, &pad_top, &pad_bottom);
   int64_t w_output =
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/dropout_unify_mindir.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/dropout_unify_mindir.cc
index bf48c47eeb4..c35f76a15da 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/dropout_unify_mindir.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/dropout_unify_mindir.cc
@@ -110,8 +110,9 @@ std::vector<int64_t> CalGenMaskOutputShape(const std::vector<int64_t> &shape) {
 
 std::vector<int64_t> CalGenMaskV3OutputShape(const std::vector<int64_t> &shape, TypeId type) {
   // [*dim, M, N] -> [*dim, N/16, M/16, 16, 16] if M%16=0 and N%16=0
-  if (shape.size() >= 2 && shape[shape.size() - 1] % static_cast<int64_t>(kCubeSize) == 0 &&
-      shape[shape.size() - 2] % static_cast<int64_t>(kCubeSize) == 0) {
+  constexpr auto cube_h_offset = 2;
+  if (shape.size() >= cube_h_offset && shape[shape.size() - 1] % static_cast<int64_t>(kCubeSize) == 0 &&
+      shape[shape.size() - cube_h_offset] % static_cast<int64_t>(kCubeSize) == 0) {
     auto fnz_shape = trans::TransShapeToDevice(shape, kOpFormat_FRAC_NZ, type);
     return fnz_shape;
   }
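Note: the dropout hunk's comment documents the FRAC_NZ layout rule the condition tests for: a shape [*, M, N] whose last two dimensions are multiples of 16 is stored on device as [*, N/16, M/16, 16, 16]. A plain sketch of that shape computation (illustrative only; the real conversion is trans::TransShapeToDevice):

    #include <cstdint>
    #include <vector>

    constexpr int64_t kCubeSize = 16;

    // [*, M, N] -> [*, N/16, M/16, 16, 16]; assumes shape.size() >= 2
    // and that M and N are both multiples of kCubeSize.
    std::vector<int64_t> FracNzShape(const std::vector<int64_t> &shape) {
      constexpr size_t cube_h_offset = 2;
      std::vector<int64_t> out(shape.begin(), shape.end() - cube_h_offset);
      int64_t m = shape[shape.size() - cube_h_offset];
      int64_t n = shape[shape.size() - 1];
      out.push_back(n / kCubeSize);
      out.push_back(m / kCubeSize);
      out.push_back(kCubeSize);
      out.push_back(kCubeSize);
      return out;
    }
    // Example: {32, 64} -> {4, 2, 16, 16}; {8, 32, 64} -> {8, 4, 2, 16, 16}.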
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc
index 9a7741f136f..670488afc6e 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc
@@ -48,6 +48,7 @@ constexpr int64_t kRankIdFive = 5;
 constexpr int64_t kRankIdSix = 6;
 constexpr int64_t kRankIdSeven = 7;
 constexpr size_t kSizeFour = 4;
+constexpr size_t kSizeEight = 8;
 constexpr int64_t kInvalidId = -1;
 constexpr size_t kMinSplitOutputSize = 2;
 
@@ -215,7 +216,7 @@ AnfNodePtr GetCenter(const FuncGraphPtr &graph, const CNodePtr &neighbor_exchang
 std::vector<AnfNodePtr> CreateAllToAllvInputForGrad(const std::vector<int64_t> &send_rank_ids,
                                                     const std::vector<std::vector<AnfNodePtr>> &split_outputs,
                                                     const std::vector<CNodePtr> &split_nodes) {
-  if (send_rank_ids.size() != 8) {
+  if (send_rank_ids.size() != kSizeEight) {
     MS_LOG(EXCEPTION) << "Wrong send_rank_ids size: " << send_rank_ids.size() << ", expect size: 8.";
   }
   if (split_outputs.size() != kSizeFour) {
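Note: kSizeEight encodes the NeighborExchangeV2 contract that send_rank_ids always carries eight entries, one per neighbor of a rank in a 2-D grid, with kInvalidId (-1) marking directions that do not exchange; the direction order is assumed here to run clockwise from the top neighbor. A hedged sketch of the size check outside the MindSpore logging machinery:

    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <vector>

    constexpr size_t kSizeEight = 8;    // one rank id per 2-D neighbor direction
    constexpr int64_t kInvalidId = -1;  // direction does not participate

    void CheckSendRankIds(const std::vector<int64_t> &send_rank_ids) {
      if (send_rank_ids.size() != kSizeEight) {
        throw std::invalid_argument("Wrong send_rank_ids size: " + std::to_string(send_rank_ids.size()) +
                                    ", expect size: 8.");
      }
    }

    int main() {
      // Exchange only with the top (index 0) and bottom (index 4) neighbors.
      std::vector<int64_t> send_rank_ids = {3, kInvalidId, kInvalidId, kInvalidId,
                                            5, kInvalidId, kInvalidId, kInvalidId};
      CheckSendRankIds(send_rank_ids);
      return 0;
    }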