diff --git a/mindspore/ccsrc/frontend/parallel/group_manager.cc b/mindspore/ccsrc/frontend/parallel/group_manager.cc
index 4d52ac30c7c..47f9ae0f676 100644
--- a/mindspore/ccsrc/frontend/parallel/group_manager.cc
+++ b/mindspore/ccsrc/frontend/parallel/group_manager.cc
@@ -211,7 +211,7 @@ Status GroupManager::CreateGroup(const std::string &group_name, const std::vecto
   }
 }
 
-Status GroupManager::DestroyGroup(const std::string &group_name) {
+Status GroupManager::DestroyGroup(const std::string &group_name) const {
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   std::string device_name = context_ptr->get_param<std::string>(MS_CTX_DEVICE_TARGET);
diff --git a/mindspore/ccsrc/frontend/parallel/group_manager.h b/mindspore/ccsrc/frontend/parallel/group_manager.h
index 542c8fe4058..2e4de073fc1 100644
--- a/mindspore/ccsrc/frontend/parallel/group_manager.h
+++ b/mindspore/ccsrc/frontend/parallel/group_manager.h
@@ -70,7 +70,7 @@ class GroupManager {
   bool CreateGroupByExecutor(const std::string &device_name, const std::string &group_name,
                              const std::vector<uint32_t> ranks, uint32_t device_id) const;
   bool DestroyGroupByExecutor(const std::string &device_name, const std::string &group_name, uint32_t device_id) const;
-  Status DestroyGroup(const std::string &group_name);
+  Status DestroyGroup(const std::string &group_name) const;
   // the key is group name (name_)
   std::map<std::string, Group> groups_;
   std::string world_group_;
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
index e6aef0cad39..05f87f75ae4 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
@@ -64,7 +64,7 @@ Status BiasAddInfo::InferTensorMap() {
   for (size_t i = 0; i < sub_a_strategy_size; ++i) {
     sub_a_tensor_map.push_back(static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size) - i));
   }
-  sub_b_tensor_map.push_back(static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size) - static_cast<int64_t>(1)));
+  sub_b_tensor_map.push_back(static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size) - static_cast<size_t>(1)));
 
   inputs_tensor_map_.push_back(sub_a_tensor_map);
   inputs_tensor_map_.push_back(sub_b_tensor_map);
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h
index 7753e61e61f..faf28a5079f 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h
@@ -33,16 +33,16 @@ namespace parallel {
 class MatMulBase : public OperatorInfo {
  public:
   // Generate all strategies and the corresponding cost for this MatMul operator
+  MatMulBase(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape,
+             const PrimitiveAttrs &attrs)
+      : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared<MatMulCost>()) {}
+  ~MatMulBase() override = default;
   std::vector<StrategyPtr> GenerateOpStrategies(int64_t stage_id) override;
   Status SetCostUnderStrategy(const StrategyPtr &strategy) override;
   Status SwapLastTwoElements(Shape *const input);
   Shapes InferParamStrategy(const Shapes &default_strategy) override;
 protected:
-  MatMulBase(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape,
-             const PrimitiveAttrs &attrs)
-      : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared<MatMulCost>()) {}
-  ~MatMulBase() override = default;
   Status InferForwardCommunication() override;
   Status InferTensorInfo() override;
   // the forward_reduce_scatter mode need to override this function
   Status InferDevMatrixShape() override;
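
For context on the `bias_add_info.cc` hunk: `LAST_INDEX` operates on the unsigned `size_t` strategy size, so spelling the literal as `static_cast<size_t>(1)` keeps the subtraction purely unsigned and leaves a single signed conversion at the outer cast into the `int64_t` tensor map. A minimal sketch of the pattern, assuming `LAST_INDEX(n)` expands to `((n) - 1)` as in the parallel ops_info headers:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Assumption: mirrors the LAST_INDEX macro used by the parallel ops_info code.
#define LAST_INDEX(size) ((size) - 1)

int main() {
  std::vector<int64_t> sub_b_tensor_map;
  size_t sub_a_strategy_size = 4;
  // size_t - size_t stays unsigned, so there is no sign-mismatch warning; the
  // single outer cast converts the result into the signed tensor-map element type.
  sub_b_tensor_map.push_back(
      static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size) - static_cast<size_t>(1)));
  std::cout << sub_b_tensor_map.back() << "\n";  // prints 2 for a rank-4 strategy
  return 0;
}
```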
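Similarly, the `matmul_info.h` hunk moves the `MatMulBase` constructor and destructor from `protected` to `public`, which lets the class be instantiated directly (for example via `std::make_shared`) rather than only through derived classes. A stand-alone sketch of the difference, using a hypothetical `OpBase` in place of `MatMulBase`:

```cpp
#include <memory>
#include <string>
#include <utility>

// Hypothetical stand-in for MatMulBase; only the constructor visibility matters here.
class OpBase {
 public:
  explicit OpBase(std::string name) : name_(std::move(name)) {}
  virtual ~OpBase() = default;

 protected:
  std::string name_;
};

int main() {
  // With a protected constructor, this call would fail to compile, because
  // std::make_shared constructs the object from outside the class hierarchy.
  auto op = std::make_shared<OpBase>("MatMul");
  return 0;
}
```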