diff --git a/mindspore/ccsrc/parallel/device_manager.cc b/mindspore/ccsrc/parallel/device_manager.cc
index 3a553e08ece..0b34cedc006 100644
--- a/mindspore/ccsrc/parallel/device_manager.cc
+++ b/mindspore/ccsrc/parallel/device_manager.cc
@@ -370,6 +370,5 @@ void DeviceManager::Clear() {
   stage_devices_.clear();
   gm_.Clear();
 }
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/device_matrix.cc b/mindspore/ccsrc/parallel/device_matrix.cc
index a581dbf2759..3fdc3dd15a9 100644
--- a/mindspore/ccsrc/parallel/device_matrix.cc
+++ b/mindspore/ccsrc/parallel/device_matrix.cc
@@ -29,7 +29,6 @@
 namespace mindspore {
 namespace parallel {
-
 DeviceMatrix::DeviceMatrix(int32_t rank, RankList dev_list, Shape dev_shape)
     : rank_(rank), dev_list_(std::move(dev_list)), dev_shape_(std::move(dev_shape)) {
   if (!std::any_of(dev_list_.begin(), dev_list_.end(), [rank](int32_t a) { return a == rank; })) {
diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h
index 145a8a0840e..1b864cd8bf4 100644
--- a/mindspore/ccsrc/parallel/dynamic_creator.h
+++ b/mindspore/ccsrc/parallel/dynamic_creator.h
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 #define REGISTER(className)                                                                                  \
   OperatorInfoPtr objectCreator##className(std::string name, Shapes in, Shapes out, PrimitiveAttrs& attrs) { \
     return std::make_shared<className>(name, in, out, attrs);                                               \
diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.cc b/mindspore/ccsrc/parallel/ops_info/activation_info.cc
index c59ca8402b9..c11db56082a 100644
--- a/mindspore/ccsrc/parallel/ops_info/activation_info.cc
+++ b/mindspore/ccsrc/parallel/ops_info/activation_info.cc
@@ -229,7 +229,8 @@ Status Softmax::GenerateStrategies(int32_t stage_id) {
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   for (auto& element : axis_) {
     int32_t axis_index = element;
     if (element < 0) {
diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc
index b1d9b8b60e8..9d356cd573c 100644
--- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc
+++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status BatchParallelInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {
diff --git a/mindspore/ccsrc/parallel/ops_info/loss_info.cc b/mindspore/ccsrc/parallel/ops_info/loss_info.cc
index 31f80e338b7..28ea19f1202 100644
--- a/mindspore/ccsrc/parallel/ops_info/loss_info.cc
+++ b/mindspore/ccsrc/parallel/ops_info/loss_info.cc
@@ -194,7 +194,8 @@ Status SoftmaxCrossEntropyWithLogitsInfo::GenerateStrategies(int32_t stage_id) {
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   input0_split[IntToSize(axis_index)] = 0;
   Shapes splittable_inputs = {input0_split, input0_split};
   std::vector<StrategyPtr> sp_vector;
diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc
index 23b6a5190a8..e066142589b 100644
--- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc
+++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc
@@ -1255,6 +1255,5 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra
 double OperatorInfo::GetForwardMemoryCostFromCNode() {
   return operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0);
 }
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/ops_info/prelu_info.cc b/mindspore/ccsrc/parallel/ops_info/prelu_info.cc
index 1a44501f427..a4d601dbe93 100644
--- a/mindspore/ccsrc/parallel/ops_info/prelu_info.cc
+++ b/mindspore/ccsrc/parallel/ops_info/prelu_info.cc
@@ -212,8 +212,10 @@ Status PReLUInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
-  input0_split[1] = 0;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  input0_split.emplace_back(0);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 2, 1);
   Shape input1_split(inputs_shape_[1].size(), 0);
   Shapes splittable_inputs = {input0_split, input1_split};
   std::vector<StrategyPtr> sp_vector;
diff --git a/mindspore/ccsrc/parallel/ops_info/reshape_info.cc b/mindspore/ccsrc/parallel/ops_info/reshape_info.cc
index 0c95ee9c051..4cb81ee7699 100644
--- a/mindspore/ccsrc/parallel/ops_info/reshape_info.cc
+++ b/mindspore/ccsrc/parallel/ops_info/reshape_info.cc
@@ -413,8 +413,9 @@ Status ReshapeInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 0);
-  input0_split[0] = 1;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 1, 0);
   Shapes splittable_inputs = {input0_split};
   std::vector<StrategyPtr> sp_vector;
   if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) {
diff --git a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc
index acb39247d44..cd3b40315c1 100644
--- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc
+++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status VirtualDatasetInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {
@@ -225,8 +224,9 @@ Status VirtualDatasetInfo::GenerateStrategies(int32_t stage_id) {
   StrategyPtr sp;
   std::vector<Dimensions> strategy;
   for (auto& shape : inputs_shape_) {
-    Shape temp(shape.size(), 1);
-    temp[0] = SizeToInt(total_dev_num);
+    Shape temp;
+    temp.emplace_back(SizeToInt(total_dev_num));
+    (void)temp.insert(temp.end(), shape.size() - 1, 1);
     strategy.push_back(temp);
   }
   sp = std::make_shared<Strategy>(stage_id, strategy);
diff --git a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h
index bf17e678a3e..398bae3585d 100644
--- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h
+++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h
@@ -51,7 +51,6 @@ class VirtualDatasetInfo : public OperatorInfo {
   Status GetAttrs() override;
   Status InferAsLossDivisor() override;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/status.h b/mindspore/ccsrc/parallel/status.h
index 9d773f0d9b4..6bfe9f0e72b 100644
--- a/mindspore/ccsrc/parallel/status.h
+++ b/mindspore/ccsrc/parallel/status.h
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 enum Status {
   SUCCESS = 0,
   FAILED,
diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc
index a42ce612fb5..a56fca1ae6d 100644
--- a/mindspore/ccsrc/parallel/step_auto_parallel.cc
+++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc
@@ -487,7 +487,6 @@ Status ConstructCostGraphNodes(const std::vector<AnfNodePtr> &all_nodes, const F
       bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                            (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                            (current_op_ptr->name().find(prim->name()) == std::string::npos);
-
       if (is_find_wrong) {
         MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                           << " does not match the Prim: " << prim->name();
@@ -947,7 +946,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
   graph = EliminateGraph(graph, eli_list, index_list);
   size_t num_device = g_device_manager->DeviceNum();
-
   if (PartitionForAllDevices(num_device, graph) == SUCCESS) {
     MS_LOG(INFO) << "Partition Success With " << num_device << " devices.";
   } else {
diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.h b/mindspore/ccsrc/parallel/step_auto_parallel.h
index 349af7c956b..f120edcc616 100644
--- a/mindspore/ccsrc/parallel/step_auto_parallel.h
+++ b/mindspore/ccsrc/parallel/step_auto_parallel.h
@@ -55,7 +55,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
 std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                           std::vector<std::vector<std::string>> input_tensor_names);
-
 }  // namespace parallel
 }  // namespace mindspore
 #endif  // PARALLEL_STEP_AUTO_PARALLEL_H_
diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc
index f0126a40275..bcd4dc3763b 100644
--- a/mindspore/ccsrc/parallel/step_parallel.cc
+++ b/mindspore/ccsrc/parallel/step_parallel.cc
@@ -2094,7 +2094,6 @@ CNodePtr FindLossCNodeFromRoot(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
-
   if (func_graph == nullptr) {
     return FindLossCNode(root);
   } else {
@@ -2109,7 +2108,6 @@ FuncGraphPtr ForwardGraph(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
-
   if (func_graph != nullptr) {
     forward_graph = func_graph;
   }
diff --git a/mindspore/ccsrc/parallel/strategy.h b/mindspore/ccsrc/parallel/strategy.h
index acc6ca928f7..93d4d4dff1a 100644
--- a/mindspore/ccsrc/parallel/strategy.h
+++ b/mindspore/ccsrc/parallel/strategy.h
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 #define MIN_SLICE_NUM 1
 using Dimensions = std::vector<int32_t>;
diff --git a/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc b/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc
index 68acae87f36..b42ba302427 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status Arrangement::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {
diff --git a/mindspore/ccsrc/parallel/tensor_layout/arrangement.h b/mindspore/ccsrc/parallel/tensor_layout/arrangement.h
index 6d64e07f031..2dc13038c10 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/arrangement.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/arrangement.h
@@ -28,7 +28,6 @@
 namespace mindspore {
 namespace parallel {
-
 class Arrangement : public Array {
  public:
   Arrangement() : size_(1) {}
@@ -53,7 +52,6 @@ class Arrangement : public Array {
   void ComputeSize();
   int32_t size_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/array.cc b/mindspore/ccsrc/parallel/tensor_layout/array.cc
index ce1b9b8ecfe..ba3858ae009 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/array.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/array.cc
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string Array::ToString() const {
   std::ostringstream buffer;
   buffer << "[ ";
diff --git a/mindspore/ccsrc/parallel/tensor_layout/array.h b/mindspore/ccsrc/parallel/tensor_layout/array.h
index 3a47f0d8187..f7d9c3c673b 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/array.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/array.h
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 class Array {
  public:
   Array() = default;
@@ -43,7 +42,6 @@ class Array {
  protected:
   std::vector<int32_t> array_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h b/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h
index 91f52360373..cf6cff456a6 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h
@@ -52,7 +52,6 @@ class ConstructOperator {
   Shape dev_matrix_shape_;
   Status CreateGroupByDim(size_t axis, std::vector<Group>* group);
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc
index b2ee51b40b6..190a5846baa 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc
@@ -20,7 +20,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string LayoutTransfer::ToString() const {
   std::ostringstream buffer;
   buffer << std::endl << std::string("from_in_ tensor layout:" + from_in_.ToString());
@@ -37,6 +36,5 @@ Status LayoutTransfer::Init(const TensorLayout& from_in, const TensorLayout& to_
   Status status = CheckValidTransfer();
   return status;
 }
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h
index b892a87d305..b05128f5b82 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h
@@ -23,7 +23,6 @@
 namespace mindspore {
 namespace parallel {
-
 class LayoutTransfer {
  public:
   LayoutTransfer() = default;
@@ -43,7 +42,6 @@ class LayoutTransfer {
  private:
  virtual Status CheckValidTransfer() = 0;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/map.cc b/mindspore/ccsrc/parallel/tensor_layout/map.cc
index 4f3f2369c7e..320dbe6ebd8 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/map.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/map.cc
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status Map::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {
diff --git a/mindspore/ccsrc/parallel/tensor_layout/map.h b/mindspore/ccsrc/parallel/tensor_layout/map.h
index f7bc061aa1f..3f839ef1989 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/map.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/map.h
@@ -46,7 +46,6 @@ class Map : public Array {
  private:
  bool IsValidMap();
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc
index 2ee682fad87..7ed07ac02ec 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status RedistributionLayoutTransfer::CheckValidTransfer() { return Status::SUCCESS; }
 /*
@@ -66,6 +65,5 @@ std::shared_ptr<ReshapeLayoutTransfer> RedistributionLayoutTransfer::UnifyDevice
   }
   return unified_device_arrangement_ptr->UnifyDeviceArrangementAndTensorShape();
 }
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h
index 6522b7f8c2f..7b57f46dd6e 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h
@@ -24,7 +24,6 @@
 namespace mindspore {
 namespace parallel {
-
 class RedistributionLayoutTransfer : public LayoutTransfer {
  public:
   RedistributionLayoutTransfer() = default;
@@ -35,7 +34,6 @@ class RedistributionLayoutTransfer : public LayoutTransfer {
   Status CheckValidTransfer() override;
   std::shared_ptr<ReshapeLayoutTransfer> UnifyDeviceArrangement() const;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc
index 028fb5874aa..b4ec6a016fe 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc
@@ -22,7 +22,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status RedistributionOperatorInfer::Init(const TensorLayout& tensor_layout, const Map& out_tensor_map,
                                          RankList dev_list) {
   in_tensor_map_ = tensor_layout.tensor_map();
@@ -273,6 +272,5 @@ Status RedistributionOperatorInfer::TransferConcatByAxis(Args args) {
   }
   return Status::SUCCESS;
 }
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h
index 13f9e7af247..b4ec0c4633b 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h
@@ -28,7 +28,6 @@
 #include "utils/convert_utils.h"
 namespace mindspore {
 namespace parallel {
-
 using DeviceArrangement = std::vector<int32_t>;
 using TensorMap = std::vector<int32_t>;
 using TensorShape = std::vector<int32_t>;
@@ -69,7 +68,6 @@ class RedistributionOperatorInfer {
   RankList dev_list_;
   bool construct_op_flag_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc
index 1d56aa22203..39a6bef92da 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc
@@ -20,7 +20,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status ReshapeLayoutTransfer::CheckValidTransfer() {
   if (!IsSameDeviceArrangement()) {
     return Status::FAILED;
diff --git a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h
index 9ad8e676359..8aae71631df 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h
@@ -23,7 +23,6 @@
 namespace mindspore {
 namespace parallel {
-
 class ReshapeLayoutTransfer : public LayoutTransfer {
  public:
   ReshapeLayoutTransfer() = default;
@@ -43,7 +42,6 @@ class ReshapeLayoutTransfer : public LayoutTransfer {
   bool FromTensorShapeCanBeExpandByTo() const;
   bool ToTensorShapeCanBeExpandByFrom() const;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc b/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc
index 54bb976032a..a26627fb3ce 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 /*
  * example:
  * shape = [2, 8, 32]
@@ -260,6 +259,5 @@ Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& e
   }
   return status;
 }
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/shape_util.h b/mindspore/ccsrc/parallel/tensor_layout/shape_util.h
index 85ca70969b8..e83156500c9 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/shape_util.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/shape_util.h
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 /*
  * compute the accumulating product of all the values in shape from left to right,
  * the accumulating results are saved in shape_accum from left to right
@@ -167,7 +166,6 @@ Status ExpandAccumulateProduct(const std::vector<int64_t>& in_accum_reverse,
  * out = [2, 4, 2, 4, 8]
  */
 Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& expand, std::vector<int32_t>* out);
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h
index 9fc6a229e21..4a64ab472c1 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h
@@ -28,7 +28,6 @@
 namespace mindspore {
 namespace parallel {
-
 using Shapes = std::vector<Shape>;
 class TensorInfo {
@@ -55,7 +54,6 @@ class TensorInfo {
   // reduce method's reduce dim
   std::vector<int32_t> reduce_dim_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc
index f49b967abcc..5fbd04431cb 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string TensorLayout::ToString() const { return StandardToString() + OriginToString(); }
 std::string TensorLayout::StandardToString() const {
@@ -337,7 +336,7 @@ Status TensorLayout::UpdateTensorMap(uint32_t index, int32_t value) {
     MS_LOG(ERROR) << "Index is out of the size of the tensor map!";
     return Status::FAILED;
   }
-  Shape shape = tensor_map_.array();
+  auto shape = tensor_map_.array();
   shape[index] = value;
   if (tensor_map_.Init(shape) == Status::FAILED) {
     MS_LOG(ERROR) << "Update tensor map failed!";
diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h
index 238c9373d93..e6ddc2a708e 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h
@@ -30,7 +30,6 @@
 namespace mindspore {
 namespace parallel {
-
 class TensorLayout {
  public:
   TensorLayout() = default;
@@ -94,7 +93,6 @@ class TensorLayout {
   Map tensor_map_;
   Arrangement tensor_shape_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc
index be5eaa40bac..d8eef7e7a53 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc
+++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc
@@ -24,7 +24,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status TensorRedistribution::Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list) {
   from_origin_ = from;
   to_origin_ = to;
diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h
index 7e2b3682e60..ebaccadf532 100644
--- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h
+++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h
@@ -33,7 +33,6 @@
 namespace mindspore {
 namespace parallel {
-
 class TensorRedistribution {
  public:
   explicit TensorRedistribution(bool construct_op_flag = true, bool keep_reshape = false)
@@ -83,7 +82,6 @@ class TensorRedistribution {
   bool construct_op_flag_;
   bool keep_reshape_;
 };
-
 }  // namespace parallel
 }  // namespace mindspore
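
Editor's note (not part of the patch): besides deleting blank lines after `namespace parallel {` and before the closing namespace braces, every GenerateStrategies hunk above rewrites std::vector's (count, value) fill constructor into emplace_back plus an explicit insert, with a (void) cast marking the discarded iterator return value; the patch itself does not state the motivation, which is presumably a static-analysis rule. The stand-alone sketch below assumes only that Shape is std::vector<int32_t>, as in these sources, and checks that the rewritten PReLU construction produces the same split vector [1, 0, 1, ..., 1] as the removed lines; `dims` is a hypothetical stand-in for inputs_shape_[0].size().

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Assumption: Shape is std::vector<int32_t>, mirroring mindspore::parallel.
    using Shape = std::vector<int32_t>;

    int main() {
      const size_t dims = 4;  // stand-in for inputs_shape_[0].size()

      // Old form (removed lines in prelu_info.cc): fill constructor, then patch index 1.
      Shape old_split(dims, 1);
      old_split[1] = 0;

      // New form (added lines): build the first two elements explicitly, then
      // append the remaining dims - 2 ones; (void) discards insert()'s iterator.
      Shape new_split;
      new_split.emplace_back(1);
      new_split.emplace_back(0);
      (void)new_split.insert(new_split.end(), dims - 2, 1);

      assert(old_split == new_split);  // both are [1, 0, 1, 1]
      return 0;
    }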