diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/frontend/parallel/auto_parallel/operator_costmodel.h
index 1759fabb03a..fbda9c15466 100644
--- a/mindspore/ccsrc/frontend/parallel/auto_parallel/operator_costmodel.h
+++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/operator_costmodel.h
@@ -201,6 +201,8 @@ using TileCost = SoftmaxCost;
 using TileCostPtr = std::shared_ptr<TileCost>;
 using ConcatCost = TileCost;
 using ConcatCostPtr = std::shared_ptr<ConcatCost>;
+using SplitCost = TileCost;
+using SplitCostPtr = std::shared_ptr<SplitCost>;
 
 class TmpIdentityCost : public OperatorCost {
  public:
diff --git a/mindspore/ccsrc/frontend/parallel/dynamic_creator.h b/mindspore/ccsrc/frontend/parallel/dynamic_creator.h
index 8e006863d43..9d8d114bb87 100644
--- a/mindspore/ccsrc/frontend/parallel/dynamic_creator.h
+++ b/mindspore/ccsrc/frontend/parallel/dynamic_creator.h
@@ -179,6 +179,7 @@ REGISTER(TileInfo);
 REGISTER(StridedSliceInfo);
 REGISTER(DropoutInfo);
 REGISTER(ConcatInfo);
+REGISTER(SplitInfo);
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/ops_info_head_files.h b/mindspore/ccsrc/frontend/parallel/ops_info/ops_info_head_files.h
index a48e04b6dd9..18e3dec6ed9 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/ops_info_head_files.h
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/ops_info_head_files.h
@@ -40,5 +40,6 @@
 #include "frontend/parallel/ops_info/tile_info.h"
 #include "frontend/parallel/ops_info/strided_slice_info.h"
 #include "frontend/parallel/ops_info/concat_info.h"
+#include "frontend/parallel/ops_info/split_info.h"
 
 #endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_HEAD_FILES_H_
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/split_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/split_info.cc
new file mode 100644
index 00000000000..3a9ff4b909d
--- /dev/null
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/split_info.cc
@@ -0,0 +1,294 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frontend/parallel/ops_info/split_info.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "frontend/parallel/device_matrix.h"
+#include "frontend/parallel/strategy.h"
+#include "frontend/parallel/tensor_layout/tensor_redistribution.h"
+#include "frontend/parallel/context.h"
+#include "pipeline/jit/resource.h"
+
+namespace mindspore {
+namespace parallel {
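+// GetAttrs reads the Split primitive's `axis` and `output_num` attributes; a
+// negative axis is normalized against the rank of the (single) input tensor.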
+Status SplitInfo::GetAttrs() {
+  int axis = 0;
+  int output_num = 0;
+
+  auto axis_iter = attrs_.find(AXIS);
+  if (axis_iter != attrs_.end()) {
+    MS_EXCEPTION_IF_NULL(axis_iter->second);
+    if (axis_iter->second->isa<Int32Imm>()) {
+      axis = axis_iter->second->cast<Int32ImmPtr>()->value();
+    } else {
+      MS_LOG(ERROR) << name_ << ": The value of axis is not int";
+      return FAILED;
+    }
+  } else {
+    MS_LOG(ERROR) << name_ << ": Can not find the axis attr";
+    return FAILED;
+  }
+
+  if (inputs_shape_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The inputs shape is empty";
+    return FAILED;
+  }
+  int dim = SizeToInt(inputs_shape_[0].size());
+  if (axis < 0) {
+    axis = axis + dim;
+  }
+  axis_ = IntToSize(axis);
+
+  auto output_num_iter = attrs_.find(OUTPUT_NUM);
+  if (output_num_iter != attrs_.end()) {
+    MS_EXCEPTION_IF_NULL(output_num_iter->second);
+    if (output_num_iter->second->isa<Int32Imm>()) {
+      output_num = output_num_iter->second->cast<Int32ImmPtr>()->value();
+    } else {
+      MS_LOG(ERROR) << name_ << ": The value of output_num is not int";
+      return FAILED;
+    }
+  } else {
+    MS_LOG(ERROR) << name_ << ": Can not find the output_num attr";
+    return FAILED;
+  }
+  output_num_ = IntToSize(output_num);
+
+  return SUCCESS;
+}
+
+Status SplitInfo::CheckStrategy(const StrategyPtr &strategy) {
+  MS_EXCEPTION_IF_NULL(strategy);
+  if (CheckStrategyValue(strategy, inputs_shape_) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Invalid strategy";
+    return FAILED;
+  }
+
+  std::vector<Dimensions> stra = strategy->GetInputDim();
+  if (stra.empty()) {
+    MS_LOG(ERROR) << name_ << ": The strategy is empty";
+    return FAILED;
+  }
+
+  if (axis_ >= stra[0].size()) {
+    MS_LOG(ERROR) << name_ << ": The axis is out of range, the axis is " << axis_;
+    return FAILED;
+  }
+
+  if (stra[0][axis_] != 1) {
+    MS_LOG(ERROR) << name_ << ": The split axis can not be sharded";
+    return FAILED;
+  }
+
+  return SUCCESS;
+}
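+
+// The device matrix is exactly the input's sharding strategy, so tensor
+// dimension i maps onto device-matrix dimension (rank - i - 1) below.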
+Status SplitInfo::InferDevMatrixShape() {
+  MS_EXCEPTION_IF_NULL(strategy_);
+  std::vector<Dimensions> stra = strategy_->GetInputDim();
+  if (stra.empty()) {
+    MS_LOG(ERROR) << name_ << ": The strategy is empty";
+    return FAILED;
+  }
+
+  dev_matrix_shape_ = stra[0];
+  return SUCCESS;
+}
+
+Status SplitInfo::InferTensorMap() {
+  TensorMap tensor_map;
+  if (inputs_shape_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The inputs shape is empty";
+    return FAILED;
+  }
+
+  int32_t size = SizeToInt(inputs_shape_[0].size());
+  for (int i = 0; i < size; ++i) {
+    tensor_map.push_back(size - i - 1);
+  }
+
+  inputs_tensor_map_.push_back(tensor_map);
+
+  for (size_t i = 0; i < outputs_shape_.size(); ++i) {
+    outputs_tensor_map_.push_back(tensor_map);
+  }
+
+  return SUCCESS;
+}
+
+Status SplitInfo::InferMirrorOps() {
+  mirror_ops_.clear();
+  if (inputs_tensor_map_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The inputs tensor map is empty";
+    return FAILED;
+  }
+
+  Shape input_tensor_map = inputs_tensor_map_[0];
+  std::vector<Group> group;
+  if (CreateGroupByTensorMap(input_tensor_map, &group) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Create group for input failed.";
+    return FAILED;
+  }
+
+  if (group.empty()) {
+    MS_LOG(INFO) << name_ << ": The mirror group is empty.";
+    return SUCCESS;
+  }
+
+  OperatorVector mirror_op = CreateMirrorOps(group[0].name(), group[0].GetDevNum());
+  mirror_ops_.push_back(mirror_op);
+  std::string group_name = group[0].name();
+  MS_LOG(INFO) << name_ << ": Create the mirror ops success, the group name is " << group_name;
+
+  return SUCCESS;
+}
+
+Status SplitInfo::InferTensorInfo() {
+  if (inputs_shape_.empty() || outputs_shape_.empty() || inputs_tensor_map_.empty() || outputs_tensor_map_.empty()) {
+    MS_LOG(ERROR) << name_ << ": Invalid args";
+    return FAILED;
+  }
+
+  TensorLayout input_layout, output_layout;
+  // infer tensor layout
+  if (input_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], inputs_shape_[0]) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Infer input tensor layout failed.";
+    return FAILED;
+  }
+  TensorInfo input_tensor_info(input_layout);
+  inputs_tensor_info_.push_back(input_tensor_info);
+
+  if (output_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], outputs_shape_[0]) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Infer output tensor layout failed.";
+    return FAILED;
+  }
+  for (size_t i = 0; i < outputs_shape_.size(); ++i) {
+    TensorInfo output_tensor_info(output_layout);
+    outputs_tensor_info_.push_back(output_tensor_info);
+  }
+
+  return SUCCESS;
+}
+
+Status SplitInfo::SetCostUnderStrategy(const StrategyPtr &strategy) { return SetCostUnderStrategyBase(strategy); }
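+
+// Candidate strategies may shard any input dimension except the split axis
+// (marked 0 in the splittable mask), which must stay whole on every device.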
+Status SplitInfo::GenerateStrategies(int32_t stage_id) {
+  if (InferAttrs() != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Infer attrs failed";
+    return FAILED;
+  }
+  if (inputs_shape_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The inputs shape is empty";
+    return FAILED;
+  }
+  Shape input_split;
+  for (size_t i = 0; i < inputs_shape_[0].size(); ++i) {
+    if (i == axis_) {
+      input_split.push_back(0);
+    } else {
+      input_split.push_back(1);
+    }
+  }
+
+  Shapes splittable_input = {input_split};
+  Shapes tmp_inputs_shape = {inputs_shape_[0]};
+
+  std::vector<StrategyPtr> sp_vector;
+  if (GenerateStrategiesForIndependentInputs(stage_id, tmp_inputs_shape, splittable_input, &sp_vector) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Generate strategies failed";
+    return FAILED;
+  }
+
+  size_t success = 0;
+  for (auto &sp : sp_vector) {
+    if (SetCostUnderStrategy(sp) == SUCCESS) {
+      success++;
+      MS_LOG(INFO) << name_ << ": Successfully generated strategy " << success;
+      PrintStrategy(sp);
+    }
+  }
+  return SUCCESS;
+}
+
+std::shared_ptr<Strategys> SplitInfo::GenerateBatchStrategies() {
+  if (GetAttrs() != SUCCESS) {
+    MS_LOG(EXCEPTION) << name_ << ": Get attr failed";
+  }
+  CheckGlobalDeviceManager();
+  size_t dev_num = g_device_manager->GetDeviceListByStageId(0).size();
+  Dimensions input_strategy(inputs_shape_[0].size(), 1);
+  // the split axis can not be sharded, so shard another dimension instead
+  if (inputs_shape_[0].size() > 1) {
+    if (axis_ == 0) {
+      input_strategy[1] = SizeToInt(dev_num);
+    } else {
+      input_strategy[0] = SizeToInt(dev_num);
+    }
+  }
+  Strategys strategy_v = {input_strategy};
+  return std::make_shared<Strategys>(strategy_v);
+}
+
+Status SplitInfo::InferAsLossDivisor() {
+  if (!ParallelContext::GetInstance()->loss_repeated_mean()) {
+    as_loss_divisor_ = 1;
+    return SUCCESS;
+  }
+
+  if (outputs_tensor_map_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The outputs tensor map is empty.";
+    return FAILED;
+  }
+
+  if (outputs_tensor_map_[0].empty()) {
+    as_loss_divisor_ = SizeToInt(global_device_list_.size());
+    MS_LOG(INFO) << name_ << ": The output is a scalar, use the device size " << as_loss_divisor_
+                 << " as the loss divisor.";
+    return SUCCESS;
+  }
+
+  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]);
+  MS_LOG(INFO) << name_ << ": The dev matrix shape is " << ShapeToString(dev_matrix_shape_)
+               << ", the output tensor map is " << ShapeToString(outputs_tensor_map_[0]) << ", loss divisor is "
+               << as_loss_divisor_;
+  return SUCCESS;
+}
+
+Status SplitInfo::Init(const StrategyPtr &strategy) {
+  if (InitWithAutoRepeatCalc(strategy) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Init failed.";
+    return FAILED;
+  }
+  MS_LOG(INFO) << name_ << ": Init success.";
+  return SUCCESS;
+}
+
+Status SplitInfo::InitForCostModel(const StrategyPtr &strategy) {
+  if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Init for cost model failed.";
+    return FAILED;
+  }
+
+  MS_LOG(INFO) << name_ << ": Init for cost model success.";
+  return SUCCESS;
+}
+}  // namespace parallel
+}  // namespace mindspore
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/split_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/split_info.h
new file mode 100644
index 00000000000..4e747bad4ff
--- /dev/null
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/split_info.h
@@ -0,0 +1,60 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_SPLIT_INFO_H_
+#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_SPLIT_INFO_H_
+
+#include <memory>
+#include <string>
+
+#include "ir/value.h"
+#include "frontend/parallel/auto_parallel/operator_costmodel.h"
+#include "frontend/parallel/ops_info/operator_info.h"
+#include "frontend/parallel/strategy.h"
+
+namespace mindspore {
+namespace parallel {
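+// Parallel info for the Split primitive: the input is sliced into output_num
+// tensors along `axis`, so every dimension except the split axis may be
+// sharded and all outputs inherit the input's layout. For example, with 8
+// devices, P.Split(0, 2) accepts the strategy ((1, 2, 4),): axis 0 stays
+// whole on each device while the other two dimensions are sharded.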
+class SplitInfo : public OperatorInfo {
+ public:
+  SplitInfo(const std::string &operator_name, const Shapes &inputs_shape, const Shapes &outputs_shape,
+            const PrimitiveAttrs &attrs)
+      : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared<SplitCost>(false)) {}
+  ~SplitInfo() override = default;
+
+  Status Init(const StrategyPtr &strategy) override;
+  Status InitForCostModel(const StrategyPtr &strategy) override;
+  Status GenerateStrategies(int32_t stage_id) override;
+  std::shared_ptr<Strategys> GenerateBatchStrategies() override;
+  Status SetCostUnderStrategy(const StrategyPtr &strategy) override;
+
+ protected:
+  Status GetAttrs() override;
+  Status CheckStrategy(const StrategyPtr &strategy) override;
+  Status InferMirrorOps() override;
+  Status InferForwardCommunication() override { return SUCCESS; }
+  Status InferTensorInfo() override;
+  Status InferDevMatrixShape() override;
+  Status InferTensorMap() override;
+  Status InferAsLossDivisor() override;
+
+ private:
+  size_t axis_ = 0;
+  size_t output_num_ = 0;
+};
+}  // namespace parallel
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_SPLIT_INFO_H_
diff --git a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
index 295bc27333c..d6f48071d8a 100644
--- a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
+++ b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
@@ -263,7 +263,7 @@ bool IsSplittableOperator(const std::string &op_name) {
     LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT, CONCAT,
     STRIDEDSLICE, GET_NEXT, CAST, NEG, SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE, SPARSE_GATHERV2, TILE, DROPOUT,
     SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, SIGMOID_CROSS_ENTROPY_WITH_LOGITS, SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS,
-    EMBEDDING_LOOKUP, FUSE_BATCH_NORM_EX};
+    EMBEDDING_LOOKUP, FUSE_BATCH_NORM_EX, SPLIT};
   // clang-format on
 
   auto iter = splittable_op.find(op_name);
diff --git a/tests/ut/python/parallel/test_split.py b/tests/ut/python/parallel/test_split.py
new file mode 100644
index 00000000000..0ebf22fdff4
--- /dev/null
+++ b/tests/ut/python/parallel/test_split.py
@@ -0,0 +1,147 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import numpy as np
+
+import mindspore as ms
+import mindspore.context as context
+import mindspore.nn as nn
+from mindspore import Tensor, Parameter
+from mindspore.common.api import _executor
+from mindspore.nn import TrainOneStepCell, Momentum
+from mindspore.ops import operations as P
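+
+
+# The three toy networks below exercise a sharded Split in different spots:
+# Net splits an activation into three outputs that feed MatMuls, Net1 splits
+# a Parameter before it is consumed, and Net2 uses only one split output.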
+class Net(nn.Cell):
+    def __init__(self, mul_weight, axis=0, out_nums=1, strategy1=None, strategy2=None, strategy3=None):
+        super(Net, self).__init__()
+        self.split = P.Split(axis, out_nums).shard(strategy1)
+        self.mul = P.Mul().shard(strategy2)
+        self.matmul = P.MatMul(transpose_b=True).shard(strategy2)
+        self.matmul2 = P.MatMul().shard(strategy3)
+        self.weight = Parameter(mul_weight, "w1")
+
+    def construct(self, x):
+        out = self.mul(x, self.weight)
+        out1, out2, out3 = self.split(out)
+        out = self.matmul(out1, out2)
+        out = self.matmul2(out, out3)
+        return out
+
+
+class Net1(nn.Cell):
+    def __init__(self, mul_weight, axis=0, out_nums=1, strategy1=None, strategy2=None):
+        super(Net1, self).__init__()
+        self.split = P.Split(axis, out_nums).shard(strategy1)
+        self.mul = P.Mul().shard(strategy2)
+        self.weight = Parameter(mul_weight, "w1")
+
+    def construct(self, x):
+        out1, out2 = self.split(self.weight)
+        out = self.mul(x, out1)
+        out = self.mul(out, out2)
+        return out
+
+
+class Net2(nn.Cell):
+    def __init__(self, mul_weight, axis=0, out_nums=1, strategy1=None, strategy2=None):
+        super(Net2, self).__init__()
+        self.split = P.Split(axis, out_nums).shard(strategy1)
+        self.mul = P.Mul().shard(strategy2)
+        self.weight = Parameter(mul_weight, "w1")
+
+    def construct(self, x):
+        out = self.mul(x, self.weight)
+        out1, _ = self.split(out)
+        return out1
+
+
+_w = Tensor(np.ones([48, 64]), dtype=ms.float32)
+_x = Tensor(np.ones([48, 64]), dtype=ms.float32)
+
+_w1 = Tensor(np.ones([96, 64, 32]), dtype=ms.float32)
+_x1 = Tensor(np.ones([48, 64, 32]), dtype=ms.float32)
+
+_w2 = Tensor(np.ones([48, 64, 32]), dtype=ms.float32)
+
+
+def compile_net(net):
+    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
+    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
+    train_net = TrainOneStepCell(net, optimizer)
+    train_net.set_auto_parallel()
+    _executor.compile(train_net, _x)
+    context.reset_auto_parallel_context()
+
+
+def compile_net1(net):
+    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
+    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
+    train_net = TrainOneStepCell(net, optimizer)
+    train_net.set_auto_parallel()
+    _executor.compile(train_net, _x1)
+    context.reset_auto_parallel_context()
+
+
+def test_split_parameter():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 4, 2),)
+    strategy2 = ((1, 4, 2), (1, 4, 2))
+    net = Net1(_w1, 0, 2, strategy1, strategy2)
+    compile_net1(net)
+
+
+def test_split_parameter_no_full_split():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 2, 2),)
+    strategy2 = ((1, 4, 2), (1, 4, 2))
+    net = Net1(_w1, 0, 2, strategy1, strategy2)
+    compile_net1(net)
+
+
+def test_split_tensor():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 8),)
+    strategy2 = ((1, 8), (1, 8))
+    strategy3 = ((1, 1), (1, 8))
+    net = Net(_w, 0, 3, strategy1, strategy2, strategy3)
+    compile_net(net)
+
+
+def test_split_output():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 4, 2),)
+    strategy2 = ((1, 4, 2), (1, 4, 2))
+    net = Net2(_w2, 0, 2, strategy1, strategy2)
+    compile_net1(net)
+
+
+def test_split_output_no_full_split():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 2, 2),)
+    strategy2 = ((1, 4, 2), (1, 4, 2))
+    net = Net2(_w2, 0, 2, strategy1, strategy2)
+    compile_net1(net)
+
+
+def test_split_no_strategy():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = None
+    strategy2 = ((1, 4, 2), (1, 4, 2))
+    net = Net2(_w2, 0, 2, strategy1, strategy2)
+    compile_net1(net)
+
+
+def test_split_auto_parallel():
+    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
+    net = Net2(_w2, 0, 2)
+    compile_net1(net)