forked from mindspore-Ecosystem/mindspore
support split ValueList
This commit is contained in:
parent 12251ac612
commit 3102c4ff8d
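In short: this commit teaches the (semi-)auto-parallel passes to split constant ValueList/ValueTuple inputs, e.g. a Pack over constant tensors, instead of leaving them unsplit. A minimal sketch of the newly supported pattern, modeled on the tests added below (the PackConst cell and its names are illustrative, not part of the commit):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import operations as P


class PackConst(nn.Cell):
    def __init__(self, strategy=None):
        super().__init__()
        # A tuple of constant tensors becomes a single ValueTuple node in the graph.
        self.const = Tensor(np.full((8, 8), 0.01, dtype=np.float32))
        self.pack = P.Pack(0)
        if strategy is not None:
            self.pack.shard(strategy)

    def construct(self, x):
        # SplitTensorList (added below) rewrites the constant tuple into
        # make_tuple(GetTensorSlice(c0), GetTensorSlice(c1)) on each device.
        return self.pack((self.const, self.const)) + x


context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
net = PackConst(strategy=((4, 1), (4, 1)))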
@@ -117,6 +117,17 @@ bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
 std::vector<bool> ExtractInputParameterByNode(const CNodePtr &node) {
   std::vector<bool> is_parameter;
   std::vector<AnfNodePtr> node_inputs{node->inputs()};
+  // If the input is a ValueList or ValueTuple, then none of the inputs is a parameter.
+  if ((node_inputs.size() == 2) &&
+      (IsValueNode<ValueList>(node_inputs[1]) || IsValueNode<ValueTuple>(node_inputs[1]))) {
+    std::vector<ValuePtr> inputs_seq;
+    if (IsValueNode<ValueList>(node_inputs[1])) {
+      inputs_seq = node_inputs[1]->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
+    } else {
+      inputs_seq = node_inputs[1]->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
+    }
+    return std::vector<bool>(inputs_seq.size(), false);
+  }
   if ((node_inputs.size() == 2) &&
       (AnfNodeIsPrimitive(node_inputs[1], MAKE_TUPLE) || AnfNodeIsPrimitive(node_inputs[1], MAKE_LIST))) {
     node_inputs = node_inputs[1]->cast<CNodePtr>()->inputs();
@@ -195,6 +206,22 @@ std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node) {
   std::vector<size_t> inputs_type_len;
   std::vector<AnfNodePtr> node_inputs{node->inputs()};
 
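+  // A constant ValueList/ValueTuple holds the tensors themselves, so read each element's dtype length directly.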
+  if ((node_inputs.size() == 2) &&
+      (IsValueNode<ValueList>(node_inputs[1]) || IsValueNode<ValueTuple>(node_inputs[1]))) {
+    std::vector<ValuePtr> inputs_seq;
+    if (IsValueNode<ValueList>(node_inputs[1])) {
+      inputs_seq = node_inputs[1]->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
+    } else {
+      inputs_seq = node_inputs[1]->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
+    }
+    for (auto &ele : inputs_seq) {
+      auto tensor = ele->cast<tensor::TensorPtr>();
+      MS_EXCEPTION_IF_NULL(tensor);
+      inputs_type_len.push_back(GetLengthOfDataType(tensor->Dtype()));
+    }
+    return inputs_type_len;
+  }
+
   if ((node_inputs.size() == 2) &&
       (AnfNodeIsPrimitive(node_inputs[1], MAKE_TUPLE) || AnfNodeIsPrimitive(node_inputs[1], MAKE_LIST))) {
     node_inputs = node_inputs[1]->cast<CNodePtr>()->inputs();

@@ -533,6 +533,58 @@ void SplitTensor(const AnfNodePtr &node, const CNodePtr &next_node, int index) {
   }
 }
 
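+// Slice a constant ValueList/ValueTuple input for this device: wrap every element
+// in a GetTensorSlice op and repack the slices with make_tuple.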
+void SplitTensorList(const AnfNodePtr &node, const CNodePtr &next_node, int index) {
+  MS_EXCEPTION_IF_NULL(node);
+  MS_EXCEPTION_IF_NULL(next_node);
+  if (next_node->inputs().size() != 2 || index != 1) {
+    MS_LOG(INFO) << next_node->fullname_with_scope() << " must have exactly one input, but got "
+                 << next_node->inputs().size() - 1 << "; the index should be 1, but got " << index;
+    return;
+  }
+  OperatorInfoPtr op_info = next_node->user_data<OperatorInfo>();
+  MS_EXCEPTION_IF_NULL(op_info);
+
+  std::vector<ValuePtr> inputs_values;
+  if (IsValueNode<ValueList>(node)) {
+    inputs_values = node->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
+  } else {
+    inputs_values = node->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
+  }
+  if (inputs_values.size() != op_info->inputs_tensor_info().size()) {
+    MS_LOG(EXCEPTION) << "The inputs size " << inputs_values.size() << " is not equal to the inputs shape size "
+                      << op_info->inputs_tensor_info().size();
+  }
+  std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple)};
+  FuncGraphPtr func_graph = next_node->func_graph();
+  MS_EXCEPTION_IF_NULL(func_graph);
+  FuncGraphManagerPtr manager = func_graph->manager();
+  MS_EXCEPTION_IF_NULL(manager);
+  ScopePtr scope = next_node->scope();
+  MS_EXCEPTION_IF_NULL(scope);
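+  // Build one GetTensorSlice per element, using that element's expected tensor layout.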
+  for (size_t i = 0; i < inputs_values.size(); ++i) {
+    auto value_ptr = inputs_values[i];
+    auto tensor = value_ptr->cast<tensor::TensorPtr>();
+    MS_EXCEPTION_IF_NULL(tensor);
+    TensorInfo tensor_info = op_info->inputs_tensor_info()[i];
+    TensorLayout tensor_layout = tensor_info.tensor_layout();
+    auto value_node = NewValueNode(value_ptr)->cast<AnfNodePtr>();
+    Operator op = CreateGetTensorSliceOp(tensor_layout);
+    std::vector<AnfNodePtr> node_input = CreateInput(op, value_node, SPLIT_TENSOR);
+    CNodePtr new_node = func_graph->NewCNode(node_input);
+    new_node->set_in_forward_flag(true);
+    auto new_node_value = node_input[0]->cast<ValueNodePtr>();
+    MS_EXCEPTION_IF_NULL(new_node_value);
+    PrimitivePtr new_node_prim = new_node_value->value()->cast<PrimitivePtr>();
+    new_node_prim->set_instance_name(SPLIT_TENSOR);
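+    // Assumption: this attribute tells later passes to keep the constant tensor input as a value node.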
+    new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
+    new_node->set_scope(scope);
+    node_input[0]->set_scope(scope);
+    make_tuple_inputs.push_back(new_node);
+  }
+  CNodePtr make_tuple = func_graph->NewCNode(make_tuple_inputs);
+  manager->Replace(node, make_tuple);
+}
+
 void StepSplitTensor(const AnfNodePtr &node, const FuncGraphManagerPtr &manager) {
   MS_EXCEPTION_IF_NULL(node);
   MS_EXCEPTION_IF_NULL(manager);
@@ -550,7 +602,11 @@ void StepSplitTensor(const AnfNodePtr &node, const FuncGraphManagerPtr &manager)
       continue;
     }
     if (IsParallelCareNode(use_cnode)) {
-      SplitTensor(node, use_cnode, node_pair.second);
+      if (IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
+        SplitTensorList(node, use_cnode, node_pair.second);
+      } else {
+        SplitTensor(node, use_cnode, node_pair.second);
+      }
     }
   }
 }
@@ -852,6 +908,11 @@ void InsertMirrorOps(const MirrorOps &mirror_ops, const CNodePtr &node) {
   FuncGraphManagerPtr manager = func_graph->manager();
   MS_EXCEPTION_IF_NULL(manager);
 
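+  // Constant inputs carry no gradient, so no mirror operator is needed for them.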
+  if ((node->inputs().size() == 2) && (IsValueNode<ValueSequeue>(node->input(1)))) {
+    MS_LOG(INFO) << "The input is a ValueList or ValueTuple, skip it.";
+    return;
+  }
+
   if ((node->inputs().size() == 2) &&
       (AnfNodeIsPrimitive(node->input(1), MAKE_TUPLE) || AnfNodeIsPrimitive(node->input(1), MAKE_LIST))) {
     MS_LOG(INFO) << "The mirror for " << GetPrimName(node) << " has been handled by the make_tuple node";
@@ -1049,9 +1110,34 @@ StrategyPtr ExtractStrategy(std::unordered_map<std::string, ValuePtr> attrs) {
   return strategyPtr;
 }
 
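+// Return the shape of every tensor element held by a constant ValueList/ValueTuple node.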
+Shapes GetValueListShape(const AnfNodePtr &node) {
+  Shapes shapes;
+  std::vector<ValuePtr> inputs_seq;
+  if (IsValueNode<ValueList>(node)) {
+    inputs_seq = node->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
+  } else if (IsValueNode<ValueTuple>(node)) {
+    inputs_seq = node->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
+  } else {
+    MS_LOG(EXCEPTION) << "The node is neither a ValueList nor a ValueTuple";
+  }
+  for (auto &ele : inputs_seq) {
+    auto tensor = ele->cast<tensor::TensorPtr>();
+    MS_EXCEPTION_IF_NULL(tensor);
+    auto one_shape = tensor->shape();
+    Shape shape_64;
+    (void)std::transform(one_shape.begin(), one_shape.end(), std::back_inserter(shape_64),
+                         [](const int &value) { return static_cast<int64_t>(value); });
+    shapes.push_back(shape_64);
+  }
+  return shapes;
+}
+
 Shapes GetNodeShape(const AnfNodePtr &node) {
   MS_EXCEPTION_IF_NULL(node);
   Shapes shapes;
+  if (IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
+    return GetValueListShape(node);
+  }
   BaseShapePtr base_shape_ptr = node->Shape();
   if (node->isa<CNode>()) {
     auto cnode = node->cast<CNodePtr>();
@@ -1177,7 +1263,8 @@ std::vector<Shapes> ExtractShape(const CNodePtr &node) {
       std::pair<AnfNodePtr, int> node_pair = std::make_pair(node, SizeToInt(i));
       g_RefMap[parameters[0]] = node_pair;
       input_shapes = GetRefKeyNodeShape(input, func_graph);
-    } else if (IsValueNode<Tensor>(input) || input->isa<CNode>() || input->isa<Parameter>()) {
+    } else if (IsValueNode<Tensor>(input) || input->isa<CNode>() || input->isa<Parameter>() ||
+               ((IsValueNode<ValueList>(input) || IsValueNode<ValueTuple>(input)) && (inputs_size == 2))) {
       input_shapes = GetNodeShape(input);
     } else {
       continue;
@@ -2258,7 +2345,7 @@ void ParallelCommunication(const FuncGraphPtr &root, const std::vector<AnfNodePt
       }
 
       HandleSpecialNode(distribute_operator, cnode);
-    } else if (IsValueNode<Tensor>(node)) {
+    } else if (IsValueNode<Tensor>(node) || IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
       StepSplitTensor(node, manager);
     }
   }

@@ -20,6 +20,7 @@ import mindspore.nn as nn
 from mindspore.common.api import _executor
 from mindspore.nn import TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
+from mindspore.nn import Dense, Flatten
 
 
 class Net(nn.Cell):
@@ -71,12 +72,67 @@ class Net2(nn.Cell):
         return out
 
 
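+# PackConstantNet1 passes Pack a Python list of constants (a ValueList);
+# PackConstantNet2 (below) passes a tuple (a ValueTuple).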
+class PackConstantNet1(nn.Cell):
+    def __init__(self, dense_in_channel, dense_out_channel, axis=0, shape=None, strategy=None):
+        super().__init__()
+        weight_np = np.full((dense_out_channel, dense_in_channel), 0.01, dtype=np.float32)
+        bias_np = np.full((dense_out_channel), 0.01, dtype=np.float32)
+        self.pack_con = Tensor(np.full(shape, 0.01, dtype=np.float32))
+        self.flat = Flatten()
+        self.dense = Dense(in_channels=dense_in_channel,
+                           out_channels=dense_out_channel,
+                           weight_init=Tensor(weight_np),
+                           bias_init=Tensor(bias_np),
+                           has_bias=True)
+        self.mul = P.Mul()
+        self.pack = P.Pack(axis)
+        if strategy is not None:
+            self.pack.shard(strategy)
+
+    def construct(self, inputs):
+        x = self.pack([self.pack_con, self.pack_con, self.pack_con, self.pack_con,
+                       self.pack_con, self.pack_con, self.pack_con, self.pack_con])
+        x1 = self.flat(x)
+        x2 = self.flat(inputs)
+        x = self.mul(x1, x2)
+        x = self.dense(x)
+        return x
+
+
+class PackConstantNet2(nn.Cell):
+    def __init__(self, dense_in_channel, dense_out_channel, axis=0, shape=None, strategy=None):
+        super().__init__()
+        weight_np = np.full((dense_out_channel, dense_in_channel), 0.01, dtype=np.float32)
+        bias_np = np.full((dense_out_channel), 0.01, dtype=np.float32)
+        self.pack_con = Tensor(np.full(shape, 0.01, dtype=np.float32))
+        self.flat = Flatten()
+        self.dense = Dense(in_channels=dense_in_channel,
+                           out_channels=dense_out_channel,
+                           weight_init=Tensor(weight_np),
+                           bias_init=Tensor(bias_np),
+                           has_bias=True)
+        self.mul = P.Mul()
+        self.pack = P.Pack(axis)
+        if strategy is not None:
+            self.pack.shard(strategy)
+
+    def construct(self, inputs):
+        x = self.pack((self.pack_con, self.pack_con, self.pack_con, self.pack_con,
+                       self.pack_con, self.pack_con, self.pack_con, self.pack_con))
+        x1 = self.flat(x)
+        x2 = self.flat(inputs)
+        x = self.mul(x1, x2)
+        x = self.dense(x)
+        return x
+
+
 _w1 = Tensor(np.ones([48, 64]), dtype=ms.float32)
 _w2 = Tensor(np.ones([48, 64]), dtype=ms.float32)
 _w3 = Tensor(np.ones([48, 64]), dtype=ms.float32)
 _x = Tensor(np.ones([2, 48, 64]), dtype=ms.float32)
 _x1 = Tensor(np.ones([48, 64]), dtype=ms.float32)
 _x2 = Tensor(np.ones([3, 48, 64]), dtype=ms.float32)
+_x_c = Tensor(np.ones([8, 8, 8]), dtype=ms.float32)
 
 
 def compile_net(net):
@@ -106,6 +162,15 @@ def compile_net2(net):
     context.reset_auto_parallel_context()
 
 
+def compile_net_con(net):
+    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
+    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
+    train_net = TrainOneStepCell(net, optimizer)
+    train_net.set_auto_parallel()
+    _executor.compile(train_net, _x_c)
+    context.reset_auto_parallel_context()
+
+
 def test_pack_parameter():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
     strategy1 = ((4, 2), (4, 2))
@@ -186,3 +251,24 @@ def test_pack_auto_parallel_3_tensor():
     context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
     net = Net2(_w1, _w2, _w3)
     compile_net2(net)
+
+
+def test_pack_constant1():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    net = PackConstantNet1(dense_in_channel=64, dense_out_channel=4, axis=0, shape=(8, 8),
+                           strategy=((4, 1), (4, 1), (4, 1), (4, 1), (4, 1), (4, 1), (4, 1), (4, 1)))
+    compile_net_con(net)
+
+
+def test_pack_constant2():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    net = PackConstantNet2(dense_in_channel=64, dense_out_channel=4, axis=0, shape=(8, 8),
+                           strategy=((4, 1), (4, 1), (4, 1), (4, 1), (4, 1), (4, 1), (4, 1), (4, 1)))
+    compile_net_con(net)
+
+
+def test_pack_auto_constant():
+    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
+    net = PackConstantNet1(dense_in_channel=64, dense_out_channel=4, axis=0, shape=(8, 8),
+                           strategy=((8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1)))
+    compile_net_con(net)