set last node to data parallel or repeated calculation in eval/predict
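In eval/predict graphs, the last forward nodes now fall back to batch parallel when full_batch is disabled, or have every strategy dimension reset to 1 (repeated calculation) when full_batch is enabled.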

yao_yf 2020-10-24 16:21:27 +08:00
parent 4bbb854d3c
commit 65d8e63580
101 changed files with 337 additions and 25 deletions

View File

@ -1512,7 +1512,87 @@ Status ValidStageCheck(const std::vector<int32_t> &stages, int32_t strategy_stag
}
}
void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
// find the previous parallel care nodes.
bool FindPreNodes(const AnfNodePtr &node, vector<std::string> *unique_ids) {
MS_EXCEPTION_IF_NULL(unique_ids);
// if the previous node is a parameter, handle it outside.
if (node->isa<Parameter>()) {
return false;
}
if (!node->isa<CNode>()) {
return false;
}
CNodePtr cnode = node->cast<CNodePtr>();
if (!IsValueNode<Primitive>(cnode->input(0))) {
return false;
}
ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
if (IsParallelCareNode(cnode) && prim->name() != MAKE_TUPLE && prim->name() != MAKE_LIST) {
unique_ids->push_back(cnode->UniqueId());
return true;
}
bool find = false;
for (size_t index = 0; index < cnode->inputs().size(); ++index) {
if (prim->name() == DEPEND && index != 1) {
continue;
}
if (FindPreNodes(cnode->inputs()[index], unique_ids)) {
find = true;
continue;
}
}
return find;
}
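// walk back from the RETURN node to collect the unique ids of the last parallel care nodes in an eval/predict graph.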
void FindLastNodesUniqueId(const std::vector<AnfNodePtr> &all_nodes, vector<std::string> *unique_ids) {
MS_EXCEPTION_IF_NULL(unique_ids);
for (auto &node : all_nodes) {
auto cnode = node->cast<CNodePtr>();
if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
continue;
}
ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
if (prim->name() == RETURN) {
if (!FindPreNodes(cnode, unique_ids)) {
MS_LOG(WARNING) << "cannot find the last parallel care node in eval graph";
}
}
}
}
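// generate a batch parallel strategy for the operator and record it in the primitive's attrs for display.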
StrategyPtr GenerateBatchParallelStrategy(const OperatorInfoPtr operator_, const PrimitivePtr prim) {
MS_EXCEPTION_IF_NULL(operator_);
MS_EXCEPTION_IF_NULL(prim);
StrategyPtr strategyPtr;
std::shared_ptr<Strategys> strategy_v_ptr = operator_->GenerateBatchStrategies();
MS_EXCEPTION_IF_NULL(strategy_v_ptr);
strategyPtr = NewStrategy(0, *strategy_v_ptr);
std::vector<ValuePtr> elements;
for (size_t i = 0; i < strategy_v_ptr->size(); i++) {
elements.push_back(MakeValue((*strategy_v_ptr)[i]));
}
ValueTuplePtr strategy = std::make_shared<ValueTuple>(elements);
// display the strategy generated by batch parallel
auto attrs = prim->attrs();
attrs[GEN_STRATEGY] = strategy;
(void)prim->SetAttrs(attrs);
MS_LOG(INFO) << "prim " << prim->name() << " batch parallel strategy is " << attrs[GEN_STRATEGY]->ToString();
return strategyPtr;
}
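// reset every dimension of the strategy to 1 so that the last node repeats the same calculation on every device.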
void SetLastNodeStrategy(const StrategyPtr strategyPtr) {
auto strategys = strategyPtr->GetInputDim();
for (size_t i = 0; i < strategys.size(); ++i) {
for (size_t j = 0; j < strategys[i].size(); ++j) {
strategys[i][j] = 1;
}
}
strategyPtr->ResetInputs(strategys);
}
void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes, bool is_training) {
// load strategy map from checkpoint
StrategyMap stra_map;
if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
@ -1520,7 +1600,11 @@ void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
}
}
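// in eval/predict, collect the last forward nodes so that their strategies can be overridden below.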
vector<std::string> last_forward_node_ids;
if (!is_training) {
FindLastNodesUniqueId(all_nodes, &last_forward_node_ids);
MS_LOG(INFO) << "there are " << last_forward_node_ids.size() << " output nodes in eval/predict";
}
// Get global rank after the checkpoint?
int32_t global_rank = ParallelContext::GetInstance()->global_rank();
std::vector<int32_t> stages = ParallelContext::GetInstance()->stage();
@ -1572,30 +1656,22 @@ void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
}
bool load_strategy_from_ckpt =
StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map.find(strategy_key_name) != stra_map.end();
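// the last forward nodes of an eval/predict graph fall back to batch parallel unless full_batch is set; see SetLastNodeStrategy below for the full_batch case.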
bool is_last_nodes = std::find(last_forward_node_ids.begin(), last_forward_node_ids.end(), cnode->UniqueId()) !=
last_forward_node_ids.end();
bool full_batch = ParallelContext::GetInstance()->full_batch();
if ((is_last_nodes && !full_batch) || (!StrategyFound(attrs) && !load_strategy_from_ckpt)) {
MS_LOG(INFO) << "ExtractInformation: the strategy of node " << node->ToString() << " prim " << prim->name()
<< " is empty, using batch parallel";
strategyPtr = GenerateBatchParallelStrategy(operator_, prim);
} else if (load_strategy_from_ckpt) {
strategyPtr = stra_map[strategy_key_name];
} else {
strategyPtr = ExtractStrategy(attrs);
}
if (strategyPtr != nullptr) {
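// with full_batch, override the strategy of the last node with all ones so the calculation is repeated on each device.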
if (is_last_nodes && full_batch) {
SetLastNodeStrategy(strategyPtr);
}
(*operator_).set_stage_id(strategyPtr->GetInputStage());
MS_LOG(INFO) << "Extract stage id for op " << prim->name() << " is " << (*operator_).stage_id();
if (ValidStageCheck(stages, (*operator_).stage_id()) == FAILED) {
@ -2854,7 +2930,7 @@ bool StepParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &optimizer)
}
// extract shape and strategy, set operator_info
ExtractInformation(all_nodes, root->has_flag(TRAINING));
ReshapeInit(all_nodes);
}

View File

@ -118,7 +118,7 @@ void CoverSliceShape(const FuncGraphPtr &root);
void SetVirtualDatasetStrategy(const CNodePtr &node);
// Create parallel operator for primitive node (has strategy)
void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes, bool is_training = true);
TensorLayout GetInputLayoutFromCNode(const std::pair<AnfNodePtr, int> &node_pair);

View File

@ -59,6 +59,7 @@ class Grad(nn.Cell):
def compile_net(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)

View File

@ -48,6 +48,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)
@ -649,6 +650,7 @@ def test_assign_sub():
def compile_sub_net(net, x):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
context.set_auto_parallel_context(device_num=64, global_rank=15)
@ -696,6 +698,7 @@ def test_assign_add():
def compile_sub_net(net, x):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
context.set_auto_parallel_context(device_num=64, global_rank=15)
@ -743,6 +746,7 @@ def test_assign():
def compile_sub_net(net, x):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
context.set_auto_parallel_context(device_num=64, global_rank=15)

View File

@ -73,4 +73,5 @@ def test_auto_parallel_bn_with_prelu():
net = GradWrap(NetWithLoss(Net()))
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)

View File

@ -43,6 +43,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -52,6 +52,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b, phase):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b, phase=phase)

View File

@ -61,6 +61,7 @@ def test_auto_parallel_assign_sub_with_ref_key():
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, phase="train")
strategies = _executor._get_shard_strategy(net)
for (k, v) in strategies.items():

View File

@ -81,6 +81,7 @@ def test_double_star_graph():
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, y, z, w, phase='train')
strategies = _executor._get_shard_strategy(net)
expected_strategies = {'Default/network-Net/Cast-op0': [[8, 1]],

View File

@ -72,4 +72,5 @@ def test_common_parameter():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, z)

View File

@ -79,6 +79,7 @@ def test_double_source_graph():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, z, w, a)
@ -114,4 +115,5 @@ def test_double_source_complex_graph():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, z, w, a)

View File

@ -83,4 +83,5 @@ def test_double_star_graph():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, z, w, a, b, c)

View File

@ -113,6 +113,7 @@ def test_double_subgraphs():
x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32)
reset_op_id()
net.set_train()
_executor.compile(net, x, phase='train')
strategies = _executor._get_shard_strategy(net)
for (k, v) in strategies.items():

View File

@ -70,4 +70,5 @@ def test_two_matmul():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, z, w, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, z, w, b)
# model_parallel test

View File

@ -73,4 +73,5 @@ def test_auto_parallel_l2normalize():
x = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
y = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
b = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b, phase='train')

View File

@ -70,4 +70,5 @@ def test_two_matmul_dropout():
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -74,6 +74,7 @@ def test_matmul_prelu():
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, y, b, phase='train')
strategies = _executor._get_shard_strategy(net)
for (k, v) in strategies.items():

View File

@ -58,6 +58,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, inputs_, label_)
context.reset_auto_parallel_context()

View File

@ -99,6 +99,7 @@ def test_auto_parallel_arithmetic():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64]), dtype=ms.int32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -68,6 +68,7 @@ def test_common_parameter():
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, y, phase='train')
strategies = _executor._get_shard_strategy(net)
for (k, v) in strategies.items():

View File

@ -77,4 +77,5 @@ def test_four_matmul_linear():
net = GradWrap(NetWithLoss(Net(strategy1)))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, z, w, b)

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -68,6 +68,7 @@ def test_reshape_matmul():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_reshape():
@ -90,6 +91,7 @@ def test_reshape_reshape():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
@ -115,6 +117,7 @@ def test_reshape_auto_1():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
@ -143,6 +146,7 @@ def test_reshape_auto_2():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
@ -168,6 +172,7 @@ def test_reshape_auto_3():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
@ -194,6 +199,7 @@ def test_reshape_auto_4():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
@ -244,6 +250,7 @@ def test_reshape_auto_5():
net = GradWrap5(NetWithLoss5(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)
def test_reshape_auto_6():
@ -291,6 +298,7 @@ def test_reshape_auto_6():
net = GradWrap6(NetWithLoss6(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)
def test_reshape_auto_7():
@ -313,4 +321,5 @@ def test_reshape_auto_7():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -66,4 +66,5 @@ def test_softmax_cross_entropy_loss_auto_parallel():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 32]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -88,6 +88,7 @@ def test_star_strategy_consistency1():
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, phase='train')
@ -102,6 +103,7 @@ def test_star_strategy_consistency2():
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, phase='train')
@ -116,6 +118,7 @@ def test_star_strategy_consistency3():
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, phase='train')
@ -131,4 +134,5 @@ def test_star_strategy_consistency4():
net.set_auto_parallel()
reset_op_id()
with pytest.raises(RuntimeError):
net.set_train()
_executor.compile(net, x, phase='train')

View File

@ -112,4 +112,5 @@ def test_dmnet_train_step():
net = GradWrap(NetWithLoss(MultiTransformer()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, input_)

View File

@ -76,6 +76,7 @@ def test_two_matmul_transpose():
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, y, b, phase='train')
strategies = _executor._get_shard_strategy(net)
expected_strategies = {'Default/network-Net/Transpose-op3': [[1, 16]],

View File

@ -70,4 +70,5 @@ def test_triangle_strategy_consistency():
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, phase='train')

View File

@ -78,4 +78,5 @@ def test_virtual_dataset_3_input():
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 2048]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -134,6 +134,7 @@ def test_two_matmul():
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, y, b, phase='train')
strategies = _executor._get_shard_strategy(net)
expected_strategies = {'Default/network-Net/MatMul-op0': [[16, 1], [1, 1]],

View File

@ -71,4 +71,5 @@ def test_four_matmul_linear():
net = GradWrap(NetWithLoss(Net(strategy1)))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)

View File

@ -77,4 +77,5 @@ def test_zig_zag_graph():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, z, w, a)

View File

@ -89,4 +89,5 @@ def test_marin_loss():
net = GradWrap(NetWithLoss(MarginCE()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)

View File

@ -45,6 +45,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -108,6 +108,7 @@ def test_batch():
x = Tensor(np.ones([128, 16, 34, 34]), dtype=ms.float32)
w1 = Tensor(np.ones([128, 8, 32, 32]), dtype=ms.float32)
w2 = Tensor(np.ones([128, 64, 24, 24]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, w1, w2)

View File

@ -70,4 +70,5 @@ def test_batch_parallel_dropout():
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -68,4 +68,5 @@ def test_matmul_add():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -73,4 +73,5 @@ def test_two_matmul_batchnorm_ex():
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -68,6 +68,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x1)
context.reset_auto_parallel_context()
@ -77,6 +78,7 @@ def compile_net2(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x1, _x2)
context.reset_auto_parallel_context()

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -84,6 +84,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -51,4 +51,5 @@ def test_dmnet_train_step():
label = Tensor(np.zeros([32, 768]).astype(np.float32))
net = DenseMutMulNet()
net = train_step_with_loss_warp(DenseMutMulNet())
net.set_train()
_executor.compile(net, input_, label)

View File

@ -37,6 +37,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)

View File

@ -54,6 +54,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -66,6 +66,7 @@ def test_embeddinglookup_reducescatter_false():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -77,6 +78,7 @@ def test_embeddinglookup_reducescatter_true():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -88,6 +90,7 @@ def test_embeddinglookup_reducescatter_false_grad():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -100,6 +103,7 @@ def test_embeddinglookup_reducescatter_true_grad():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -114,6 +118,7 @@ def test_embeddinglookup_semi_auto1():
net.set_auto_parallel()
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -128,4 +133,5 @@ def test_embeddinglookup_semi_auto2():
net.set_auto_parallel()
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)

View File

@ -0,0 +1,69 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import Cell
from mindspore.ops import operations as P
class Net(Cell):
def __init__(self, mul_weight, strategy1=None, strategy2=None):
super().__init__()
self.mul = P.Mul().shard(strategy1)
self.neg = P.Neg().shard(strategy2)
self.mul_weight = Parameter(mul_weight, "w1")
def construct(self, x, b):
out = self.mul(x, self.mul_weight)
out = self.neg(out)
return out
class EvalNet(Cell):
def __init__(self, network, strategy2=None):
super().__init__()
self.network = network
self.relu = P.ReLU().shard(strategy2)
def construct(self, x, b):
out = self.network(x, b)
out1 = self.relu(out)
return out, out1
_x = Tensor(np.ones([64, 64]), dtype=ms.float32)
_w1 = Tensor(np.ones([64, 64]), dtype=ms.float32)
_b = Tensor(np.ones([64, 64]), dtype=ms.float32)
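# compile the same network in train mode and then in eval mode; the eval graph exercises the last-node strategy handling.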
def test_train_and_eval():
context.set_context(save_graphs=True, mode=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16)
strategy1 = ((4, 4), (4, 4))
strategy2 = ((4, 4),)
net = Net(_w1, strategy1, strategy2)
eval_net = EvalNet(net, strategy2=strategy2)
net.set_auto_parallel()
net.set_train()
_executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)
eval_net.set_train(mode=False)
eval_net.set_auto_parallel()
_executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)
context.reset_auto_parallel_context()

View File

@ -58,6 +58,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -41,6 +41,7 @@ _b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
def compile_net(net):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -71,6 +71,7 @@ def test_gatherv2_semi_auto0():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -83,6 +84,7 @@ def test_gatherv2_semi_auto1():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -95,6 +97,7 @@ def test_gatherv2_semi_auto2():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -107,6 +110,7 @@ def test_gatherv2_semi_auto3():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -119,6 +123,7 @@ def test_gatherv2_semi_auto4():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -131,6 +136,7 @@ def test_gatherv2_semi_auto5():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -142,6 +148,7 @@ def test_gatherv2_semi_auto6():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -153,6 +160,7 @@ def test_gatherv2_semi_auto7():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -165,6 +173,7 @@ def test_gatherv2_semi_auto8():
x = Tensor(np.ones([64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -174,6 +183,7 @@ def test_gatherv2_auto0():
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -183,4 +193,5 @@ def test_gatherv2_auto1():
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)

View File

@ -65,6 +65,7 @@ def test_dropout_semi_auto():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 128]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -77,6 +78,7 @@ def test_dropout_semi_auto2():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 128]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -89,6 +91,7 @@ def test_dropout_semi_auto3():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 128]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -99,4 +102,5 @@ def test_dropout_auto():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 128]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -53,6 +53,7 @@ def check_initializer_weight_slice(init_name="Uniform"):
weight = initializer(init_name, [64, 32], ms.float32)
net = Net(strategy1, strategy2, weight)
net.set_auto_parallel()
net.set_train()
exe.compile(net, x, auto_parallel_mode=True, phase='train')
hccl.rank_id = rank_save
return net.parameters_dict()['w1'].data.asnumpy()
@ -131,6 +132,7 @@ def test_check_initializer_weight_slice_seed(init_name="Uniform"):
weight = initializer(init_name, [64, 32], ms.float32)
net = Net(strategy1, strategy2, weight)
net.set_auto_parallel()
net.set_train()
exe.compile(net, x, auto_parallel_mode=True, phase='train')
hccl.rank_id = rank_save
return net.parameters_dict()['w1'].data.asnumpy()

View File

@ -75,4 +75,5 @@ def test_l2normalize_matmul():
x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
b = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -52,6 +52,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -73,4 +73,5 @@ def test_linear():
y = Tensor(np.ones([64, 32]), dtype=ms.float32)
bias = Tensor(np.ones([64]), dtype=ms.float32)
label = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, bias, label)

View File

@ -95,5 +95,6 @@ def test_two_matmul():
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)
count = count + 1

View File

@ -37,6 +37,7 @@ class NetWithLoss(nn.Cell):
def compile_net(net, x, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, b)

View File

@ -67,6 +67,7 @@ def compile_net(net):
optimizer.sparse_opt.add_prim_attr("primitive_target", "CPU")
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()

View File

@ -64,6 +64,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()

View File

@ -75,4 +75,5 @@ def test_two_matmul_dropout():
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -51,6 +51,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)

View File

@ -87,4 +87,5 @@ def test_two_matmul():
b = Tensor(np.ones([32, 64]), dtype=ms.float32)
z = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y, b, z)

View File

@ -43,6 +43,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -278,6 +278,7 @@ def test_bn_reshape_dense_bn_train_loss():
net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))
net.set_auto_parallel()
net.set_train()
_executor.compile(net, input_, label)
@ -292,6 +293,7 @@ def test_semi_one_hot_net_batch():
net = GradWrap(NetWithLoss(net))
net.set_auto_parallel()
net.set_train()
_executor.compile(net, input_, label)

View File

@ -76,4 +76,5 @@ def test_one_weight_parameter():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, x, b)

View File

@ -78,6 +78,7 @@ def compile_graph(strategy1, strategy2, strategy3, strategy4, auto=False, onthot
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64]), dtype=ms.int32)
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -84,6 +84,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x)
context.reset_auto_parallel_context()
@ -93,6 +94,7 @@ def compile_net1(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x1)
context.reset_auto_parallel_context()
@ -102,6 +104,7 @@ def compile_net2(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x2)
context.reset_auto_parallel_context()

View File

@ -76,6 +76,7 @@ def auto_parallel_compile_net(mode, dev_num, strategy1=None, strategy2=None):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_network = TrainOneStepCell(net, optimizer)
train_network.set_auto_parallel()
train_network.set_train()
_executor.compile(train_network, inputs, label)
context.reset_auto_parallel_context()

View File

@ -56,6 +56,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -74,6 +74,7 @@ def test_gatherv2_semi_samestage1():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
def test_gatherv2_semi_samestage2():
@ -86,4 +87,5 @@ def test_gatherv2_semi_samestage2():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)
@ -166,6 +167,7 @@ def test_prelu_parallel_success3():
w = Tensor(np.random.rand(16), dtype=ms.float32)
net = GradWrap3(NetWithLoss3(Net(strategy1, strategy2)))
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, w)

View File

@ -69,11 +69,13 @@ class GradWrap(nn.Cell):
def compile_net_no_bias(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -317,6 +317,7 @@ class ReshapeNet6(nn.Cell):
def compile_net(net, input_):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, input_)

View File

@ -44,6 +44,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -63,6 +63,7 @@ class Net(nn.Cell):
def compile_net(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)

View File

@ -47,6 +47,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -67,6 +67,7 @@ def test_reshape_unexpand():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_1():
@ -89,6 +90,7 @@ def test_reshape_unexpand_1():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_2():
@ -111,6 +113,7 @@ def test_reshape_unexpand_2():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_3():
@ -134,6 +137,7 @@ def test_reshape_unexpand_3():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_4():
@ -157,6 +161,7 @@ def test_reshape_unexpand_4():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_5():
@ -180,6 +185,7 @@ def test_reshape_unexpand_5():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_6():
@ -203,6 +209,7 @@ def test_reshape_unexpand_6():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_7():
@ -235,6 +242,7 @@ def test_reshape_unexpand_7():
x = Tensor(np.ones([32, 3, 224, 224]), dtype=ms.float32)
net = GradWrap(NetWithLoss(Net()))
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)
def test_reshape_unexpand_8():
@ -257,4 +265,5 @@ def test_reshape_unexpand_8():
net = GradWrap(NetWithLoss(Net()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)

View File

@ -60,4 +60,5 @@ def test_sum_as_loss():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)

View File

@ -52,6 +52,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x)

View File

@ -107,4 +107,5 @@ def test_two_subgraphs():
net = TrainStepWrap(NetWithLoss(Net()))
input_x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32)
net.set_auto_parallel()
net.set_train()
_executor.compile(net, input_x)

View File

@ -43,6 +43,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -48,6 +48,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -60,6 +60,7 @@ def test_bprop_with_sparse_feature_allreduce():
net = GradWrap(Net())
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x)
@ -87,6 +88,7 @@ def test_bprop_with_sparse_feature_mirror():
def compile_net(net):
optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_train()
_executor.compile(train_net, _x, _b)
net = Net()
@ -119,6 +121,7 @@ def test_bprop_with_sparse_feature_dataparallel():
def compile_net(net):
optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_train()
_executor.compile(train_net, _x, _b)
net = Net()

View File

@ -72,6 +72,7 @@ def test_gatherv2_semi_auto0():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -84,6 +85,7 @@ def test_gatherv2_semi_auto1():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -96,6 +98,7 @@ def test_gatherv2_semi_auto2():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -108,6 +111,7 @@ def test_gatherv2_semi_auto3():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -120,6 +124,7 @@ def test_gatherv2_semi_auto4():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -132,6 +137,7 @@ def test_gatherv2_semi_auto5():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -143,6 +149,7 @@ def test_gatherv2_semi_auto6():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -154,6 +161,7 @@ def test_gatherv2_semi_auto7():
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -163,6 +171,7 @@ def test_gatherv2_auto0():
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -172,6 +181,7 @@ def test_gatherv2_auto1():
net.set_auto_parallel()
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -184,6 +194,7 @@ def test_gatherv2_cpu0():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -196,6 +207,7 @@ def test_gatherv2_cpu1():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)
@ -208,4 +220,5 @@ def test_gatherv2_cpu2():
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x, y)

View File

@ -79,6 +79,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x)
context.reset_auto_parallel_context()
@ -88,6 +89,7 @@ def compile_net1(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x1)
context.reset_auto_parallel_context()

View File

@ -66,10 +66,12 @@ class GradWrap4(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)
def compile_net_no_bias(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)
def test_no_grad():
@ -120,6 +122,7 @@ def test_grad_sens_parameter_type():
sens = Tensor(np.ones([128, 64]), dtype=ms.float32)
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b, sens, phase='train', auto_parallel_mode=True)
x_layout = ([8, 8], [1, -1], [16, 32], 0, True, '')
y_layout = ([8, 8], [-1, 0], [32, 8], 0, True, '')

View File

@ -45,6 +45,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -39,6 +39,7 @@ _b = Tensor(np.ones([64, 32]), dtype=ms.float32)
def compile_net(net):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -76,4 +76,5 @@ def test_two_matmul():
b = Tensor(np.ones([128, 128]), dtype=ms.float32)
a = Tensor(np.ones([128, 128]), dtype=ms.float32)
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b, a)

View File

@ -87,6 +87,7 @@ def test_six_matmul_save():
net.set_auto_parallel()
x1 = Tensor(np.ones([32, 32]), dtype=ms.float32)
x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x1, x6)
@ -149,6 +150,7 @@ def test_six_matmul_load():
x1 = Tensor(np.ones([32, 32]), dtype=ms.float32)
x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
x7 = Tensor(np.ones([32, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x1, x6, x7)
@ -205,6 +207,7 @@ def test_six_matmul_save_auto():
net.set_auto_parallel()
x1 = Tensor(np.ones([32, 32]), dtype=ms.float32)
x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x1, x6)
@ -265,4 +268,5 @@ def test_six_matmul_load_auto():
x1 = Tensor(np.ones([32, 32]), dtype=ms.float32)
x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
x7 = Tensor(np.ones([32, 32]), dtype=ms.float32)
net.set_train()
_executor.compile(net, x1, x6, x7)

View File

@ -71,6 +71,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -37,6 +37,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)

View File

@ -64,6 +64,7 @@ def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()

View File

@ -34,7 +34,7 @@ class Net(Cell):
return out
class EvalNet(Cell):
def __init__(self, network, strategy2=None):
super().__init__()
self.network = network
@ -46,9 +46,9 @@ class EvalNet(Cell):
return out
_x = Tensor(np.ones([64, 64]), dtype=ms.float32)
_w1 = Tensor(np.ones([64, 64]), dtype=ms.float32)
_b = Tensor(np.ones([64, 64]), dtype=ms.float32)
def test_train_and_eval():
@ -58,8 +58,8 @@ def test_train_and_eval():
strategy2 = ((4, 4),)
net = Net(_w1, strategy1, strategy2)
eval_net = EvalNet(net, strategy2=strategy2)
net.set_auto_parallel()
net.set_train()
_executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)
eval_net.set_train(mode=False)

View File

@ -49,6 +49,7 @@ class GradWrap(nn.Cell):
def compile_net(net, x, y, b):
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y, b)

View File

@ -80,4 +80,5 @@ def test_two_weights_parameter():
train_net = OneStepCell(net_with_loss)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
train_net.set_auto_parallel()
train_net.set_train()
_executor.compile(train_net, x, b)

Some files were not shown because too many files have changed in this diff.