forked from mindspore-Ecosystem/mindspore
!21283 VM bug fix and test_cont_case add gpu
Merge pull request !21283 from chenfei_mindspore/vm-bug-fix
This commit is contained in:
commit 1245eed4d1
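The test-side changes below all follow one pattern: the hardcoded device_target="Ascend" is dropped from context.set_context() so the target is left to the CI environment, and @pytest.mark.platform_x86_gpu_training is added so the same cases are also scheduled on GPU machines. A minimal sketch of that pattern, assuming a trivial stand-in network (the test name, the net, and the tensor values are illustrative only, not taken from this diff):

import numpy as np
import pytest
import mindspore as ms
from mindspore import Tensor, context, nn


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training  # newly added mark: schedule the case on GPU CI too
@pytest.mark.env_onecard
def test_some_control_flow_case():
    # Before this commit: context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # After: only the execution mode is set; the device target comes from the environment.
    context.set_context(mode=context.GRAPH_MODE)
    net = nn.ReLU()  # stand-in network, not part of the diff
    x = Tensor(np.ones([2, 2]).astype(np.float32), dtype=ms.float32)
    out = net(x)
    assert out.shape == (2, 2)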
@@ -127,6 +127,11 @@ void CompileGraph::AddInput(const AnfNodePtr &node) {
    MS_LOG(DEBUG) << "Input node is null " << node->DebugString(true);
    (void)Ref(node);
    return;
  } else if (node->isa<ValueNode>()) {
    // Value nodes may be reused in different graphs or by different nodes; copy the value node to keep the stack correct.
    auto copy_value_node = NewValueNode(node->cast<ValueNodePtr>()->value());
    (void)Ref(copy_value_node);
    return;
  }
  AddInst(Instruction::kInput, Ref(node));
  set_height(height_ + 1);

@@ -675,10 +675,9 @@ class SideEffectControlFlowAssignDependWhileNet(Cell):
return grad_out

# This case cannot pass yet because of the GPU RT problem, so it only runs on Ascend for now.
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_side_effect_grad_control_flow_assign_depend_while_net():
context.set_context(mode=context.GRAPH_MODE)

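The hunk above touches a case that was previously restricted to Ascend because of a GPU RT problem and now also carries the GPU platform mark. If a backend still cannot run a case, one pattern for guarding it at runtime (a hedged sketch, not part of this commit) is to check the configured device target and return early:

from mindspore import context

# Hypothetical guard, not in the diff: bail out when the test lands on a GPU machine.
def _runs_on_gpu():
    return context.get_context("device_target") == "GPU"

def test_side_effect_grad_control_flow_assign_depend_while_net():
    context.set_context(mode=context.GRAPH_MODE)
    if _runs_on_gpu():
        return  # skip the body until the GPU RT problem is fixed
    # ... original test body unchanged ...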
@@ -23,6 +23,7 @@ from mindspore.common import dtype as mstype
grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")

def test_for_in_for_01():
class ForInForNet(nn.Cell):
def __init__(self):

@@ -87,10 +88,10 @@ def test_for_in_for_02():
self.param_b = Parameter(Tensor(11, mstype.int32), name='b')

def construct(self, x):
for _ in range(0, 10):
for _ in range(0, 3):
x = x * 2
self.assign(self.param_a, x + self.param_a)
for _ in range(0, 5):
for _ in range(0, 2):
x = self.add(x, x)
self.param_b += 1
y = self.sub(x, self.param_b + self.param_a)

@@ -23,6 +23,7 @@ from mindspore import nn
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.ops import composite as C
from mindspore.ops import operations as P

# from tests.vm_impl.math_ops_vm_impl import *
# from tests.vm_impl.vm_interface import *
# from tests.vm_impl import *

@@ -54,8 +55,9 @@ def test_while_grad():
def construct(self, *inputs):
return grad_all(self.net)(*inputs)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -63,15 +65,16 @@ def test_while_grad():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)
assert np.allclose(graph_output[1].asnumpy(), pynative_output[1].asnumpy(), 0.0001, 0.0001)
assert np.allclose(graph_output[2].asnumpy(), pynative_output[2].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_const_param_grad():
class MyWhileNet(nn.Cell):

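Each case in this file repeats the same parity check: run the network once in graph mode, once in PyNative mode, and compare the outputs with np.allclose, passing rtol and atol positionally as 0.0001. A hedged sketch of that shared structure for a single-output net (check_graph_vs_pynative is an illustrative helper, not a function defined in the test file):

import numpy as np
from mindspore import context


def check_graph_vs_pynative(net, *inputs, rtol=0.0001, atol=0.0001):
    # Run the same cell under both execution modes and require numerically close results.
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(*inputs)
    context.set_context(mode=context.PYNATIVE_MODE)
    pynative_out = net(*inputs)
    # The tests call np.allclose(a, b, 0.0001, 0.0001); the positional arguments are rtol and atol.
    assert np.allclose(graph_out.asnumpy(), pynative_out.asnumpy(), rtol, atol)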
@@ -93,7 +96,8 @@ def test_while_with_const_param_grad():
def construct(self, *inputs):
return grad_all(self.net)(*inputs)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor([1.1], dtype=ms.float32)

@@ -104,9 +108,10 @@ def test_while_with_const_param_grad():
assert np.allclose(graph_output[0].asnumpy(), expect_one, 0.0001, 0.0001)
assert np.allclose(graph_output[1].asnumpy(), expect_two, 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_variable_grad():
class MyWhileNet(nn.Cell):

@@ -128,7 +133,8 @@ def test_while_with_variable_grad():
def construct(self, *inputs):
return grad_all(self.net)(*inputs)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor([1.1], dtype=ms.float32)

@@ -139,9 +145,10 @@ def test_while_with_variable_grad():
assert np.allclose(graph_output[0].asnumpy(), expect_one, 0.0001, 0.0001)
assert np.allclose(graph_output[1].asnumpy(), expect_two, 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_forward():
class MyWhileNet(nn.Cell):

@@ -160,8 +167,9 @@ def test_while_with_param_forward():
out = out + x + self.param
idx = idx + 1
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
net = MyWhileNet()
idx = Tensor(np.array(0), dtype=ms.int32)
end = Tensor(np.array(2), dtype=ms.int32)

@@ -170,12 +178,14 @@ def test_while_with_param_forward():
expect = np.array([[[6, 8], [10, 12]], [[19, 22], [25, 28]]], dtype=np.int32)
assert np.allclose(graph_output.asnumpy(), expect, 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_endless_case():
"""endless case when optimization"""

class MyWhileNet(nn.Cell):
def __init__(self):
super().__init__()

@@ -190,21 +200,23 @@ def test_while_endless_case():
out = out + part
idx = idx + 1
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
net = MyWhileNet()
idx = Tensor(np.array(0), dtype=ms.int32)
end = Tensor(np.array(2), dtype=ms.int32)
x = Tensor(np.arange(8).reshape(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_grad():
class MyWhileNet(nn.Cell):

@@ -232,7 +244,8 @@ def test_while_with_param_grad():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -242,9 +255,10 @@ def test_while_with_param_grad():
expect = np.array([[[2, 2], [2, 2]], [[2, 2], [2, 2]]], dtype=np.int32)
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_forward_with_const_branch():
class MyWhileNet(nn.Cell):

@@ -264,8 +278,9 @@ def test_while_with_param_forward_with_const_branch():
out = out + idx + self.param
idx = idx + 1
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = while_net
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -273,16 +288,18 @@ def test_while_with_param_forward_with_const_branch():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_opt_endless():
"""endless during optimization case"""

class MyWhileNet(nn.Cell):
def __init__(self):
super().__init__()

@@ -308,8 +325,9 @@ def test_while_opt_endless():
def construct(self, *inputs):
return grad_all(self.net)(*inputs)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -317,7 +335,7 @@ def test_while_opt_endless():
x = Tensor(np.ones([2, 2, 2]).astype(np.float32) * 3, dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@@ -343,8 +361,9 @@ def test_no_while_call():
else:
out = out + idx + self.param
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = while_net
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -352,13 +371,14 @@ def test_no_while_call():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_grad_with_const_branch():
class MyWhileNet(nn.Cell):

@@ -387,8 +407,9 @@ def test_while_with_param_grad_with_const_branch():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -396,10 +417,11 @@ def test_while_with_param_grad_with_const_branch():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.skip(reason="not supported yet")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training

@@ -435,8 +457,9 @@ def test_for_while_with_param_grad_with_const_branch():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -444,13 +467,14 @@ def test_for_while_with_param_grad_with_const_branch():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_for_while_with_param_grad_basic():
class MyWhileNet(nn.Cell):

@@ -479,8 +503,9 @@ def test_for_while_with_param_grad_basic():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -488,13 +513,14 @@ def test_for_while_with_param_grad_basic():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_for_while_with_param_grad_normal():
class MyWhileNet(nn.Cell):

@@ -523,8 +549,9 @@ def test_for_while_with_param_grad_normal():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -532,13 +559,14 @@ def test_for_while_with_param_grad_normal():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_basic_grad():
class MyWhileNet(nn.Cell):

@@ -564,8 +592,9 @@ def test_while_with_param_basic_grad():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -573,13 +602,14 @@ def test_while_with_param_basic_grad():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_basic_grad_mul():
class MyWhileNet(nn.Cell):

@@ -605,8 +635,9 @@ def test_while_with_param_basic_grad_mul():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -614,13 +645,14 @@ def test_while_with_param_basic_grad_mul():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_basic_grad_two():
class MyWhileNet(nn.Cell):

@@ -647,8 +679,9 @@ def test_while_with_param_basic_grad_two():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -656,14 +689,15 @@ def test_while_with_param_basic_grad_two():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)
assert np.allclose(graph_output[1].asnumpy(), pynative_output[1].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_with_param_basic_grad_three():
class MyWhileNet(nn.Cell):

@@ -691,8 +725,9 @@ def test_while_with_param_basic_grad_three():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -700,15 +735,16 @@ def test_while_with_param_basic_grad_three():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)
assert np.allclose(graph_output[1].asnumpy(), pynative_output[1].asnumpy(), 0.0001, 0.0001)
assert np.allclose(graph_output[2].asnumpy(), pynative_output[2].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_if_with_param_grad():
class MyWhileNet(nn.Cell):

@@ -737,8 +773,9 @@ def test_while_if_with_param_grad():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -746,10 +783,11 @@ def test_while_if_with_param_grad():
x = Tensor(np.ones([2, 2, 2]).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.skip(reason="not supported yet")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training

@@ -778,8 +816,9 @@ def test_while_with_param_grad_not_enter_while():
def construct(self, a, b, c):
return grad_by_list(self.net, self.weights)(a, b, c)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
while_net = MyWhileNet()
net = GradNet(while_net)
idx = Tensor(np.array(3), dtype=ms.int32)

@@ -787,13 +826,14 @@ def test_while_with_param_grad_not_enter_while():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_with_param_if_by_if_forward():
class MyIfByIfNet(nn.Cell):

@@ -810,12 +850,13 @@ def test_with_param_if_by_if_forward():
else:
out = out + x
if a == b:
out = out + x*3 + self.param
out = out + x * 3 + self.param
else:
out = out + x*2
out = out + x * 2
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -823,13 +864,14 @@ def test_with_param_if_by_if_forward():
x = Tensor(np.ones([2, 2, 2]).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_with_param_if_by_if_grad_inputs():
class MyIfByIfNet(nn.Cell):

@@ -844,7 +886,7 @@ def test_with_param_if_by_if_grad_inputs():
if a < b:
out = out + x + self.param * 4
if a == b:
out = out + x*3 + self.param * 3
out = out + x * 3 + self.param * 3
return out

class GradNet(nn.Cell):

@@ -854,8 +896,9 @@ def test_with_param_if_by_if_grad_inputs():
def construct(self, *inputs):
return grad_all(self.net)(*inputs)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = GradNet(if_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -863,15 +906,16 @@ def test_with_param_if_by_if_grad_inputs():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)
assert np.allclose(graph_output[1].asnumpy(), pynative_output[1].asnumpy(), 0.0001, 0.0001)
assert np.allclose(graph_output[2].asnumpy(), pynative_output[2].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_with_param_if_by_if_grad_parameter():
class MyIfByIfNet(nn.Cell):

@@ -886,7 +930,7 @@ def test_with_param_if_by_if_grad_parameter():
if a < b:
out = out + x + self.param * 2
if a == b:
out = out + x*3 + self.param
out = out + x * 3 + self.param
return out

class GradNet(nn.Cell):

@@ -897,8 +941,9 @@ def test_with_param_if_by_if_grad_parameter():
def construct(self, *inputs):
return grad_by_list(self.net, self.weights)(*inputs)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = GradNet(if_net)
idx = Tensor(np.array(0), dtype=ms.int32)

@@ -906,13 +951,14 @@ def test_with_param_if_by_if_grad_parameter():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_with_param_if_by_if_grad_param_excute_null():
class MyIfByIfNet(nn.Cell):

@@ -936,8 +982,9 @@ def test_with_param_if_by_if_grad_param_excute_null():
def construct(self, *inputs):
return grad_by_list(self.net, self.weights)(*inputs)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = GradNet(if_net)
idx = Tensor(np.array(4), dtype=ms.int32)

@@ -945,13 +992,14 @@ def test_with_param_if_by_if_grad_param_excute_null():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_by_if_return_inside_grad():
class MyIfByIfNet(nn.Cell):

@@ -977,8 +1025,9 @@ def test_if_by_if_return_inside_grad():
def construct(self, *inputs):
return grad_by_list(self.net, self.weights)(*inputs)

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = GradNet(if_net)
idx = Tensor(np.array(1), dtype=ms.int32)

@@ -986,13 +1035,14 @@ def test_if_by_if_return_inside_grad():
x = Tensor(np.random.randn(2, 2, 2).astype(np.float32), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_by_if_forward():
class MyIfByIfNet(nn.Cell):

@@ -1019,8 +1069,9 @@ def test_if_by_if_forward():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1028,16 +1079,18 @@ def test_if_by_if_forward():
x = Tensor(np.array(4), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_by_if_forward_control_tuple_switch():
"""tuple_get from switch op will generate new switch inside to eliminate tuple_get"""

class Branch3Net(nn.Cell):
def __init__(self):
super().__init__()

@@ -1052,6 +1105,7 @@ def test_if_by_if_forward_control_tuple_switch():
else:
b = self.add(a, x)
return a, b, x

class Branch2Net(nn.Cell):
def __init__(self):
super().__init__()

@@ -1086,8 +1140,9 @@ def test_if_by_if_forward_control_tuple_switch():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1095,13 +1150,14 @@ def test_if_by_if_forward_control_tuple_switch():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_by_if_forward_control_inside_net():
class Branch3Net(nn.Cell):

@@ -1120,6 +1176,7 @@ def test_if_by_if_forward_control_inside_net():
a = a * b
out = a + b + x
return out

class Branch2Net(nn.Cell):
def __init__(self):
super().__init__()

@@ -1152,8 +1209,9 @@ def test_if_by_if_forward_control_inside_net():
a = self.sub(a, b)
out = self.net(a, b, x)
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1161,10 +1219,11 @@ def test_if_by_if_forward_control_inside_net():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

@@ -1194,8 +1253,9 @@ def test_if_by_if_forward_use_namespace():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1203,10 +1263,11 @@ def test_if_by_if_forward_use_namespace():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

@@ -1240,8 +1301,9 @@ def test_if_by_if_forward_use_global_op():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1249,10 +1311,11 @@ def test_if_by_if_forward_use_global_op():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

@@ -1273,8 +1336,9 @@ def test_for_with_if_by_if_forward():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1282,10 +1346,11 @@ def test_for_with_if_by_if_forward():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

@@ -1308,8 +1373,9 @@ def test_for_with_if_by_if_forward_namespace():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1317,7 +1383,7 @@ def test_for_with_if_by_if_forward_namespace():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@@ -1355,8 +1421,9 @@ def test_if_by_if_forward_const_branch_inner():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1364,10 +1431,11 @@ def test_if_by_if_forward_const_branch_inner():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

@@ -1401,8 +1469,9 @@ def test_if_by_if_forward_all_const_branch():
a = a * b
out = a + b + x
return out

# graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE)
if_net = MyIfByIfNet()
net = if_net
idx = Tensor(np.array(2), dtype=ms.float32)

@@ -1410,13 +1479,14 @@ def test_if_by_if_forward_all_const_branch():
x = Tensor(np.array(0), dtype=ms.float32)
graph_output = net(idx, end, x)
# pynative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE)
pynative_output = net(idx, end, x)
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)

@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_const_grad():
class MyNet(nn.Cell):

@@ -1452,6 +1522,7 @@ def test_if_const_grad():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_by_if_const_grad():
class MyNet(nn.Cell):

@@ -1491,6 +1562,7 @@ def test_if_by_if_const_grad():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_while_const_grad():
class MyNet(nn.Cell):

@@ -1524,6 +1596,7 @@ def test_while_const_grad():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_by_while_const_grad():
class MyNet(nn.Cell):