forked from mindspore-Ecosystem/mindspore
!48565 support sequence ascend test
Merge pull request !48565 from NaCN/add_sequence_ascend
This commit is contained in: commit bffa7667a9
@@ -594,6 +594,7 @@ void AscendGraphOptimization::SetOperatorInfo(const KernelGraphPtr &graph) {
    if (cnode == nullptr) {
      std::pair<std::string, ExceptionType> failure_info = std::make_pair(msg, etype);
      device::ascend::HandleKernelSelectFailure(graph, node, failure_info);
      continue;
    }
    (void)mng->Replace(node, cnode);
    MS_LOG(INFO) << msg << " but expand success.";

@@ -12,12 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import mindspore
from mindspore.common import mutable
from mindspore.nn import Cell
from mindspore.ops.composite import GradOperation
from mindspore.common import ParameterTuple


def context_prepare():
    if mindspore.get_context("device_target") == "Ascend":
        os.environ["GRAPH_OP_RUN"] = "1"


class _Grad(Cell):
    def __init__(self, grad, network, wrt_params=False, real_inputs_count=None):
        super().__init__()

@@ -33,6 +40,7 @@ class _Grad(Cell):
        return self.grad(self.network)(*inputs)


class GradOfFirstInput(_Grad):
    """
    get grad of first input
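Note: every test module touched by this PR opens with the same preamble: set GRAPH_MODE, then call context_prepare() so that Ascend runs export GRAPH_OP_RUN=1 before any graph is compiled (the helper is a no-op on other backends). A minimal sketch of that preamble follows; the AddNet cell is a hypothetical stand-in and is not part of this diff.

import mindspore
from mindspore import context
from mindspore.nn import Cell
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()  # on Ascend this sets os.environ["GRAPH_OP_RUN"] = "1"; elsewhere it does nothing


class AddNet(Cell):
    # hypothetical cell, used only to show where a network definition sits after the preamble
    def construct(self, x, y):
        return x + y
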
@@ -15,14 +15,17 @@
import pytest
from mindspore import context
from mindspore.nn import Cell
from tuple_help import TupleFactory
from sequence_help import TupleFactory, context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_add():
    """

@@ -40,7 +43,6 @@ def test_scalar_add():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -49,6 +51,8 @@ def test_scalar_add():
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_sub():
    """

@@ -66,7 +70,6 @@ def test_scalar_sub():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -76,6 +79,8 @@ def test_scalar_sub():
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_mul():
    """

@@ -93,7 +98,6 @@ def test_scalar_mul():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -102,6 +106,8 @@ def test_scalar_mul():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_div():
    """

@@ -119,7 +125,6 @@ def test_scalar_div():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -128,6 +133,8 @@ def test_scalar_div():
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_mod():
    """

@@ -145,7 +152,6 @@ def test_scalar_mod():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -154,6 +160,8 @@ def test_scalar_mod():
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_floordiv():
    """

@@ -171,7 +179,6 @@ def test_scalar_floordiv():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -180,6 +187,8 @@ def test_scalar_floordiv():
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_eq():
    """

@@ -197,7 +206,6 @@ def test_scalar_eq():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -206,6 +214,8 @@ def test_scalar_eq():
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_ge():
    """

@@ -223,7 +233,6 @@ def test_scalar_ge():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -232,6 +241,8 @@ def test_scalar_ge():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_gt():
    """

@@ -249,7 +260,6 @@ def test_scalar_gt():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -258,6 +268,8 @@ def test_scalar_gt():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_le():
    """

@@ -275,7 +287,6 @@ def test_scalar_le():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

@@ -284,6 +295,8 @@ def test_scalar_le():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_scalar_lt():
    """

@@ -301,7 +314,6 @@ def test_scalar_lt():
    net_ms = Net()
    input_x = 3
    input_y = 4
    context.set_context(mode=context.GRAPH_MODE)
    fact = TupleFactory(net_ms, func, (input_x, input_y))
    fact.forward_cmp()
    fact.grad_impl()

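Note: each scalar test in this file follows one pattern: a small Cell computes the scalar op in graph mode, a plain-Python func computes the reference, and TupleFactory compares the two and then exercises the gradient. A sketch of the test_scalar_add shape of it, with the Net and func bodies assumed (the diff does not show them):

from mindspore import context
from mindspore.nn import Cell
from sequence_help import TupleFactory, context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class Net(Cell):
    # assumed body: the graph-mode computation under test
    def construct(self, x, y):
        return x + y


def func(x, y):
    # assumed body: the plain-Python reference
    return x + y


net_ms = Net()
fact = TupleFactory(net_ms, func, (3, 4))
fact.forward_cmp()  # presumably compares Net(3, 4) against func(3, 4)
fact.grad_impl()    # runs the backward pass through the graph
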
@@ -20,8 +20,10 @@ from mindspore import context
from mindspore.common import mutable
from mindspore.nn import Cell
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class NetAdd(nn.Cell):

@@ -44,6 +46,8 @@ class NetAddOffset(nn.Cell):

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_add():
    """

@@ -61,6 +65,8 @@ def test_seq_add():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_add_offset():
    """

@@ -78,6 +84,8 @@ def test_seq_add_offset():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_add_grad():
    """

@@ -99,6 +107,8 @@ def test_seq_add_grad():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_add_grad_other():
    """

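Note: the sequence-add tests feed mutable tuples so graph mode treats them as variable data rather than compile-time constants, which is what routes them through the sequence kernels this PR enables on Ascend. A sketch of that input pattern, with the NetAdd body assumed (it is not shown in this diff):

import mindspore.nn as nn
from mindspore import context
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class NetAdd(nn.Cell):
    # assumed body: concatenate two tuples
    def construct(self, x, y):
        return x + y


x = mutable((1, 2, 3))
y = mutable((4, 5, 6))
net = NetAdd()
out = net(x, y)  # expected (1, 2, 3, 4, 5, 6)
grads = GradOperation(get_all=True)(net)(x, y)  # gradients w.r.t. both tuple inputs, as test_seq_add_grad exercises
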
@@ -19,8 +19,10 @@ from mindspore.ops.operations import _sequence_ops as seq
from mindspore import context
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class Net(nn.Cell):

@@ -18,8 +18,10 @@ import mindspore.nn as nn
from mindspore import context
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class NetGetItem(nn.Cell):

@@ -29,6 +31,8 @@ class NetGetItem(nn.Cell):

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_getitem():
    """

@@ -46,6 +50,8 @@ def test_seq_getitem():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_getitem_grad():
    """

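Note: for getitem the pattern is the same, except the index itself is a mutable scalar so it stays dynamic in the graph. A sketch with an assumed NetGetItem body (not shown in this diff):

import mindspore.nn as nn
from mindspore import context
from mindspore.common import mutable
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class NetGetItem(nn.Cell):
    # assumed body: index a tuple with a runtime value
    def construct(self, seq_input, idx):
        return seq_input[idx]


out = NetGetItem()(mutable((1, 2, 3)), mutable(2))  # expected 3
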
@@ -19,8 +19,10 @@ from mindspore.ops.operations import _sequence_ops as seq
from mindspore import context
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class Net(nn.Cell):

@@ -18,9 +18,10 @@ from mindspore import context
from mindspore.nn import Cell
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from tuple_help import TupleFactory
from sequence_help import TupleFactory, context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class Net(Cell):

@@ -34,6 +35,8 @@ class Net(Cell):

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seqence_make_range():
    """

@@ -55,6 +58,8 @@ def test_seqence_make_range():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seqence_make_range_grad():
    """

@@ -19,8 +19,10 @@ from mindspore.ops.operations import _sequence_ops as seq
from mindspore import context
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


@pytest.mark.level1

@@ -19,8 +19,10 @@ from mindspore.ops.operations import _sequence_ops as seq
from mindspore import context
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class Net(nn.Cell):

@@ -19,8 +19,10 @@ from mindspore.ops import functional as F
from mindspore import context
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class NetSetItem(nn.Cell):

@@ -30,6 +32,8 @@ class NetSetItem(nn.Cell):

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_setitem():
    """

@@ -48,6 +52,8 @@ def test_seq_setitem():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_setitem_grad_0():
    """

@@ -66,6 +72,8 @@ def test_seq_setitem_grad_0():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_setitem_grad_1():
    """

@@ -18,13 +18,16 @@ from mindspore import context
from mindspore.ops.operations import _sequence_ops as S
from mindspore.common import mutable
from mindspore.ops.composite import GradOperation
from tuple_help import TupleFactory
from sequence_help import TupleFactory, context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_slice():
    """

@@ -54,6 +57,8 @@ def test_seq_slice():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_slice_grad():
    """

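Note: the slice tests follow the same shape, with mutable start/stop/step so the slice bounds are runtime values rather than constants. A sketch with an assumed network body (the diff only shows the imports and the pytest markers):

import mindspore.nn as nn
from mindspore import context
from mindspore.common import mutable
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class SliceNet(nn.Cell):
    # assumed body: slice a tuple with runtime bounds
    def construct(self, seq_input, start, stop, step):
        return seq_input[start:stop:step]


out = SliceNet()(mutable((1, 2, 3, 4, 5, 6)), mutable(1), mutable(5), mutable(2))  # expected (2, 4)
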
@@ -21,8 +21,10 @@ from mindspore.ops.operations import _sequence_ops as seq
import mindspore.ops as ops
from mindspore import context
from mindspore.common import mutable
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class Net(nn.Cell):

@@ -50,6 +52,8 @@ def dyn_case():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_to_tensor():
    """

@@ -17,13 +17,16 @@ import numpy as np
from mindspore.ops.operations import _sequence_ops as seq
from mindspore import context
from mindspore.nn import Cell
from tuple_help import TupleFactory
from sequence_help import TupleFactory, context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seqence_zeros_like():
    """

@@ -19,8 +19,10 @@ import mindspore.nn as nn
from mindspore.ops.operations import _sequence_ops as seq
from mindspore import context
from mindspore import Tensor
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class Net(nn.Cell):

@@ -46,6 +48,8 @@ def dyn_case():

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_seq_to_tensor():
    """

@@ -13,13 +13,15 @@
# limitations under the License.
# ============================================================================
import pytest

import numpy as np
from mindspore import context
from mindspore import Tensor
from mindspore import context, Tensor
from mindspore.nn import Cell
import mindspore
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class ShapeNet(Cell):

@@ -33,6 +35,8 @@ class ShapeNet(Cell):

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_shape():
    """

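Note: the shape test wraps Tensor.shape in a Cell so the resulting tuple flows through the sequence machinery. A sketch of what ShapeNet likely looks like; its real body is not in this diff:

import numpy as np
from mindspore import Tensor, context
from mindspore.nn import Cell
from sequence_help import context_prepare

context.set_context(mode=context.GRAPH_MODE)
context_prepare()


class ShapeNet(Cell):
    # assumed body: return the input's shape as a tuple
    def construct(self, x):
        return x.shape


out = ShapeNet()(Tensor(np.ones((2, 3), np.float32)))  # expected (2, 3)
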