forked from mindspore-Ecosystem/mindspore
Adjust ST test cases
This commit is contained in:
parent
f47f0c1395
commit
f89a1eda78
|
@ -16,8 +16,6 @@ import pytest
|
|||
from tests.st.control.cases_register import case_register
|
||||
from mindspore import context
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
from tests.st.fallback.control_flow.test_fallback_000_single_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_001_single_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_002_single_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_010_if_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_011_if_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_012_if_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_020_while_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_021_while_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_022_while_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_030_for_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_031_for_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_100_if_after_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_101_if_after_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_110_if_after_if_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_111_if_after_if_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_112_if_after_if_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_120_if_after_while_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_121_if_after_while_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_122_if_after_while_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_130_if_after_for_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_131_if_after_for_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_132_if_after_for_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_200_while_after_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_201_while_after_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_202_while_after_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_210_while_after_if_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_211_while_after_if_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_212_while_after_if_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_220_while_after_while_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_221_while_after_while_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_222_while_after_while_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_230_while_after_for_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_231_while_after_for_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_232_while_after_for_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_300_for_after_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_301_for_after_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_302_for_after_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_310_for_after_if_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_311_for_after_if_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_320_for_after_while_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_321_for_after_while_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_322_for_after_while_in_for import *
|
||||
from tests.st.fallback.control_flow.test_fallback_330_for_after_for_in_if import *
|
||||
from tests.st.fallback.control_flow.test_fallback_331_for_after_for_in_while import *
|
||||
from tests.st.fallback.control_flow.test_fallback_332_for_after_for_in_for import *
|
|
@ -0,0 +1,85 @@
|
|||
# Copyright 2022 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
import time
|
||||
import operator
|
||||
|
||||
|
||||
class Register:
    """Registry that maps ST test cases to backend targets and run levels.

    Test functions are tagged through the decorator methods (``target_*``,
    ``level0``/``level1``, ``skip``) and later executed selectively through
    :meth:`check_and_run`.
    """

    def __init__(self):
        # fn -> set of backend names ("Ascend"/"GPU"/"CPU") the case supports.
        self.case_targets = dict()
        # fn -> test level (0 or 1) the case belongs to.
        self.case_levels = dict()
        # fn -> reason string; cases present here are never executed.
        self.skip_cases = dict()

    def target_ascend(self, fn):
        """Decorator: mark *fn* as runnable on the Ascend backend."""
        self._add_target(fn, "Ascend")
        return fn

    def target_gpu(self, fn):
        """Decorator: mark *fn* as runnable on the GPU backend."""
        self._add_target(fn, "GPU")
        return fn

    def target_cpu(self, fn):
        """Decorator: mark *fn* as runnable on the CPU backend."""
        self._add_target(fn, "CPU")
        return fn

    def level0(self, fn):
        """Decorator: assign *fn* to test level 0."""
        self._add_level(fn, 0)
        return fn

    def level1(self, fn):
        """Decorator: assign *fn* to test level 1."""
        self._add_level(fn, 1)
        return fn

    def skip(self, reason):
        """Return a decorator that marks a case as skipped with *reason*."""
        def deco(fn):
            self.skip_cases[fn] = reason
            return fn

        return deco

    def _add_target(self, fn, target):
        # A case may support several targets, so accumulate them into a set.
        self.case_targets.setdefault(fn, set()).add(target)

    def _add_level(self, fn, level):
        # A later registration overwrites an earlier one for the same case.
        self.case_levels[fn] = level

    def check_and_run(self, target, level):
        """Run every registered, non-skipped case matching *target* and *level*.

        Prints a per-case wall-clock time summary (slowest first) followed by
        the total elapsed time.
        """
        time_cost = dict()
        for fn, targets in self.case_targets.items():
            if fn in self.skip_cases:
                continue
            if target not in targets:
                continue
            # A case registered without an explicit level is never selected.
            if fn not in self.case_levels:
                continue
            if self.case_levels[fn] != level:
                continue
            # Fixed typo in the progress message ("exceute" -> "execute").
            print(f"\nexecute fn:{fn}, level:{level}, target:{target}")
            start_time = time.time()
            fn()
            end_time = time.time()
            time_cost[fn] = end_time - start_time

        sorted_time_cost = sorted(time_cost.items(), key=operator.itemgetter(1), reverse=True)
        total_cost_time = 0
        for item in sorted_time_cost:
            total_cost_time += item[1]
            print("Time:", item[1], ", fn:", item[0], "\n")
        print("Total cost time:", total_cost_time)


# Shared module-level registry used by the ST test files.
case_register = Register()
|
|
@ -13,19 +13,17 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from mindspore import dtype as mstype
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_if_4():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -44,11 +42,9 @@ def test_single_if_4():
|
|||
assert res == 42
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_if_two_cond():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -66,10 +62,8 @@ def test_single_if_two_cond():
|
|||
assert res == 1
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_ascend
|
||||
def test_single_if_builtin_function_abs():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -86,10 +80,8 @@ def test_single_if_builtin_function_abs():
|
|||
assert res == -13
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_ascend
|
||||
def test_single_if_builtin_function_abs_min():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,20 +13,18 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
import mindspore as ms
|
||||
from mindspore import Tensor, jit, context, Parameter
|
||||
from mindspore.nn import Cell
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_while_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -43,11 +41,9 @@ def test_single_while_1():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_while_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -65,11 +61,9 @@ def test_single_while_2():
|
|||
assert res == 14
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_while_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -88,11 +82,9 @@ def test_single_while_3():
|
|||
assert res == 7
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_while_two_cond_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -112,11 +104,9 @@ def test_single_while_two_cond_1():
|
|||
assert res == 25
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_while_two_cond_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -135,11 +125,9 @@ def test_single_while_two_cond_2():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_while_param():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -165,7 +153,7 @@ def test_single_while_param():
|
|||
assert res == 24
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
@case_register.skip(reason='Not support graph fallback feature yet')
|
||||
def test_single_while_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -183,10 +171,8 @@ def test_single_while_numpy():
|
|||
assert (res.asnumpy() == [1, 1, 3, 4, 5]).all()
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_ascend
|
||||
def test_single_while_two_cond_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -17,15 +17,14 @@ import pytest
|
|||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from mindspore import dtype as mstype
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_for_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -43,11 +42,9 @@ def test_single_for_1():
|
|||
assert res == 21
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_for_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -67,11 +64,9 @@ def test_single_for_2():
|
|||
assert res == 21
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_for_zip():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -90,11 +85,9 @@ def test_single_for_zip():
|
|||
assert res == 9
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_single_for_builtin_function_int():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow if in if scenario"""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_if_5():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -44,11 +42,9 @@ def test_if_in_if_5():
|
|||
assert res == 4
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_else_in_if_else_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -79,11 +75,9 @@ def test_if_else_in_if_else_2():
|
|||
assert res == -16
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_if_multi_conds_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -109,10 +103,8 @@ def test_if_in_if_multi_conds_2():
|
|||
assert res == 20
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_ascend
|
||||
def test_if_in_if_4():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow if in while scenario"""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_while_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -44,11 +42,9 @@ def test_if_in_while_1():
|
|||
assert res == 7
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_while_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -67,11 +63,9 @@ def test_if_in_while_2():
|
|||
assert res == 3
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_while_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -93,11 +87,9 @@ def test_if_in_while_3():
|
|||
assert res == 6
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_while_4():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -124,11 +116,9 @@ def test_if_in_while_4():
|
|||
assert res == 5
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_while_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_for_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -43,11 +41,9 @@ def test_if_in_for_tensor():
|
|||
assert res == 14
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_for_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -68,11 +64,9 @@ def test_if_in_for_tensor_2():
|
|||
assert res == 19
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_for_tensor_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -92,11 +86,9 @@ def test_if_in_for_tensor_3():
|
|||
assert res == 17
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_in_for_numpy_5():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,19 +13,17 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import mindspore as ms
|
||||
from mindspore import Tensor, jit, context, nn, Parameter
|
||||
import numpy as np
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_if_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -46,11 +44,9 @@ def test_while_in_if_1():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_if_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -72,11 +68,9 @@ def test_while_in_if_2():
|
|||
assert res == 12
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_if_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -99,11 +93,9 @@ def test_while_in_if_3():
|
|||
assert res == 7
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_two_cond_in_if_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -126,11 +118,9 @@ def test_while_two_cond_in_if_1():
|
|||
assert res == 21
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_two_cond_in_if_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -152,11 +142,9 @@ def test_while_two_cond_in_if_2():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_param_in_if():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,19 +13,17 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import mindspore as ms
|
||||
from mindspore import Tensor, jit, context, nn, Parameter
|
||||
import numpy as np
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_while_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -48,11 +46,9 @@ def test_while_in_while_1():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_while_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -74,11 +70,9 @@ def test_while_in_while_2():
|
|||
assert res == 4
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_while_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -102,11 +96,9 @@ def test_while_in_while_3():
|
|||
assert res == 7
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_while_with_two_cond_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -129,11 +121,9 @@ def test_while_in_while_with_two_cond_1():
|
|||
assert res == 27
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_while_with_two_cond_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -155,11 +145,9 @@ def test_while_in_while_with_two_cond_2():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_while_with_two_cond_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -181,11 +169,9 @@ def test_while_in_while_with_two_cond_3():
|
|||
assert res == -1
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_while_with_param():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,19 +13,17 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import mindspore as ms
|
||||
from mindspore import Tensor, jit, context, nn, Parameter
|
||||
import numpy as np
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_for_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_while_in_for_1():
|
|||
assert res == 27
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_for_zip():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -78,11 +74,9 @@ def test_while_in_for_zip():
|
|||
assert res == 18
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_for_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -106,11 +100,9 @@ def test_while_in_for_numpy():
|
|||
assert res2 == 10
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_in_for_builtin_function():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,20 +13,18 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context, nn
|
||||
from mindspore.common.parameter import Parameter
|
||||
from mindspore.common import dtype as mstype
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -46,11 +44,9 @@ def test_for_in_if_tensor():
|
|||
assert res == 10
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -70,11 +66,9 @@ def test_for_in_if_tensor_2():
|
|||
assert res == -5
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_if_param():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -104,11 +98,9 @@ def test_for_in_if_param():
|
|||
assert res2 == 12
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_if_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -127,11 +119,9 @@ def test_for_in_if_numpy():
|
|||
np.all(out.asnumpy() == np.array([7, 9, 1]))
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_if_isinstance_raise():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -153,7 +143,9 @@ def test_for_in_if_isinstance_raise():
|
|||
assert out == 4
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_if_dict_isinstance():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,21 +13,19 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context, nn
|
||||
from mindspore.common import dtype as mstype
|
||||
from mindspore.common.parameter import Parameter
|
||||
from mindspore.ops import operations as P
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -48,11 +46,9 @@ def test_for_in_while_tensor():
|
|||
assert res == 37
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_while_numpy_append():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -73,11 +69,9 @@ def test_for_in_while_numpy_append():
|
|||
assert res == 54
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_while_sum():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -108,11 +102,9 @@ def test_for_in_while_sum():
|
|||
assert res == 26
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_while_print():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -134,11 +126,9 @@ def test_for_in_while_print():
|
|||
assert res2 == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_in_while_round():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow if after if scenario"""
|
||||
import pytest
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -43,11 +41,9 @@ def test_if_after_if_tensor():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -73,11 +69,9 @@ def test_if_after_if_tensor_2():
|
|||
assert res == 9
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_tensor_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow if after while scenario"""
|
||||
import pytest
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -45,11 +43,9 @@ def test_if_after_while_tensor():
|
|||
assert res == 6
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_in_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_in_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_if_after_if_in_while_tensor():
|
|||
assert res == 54
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_in_while_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -77,11 +73,9 @@ def test_if_after_if_in_while_numpy():
|
|||
assert (res.asnumpy() == [1, 0]).all()
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_in_while_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_if_in_for_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -46,11 +44,9 @@ def test_if_after_while_in_if_tensor():
|
|||
assert res == 0
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -47,11 +45,9 @@ def test_if_after_while_in_while_tensor():
|
|||
assert res == 1
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_while_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -75,11 +71,9 @@ def test_if_after_while_in_while_tensor_2():
|
|||
assert res == 0
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_while_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_for_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -47,11 +45,9 @@ def test_if_after_while_in_for_tensor():
|
|||
assert res == 13
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_for_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -75,11 +71,9 @@ def test_if_after_while_in_for_tensor_2():
|
|||
assert res == 4
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_while_in_for_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -47,6 +45,9 @@ def test_if_after_for_in_if_tensor():
|
|||
assert res == 5
|
||||
|
||||
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_if_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -69,11 +70,9 @@ def test_if_after_for_in_if_numpy():
|
|||
assert (res.asnumpy() == [6, 10]).all()
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -99,10 +98,9 @@ def test_if_after_for_in_if_tensor_2():
|
|||
assert (res.asnumpy() == [-1]).all()
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_if_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -47,11 +45,9 @@ def test_if_after_for_in_while_tensor():
|
|||
assert res == 13
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_while_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -75,10 +71,8 @@ def test_if_after_for_in_while_tensor_2():
|
|||
assert res == 48
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_while_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_ascend
|
||||
def test_if_after_for_in_for_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -46,11 +44,9 @@ def test_while_after_if_tensor():
|
|||
assert res == 11
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -76,11 +72,9 @@ def test_while_after_if_tensor_2():
|
|||
assert res_z == 1
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -102,11 +96,9 @@ def test_while_after_if_numpy():
|
|||
assert (res.asnumpy() == [-3, -4]).all()
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -46,11 +44,9 @@ def test_while_after_while_tensor():
|
|||
assert res == -3
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -71,7 +67,7 @@ def test_while_after_while_tensor_2():
|
|||
assert res == 6
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
@case_register.skip(reason='Not support graph fallback feature yet')
|
||||
def test_while_after_while_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -92,11 +88,9 @@ def test_while_after_while_numpy_2():
|
|||
assert (res_y.asnumpy() == [1, 2, 3, 4, 2, 2, 3]).all()
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -46,7 +44,7 @@ def test_while_after_for_tensor_2():
|
|||
assert res == 5
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
@case_register.skip(reason='Not support graph fallback feature yet')
|
||||
def test_while_after_for_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -70,11 +68,9 @@ def test_while_after_for_numpy_2():
|
|||
assert res_z == [10]
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -96,11 +92,9 @@ def test_while_after_for_tensor():
|
|||
assert res == -12
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -52,11 +50,9 @@ def test_while_after_if_in_if_tensor():
|
|||
assert res == 0
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -82,7 +78,7 @@ def test_while_after_if_in_if_tensor_2():
|
|||
assert res == 4
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
@case_register.skip(reason='Not support graph fallback feature yet')
|
||||
def test_while_after_if_in_if_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -106,7 +102,7 @@ def test_while_after_if_in_if_numpy():
|
|||
assert res == 1
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
@case_register.skip(reason='Not support graph fallback feature yet')
|
||||
def test_while_after_if_in_if_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -53,11 +51,9 @@ def test_while_after_if_in_while_tensor():
|
|||
assert res == 33
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_while_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -84,11 +80,9 @@ def test_while_after_if_in_while_numpy_2():
|
|||
assert res_y == 1
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_while_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_for_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -52,11 +50,9 @@ def test_while_after_if_in_for_tensor():
|
|||
assert res == 73
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_for_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -83,11 +79,9 @@ def test_while_after_if_in_for_tensor_2():
|
|||
assert res_y == 5
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_if_in_for_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_if_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_while_after_while_in_if_tensor():
|
|||
assert res == (1, 1, 16)
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -84,11 +80,9 @@ def test_while_after_while_in_if_tensor_2():
|
|||
assert res == (2, 1, 15)
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_if_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -113,11 +107,9 @@ def test_while_after_while_in_if_numpy_2():
|
|||
assert res == (1, 3, 3)
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_if_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_while_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_for_numpy():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_while_after_while_in_for_numpy():
|
|||
assert res == -6
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_for_numpy_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -79,11 +75,9 @@ def test_while_after_while_in_for_numpy_2():
|
|||
assert res == 63
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_for_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -106,11 +100,9 @@ def test_while_after_while_in_for_tensor():
|
|||
assert res_y == -5
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_while_in_for_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_if_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -52,11 +50,9 @@ def test_while_after_for_in_if_1():
|
|||
assert res == 11
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_if_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -88,11 +84,9 @@ def test_while_after_for_in_if_2():
|
|||
assert res_z == 1
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_if_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -118,11 +112,9 @@ def test_while_after_for_in_if_3():
|
|||
assert (res.asnumpy() == [-3, -4]).all()
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_if_4():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_while_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -50,11 +48,9 @@ def test_while_after_for_in_while_1():
|
|||
assert res == 6
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_while_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -80,11 +76,9 @@ def test_while_after_for_in_while_2():
|
|||
assert res == -2
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_while_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_for_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -56,11 +54,9 @@ def test_while_after_for_in_for_2():
|
|||
assert res_y == 6
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_while_after_for_in_for_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_if_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -52,11 +50,9 @@ def test_for_after_if_2():
|
|||
assert res_y == 5
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_if_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -83,10 +79,8 @@ def test_for_after_if_3():
|
|||
assert res == 12
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_ascend
|
||||
def test_for_after_if_4():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -47,11 +45,9 @@ def test_for_after_while_1():
|
|||
assert res == 0
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -77,11 +73,9 @@ def test_for_after_while_2():
|
|||
assert res_y == 5
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -50,11 +48,9 @@ def test_for_after_for_2():
|
|||
assert res_y == 6
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow for after if in if scenario"""
|
||||
import pytest
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_if_in_if_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,17 +13,15 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow for after if in while scenario"""
|
||||
import pytest
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_if_in_while_tensor():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_for_after_if_in_while_tensor():
|
|||
assert res == 13
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_if_in_while_tensor_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -17,15 +17,14 @@ import pytest
|
|||
import numpy as np
|
||||
import mindspore
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_if_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -51,11 +50,9 @@ def test_for_after_while_in_if_1():
|
|||
assert res == 8
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_if_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -90,11 +87,9 @@ def test_for_after_while_in_if_2():
|
|||
assert res_y == 5
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_if_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_while_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_for_after_while_in_while_1():
|
|||
assert res == 0
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_while_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -81,11 +77,9 @@ def test_for_after_while_in_while_2():
|
|||
assert y == 10
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_while_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,19 +13,17 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
import mindspore
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_for_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_for_after_while_in_for_1():
|
|||
assert res == 7
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_for_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -83,6 +79,9 @@ def test_for_after_while_in_for_2():
|
|||
assert res_y == 9
|
||||
|
||||
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_while_in_for_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_in_if_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -52,11 +50,9 @@ def test_for_after_for_in_if_3():
|
|||
assert res == 64
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_in_if_4():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_in_while_1():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -49,11 +47,9 @@ def test_for_after_for_in_while_1():
|
|||
assert res == 0
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_in_while_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -80,11 +76,9 @@ def test_for_after_for_in_while_2():
|
|||
assert res_y == 5
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level1
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_in_while_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -13,18 +13,16 @@
|
|||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback control flow."""
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import Tensor, jit, context
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
@case_register.level0
|
||||
@case_register.target_gpu
|
||||
@case_register.target_ascend
|
||||
def test_for_after_for_in_for_2():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
@ -53,7 +51,7 @@ def test_for_after_for_in_for_2():
|
|||
assert res_y == 6
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
@case_register.skip(reason='Not support graph fallback feature yet')
|
||||
def test_for_after_for_in_for_3():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
|
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright 2022 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
import pytest
|
||||
from tests.st.fallback.cases_register import case_register
|
||||
from mindspore import context
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
@pytest.mark.platform_x86_ascend_training
|
||||
@pytest.mark.env_onecard
|
||||
def test_level0_ascend_cases():
|
||||
"""
|
||||
Feature: control flow.
|
||||
Description: Execute all test cases with level0 and with device_target Ascend in one process.
|
||||
Expectation: All cases passed.
|
||||
"""
|
||||
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
|
||||
case_register.check_and_run("Ascend", 0)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.env_onecard
|
||||
def test_level0_gpu_cases():
|
||||
"""
|
||||
Feature: control flow.
|
||||
Description: Execute all test cases with level0 and with device_target GPU in one process.
|
||||
Expectation: All cases passed.
|
||||
"""
|
||||
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
|
||||
case_register.check_and_run("GPU", 0)
|
||||
|
||||
|
||||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_cpu_training
|
||||
@pytest.mark.env_onecard
|
||||
def test_level0_cpu_cases():
|
||||
"""
|
||||
Feature: control flow.
|
||||
Description: Execute all test cases with level0 and with device_target CPU in one process.
|
||||
Expectation: All cases passed.
|
||||
"""
|
||||
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
|
||||
case_register.check_and_run("CPU", 0)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_level1_ascend_cases():
    """
    Feature: control flow.
    Description: Run every registered level1 case for device_target Ascend within one process.
    Expectation: All cases passed.
    """
    target = "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target=target)
    case_register.check_and_run(target, 1)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_level1_gpu_cases():
    """
    Feature: control flow.
    Description: Run every registered level1 case for device_target GPU within one process.
    Expectation: All cases passed.
    """
    target = "GPU"
    context.set_context(mode=context.GRAPH_MODE, device_target=target)
    case_register.check_and_run(target, 1)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_level1_cpu_cases():
    """
    Feature: control flow.
    Description: Run every registered level1 case for device_target CPU within one process.
    Expectation: All cases passed.
    """
    target = "CPU"
    context.set_context(mode=context.GRAPH_MODE, device_target=target)
    case_register.check_and_run(target, 1)
|
|
@ -19,7 +19,6 @@ import numpy as np
|
|||
import mindspore.nn as nn
|
||||
import mindspore.common.dtype as mstype
|
||||
from mindspore import Tensor, jit, context
|
||||
from mindspore.ops import Primitive
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
@ -84,44 +83,6 @@ def test_np_tensor_list():
|
|||
assert len(tensor_list) == 3
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_list_count():
    """
    Feature: Fallback feature
    Description: support attr/method of builtin type.
    Expectation: No exception.
    """
    @jit
    def list_count():
        data = list([1, 2, 3])
        return data.count(1)

    assert list_count() == 1
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_list_append():
    """
    Feature: Fallback feature
    Description: support attr/method of builtin type.
    Expectation: No exception.
    """
    @jit
    def list_append():
        data = list([1, 2, 3])
        data.append(4)
        return Tensor(data)

    expected = np.array([1, 2, 3, 4])
    assert np.all(list_append().asnumpy() == expected)
|
||||
|
||||
|
||||
@jit
|
||||
def np_fallback_func_tensor_index(x):
|
||||
array_x = tuple([2, 3, 4, 5])
|
||||
|
@ -148,69 +109,6 @@ def test_np_fallback_func_tensor_index():
|
|||
assert output == output_expect
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_calculate():
    """
    Feature: Fallback feature.
    Description: Support numpy calculation.
    Expectation: No exception.
    """
    @jit
    def np_calculate():
        arr = np.array([3, 1, 2, 4, 5])
        parity = arr % 2
        return Tensor(parity)

    assert np.all(np_calculate().asnumpy() == np.array([1, 1, 0, 0, 1]))
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_fallback_tensor_array_astype():
    """
    Feature: JIT Fallback
    Description: Test Tensor(array) with astype() in graph mode.
    Expectation: No exception.
    """
    @jit
    def foo():
        return Tensor([1.1, -2.1]).astype("float32")

    print(foo())
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_fallback_tuple_with_mindspore_function():
    """
    Feature: JIT Fallback
    Description: Test fallback when local input has tuple with mindspore function type, such as Cell, Primitive.
    Expectation: No exception.
    """
    def test_isinstance(a, base_type):
        # True when any candidate type appears in a's method resolution order.
        mro = type(a).mro()
        return any(candidate in mro for candidate in base_type)

    @jit
    def foo():
        return test_isinstance(np.array(1), (np.ndarray, nn.Cell, Primitive))

    assert foo()
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
|
|
|
@ -20,121 +20,9 @@ from mindspore import jit, context, Tensor
|
|||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_linspace():
    """
    Feature: JIT Fallback
    Description: Test numpy with linspace in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_linspace():
        a = Tensor(np.linspace(1, 10, 10))
        b = Tensor(np.linspace(1, 1, 10))
        c = Tensor(np.linspace(10, 20, 5, endpoint=False))
        d = Tensor(np.linspace(10, 20, 5, endpoint=True))
        e = Tensor(np.linspace(1, 10, 10).reshape([10, 1]))
        return a, b, c, d, e

    results = np_linspace()
    for label, value in zip("abcde", results):
        print(label + ":", value)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_arange_slice_1():
    """
    Feature: JIT Fallback
    Description: Test numpy with arange slice in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_arange_slice_1():
        x = np.arange(10)
        index = slice(2, 7, 2)
        return (Tensor(x[index]), Tensor(x[2:7:2]), Tensor(x[5]),
                Tensor(x[2:]), Tensor(x[2:5]))

    outputs = np_arange_slice_1()
    expected = [
        np.array([2, 4, 6]),
        np.array([2, 4, 6]),
        np.array([5]),
        np.array([2, 3, 4, 5, 6, 7, 8, 9]),
        np.array([2, 3, 4]),
    ]
    for out, exp in zip(outputs, expected):
        assert np.all(out.asnumpy() == exp)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_arange_slice_2():
    """
    Feature: JIT Fallback
    Description: Test numpy with arange slice in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_arange_slice_2():
        x = np.array([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
        return Tensor(x[1:]), Tensor(x[..., 1]), Tensor(x[1, ...]), Tensor(x[..., 1:])

    outputs = np_arange_slice_2()
    expected = [
        np.array([[3, 4, 5], [4, 5, 6]]),
        np.array([2, 4, 5]),
        np.array([3, 4, 5]),
        np.array([[2, 3], [4, 5], [5, 6]]),
    ]
    for out, exp in zip(outputs, expected):
        assert np.all(out.asnumpy() == exp)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_array_advanced_index_1():
    """
    Feature: JIT Fallback
    Description: Test numpy with array advanced index in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_array_advanced_index_1():
        x = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
        picked = Tensor(x[[0, 1, 2], [0, 1, 0]])
        row_idx = np.array([[0, 0], [3, 3]])
        col_idx = np.array([[0, 2], [0, 2]])
        corners = Tensor(x[row_idx, col_idx])
        inner = Tensor(x[1:3, 1:3])
        mixed = Tensor(x[1:3, [1, 2]])
        tail = Tensor(x[..., 1:])
        return picked, corners, inner, mixed, tail

    outputs = np_array_advanced_index_1()
    expected = [
        np.array([0, 4, 6]),
        np.array([[0, 2], [9, 11]]),
        np.array([[4, 5], [7, 8]]),
        np.array([[4, 5], [7, 8]]),
        np.array([[1, 2], [4, 5], [7, 8], [10, 11]]),
    ]
    for out, exp in zip(outputs, expected):
        assert np.all(out.asnumpy() == exp)
|
||||
|
||||
|
||||
# Not support <class 'complex'> yet.
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
def test_np_array_advanced_index_2():
|
||||
def test_np_array_advanced_index_complex():
|
||||
"""
|
||||
Feature: JIT Fallback
|
||||
Description: Test numpy with array advanced index in graph mode.
|
||||
|
@ -155,108 +43,6 @@ def test_np_array_advanced_index_2():
|
|||
assert np.all(c.asnumpy() == np.array([2. + 6.j, 3.5 + 5.j]))
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_array_advanced_index_3():
    """
    Feature: JIT Fallback
    Description: Test numpy with array advanced index in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_array_advanced_index_3():
        x = np.arange(32).reshape((8, 4))
        rows = Tensor(x[[4, 2, 1, 7]])
        y = np.arange(32).reshape((8, 4))
        neg_rows = Tensor(y[[-4, -2, -1, -7]])
        z = np.arange(32).reshape((8, 4))
        grid = Tensor(z[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])])
        return rows, neg_rows, grid

    a, b, c = np_array_advanced_index_3()
    print("a:", a)
    print("b:", b)
    print("c:", c)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_reshape():
    """
    Feature: JIT Fallback
    Description: Test numpy.reshape() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_reshape():
        flat = np.arange(8)
        return Tensor(flat.reshape(2, 4))

    expected = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
    assert np.all(np_reshape().asnumpy() == expected)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_ndarray_flatten():
    """
    Feature: JIT Fallback
    Description: Test numpy.flatten() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_ndarray_flatten():
        grid = np.arange(8).reshape(2, 4)
        return Tensor(grid.flatten())

    expected = np.array([0, 1, 2, 3, 4, 5, 6, 7])
    assert np.all(np_ndarray_flatten().asnumpy() == expected)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_ravel():
    """
    Feature: JIT Fallback
    Description: Test numpy.ravel() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_ravel():
        grid = np.arange(8).reshape(2, 4)
        # Column-major ('F') order interleaves the two rows.
        return Tensor(grid.ravel(order='F'))

    expected = np.array([0, 4, 1, 5, 2, 6, 3, 7])
    assert np.all(np_ravel().asnumpy() == expected)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_transpose():
    """
    Feature: JIT Fallback
    Description: Test numpy.transpose() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_transpose():
        column = np.arange(4).reshape(4, 1)
        return Tensor(np.transpose(column))

    assert np.all(np_transpose().asnumpy() == np.array([0, 1, 2, 3]))
|
||||
|
||||
|
||||
@pytest.mark.level1
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.platform_arm_ascend_training
|
||||
|
@ -299,475 +85,3 @@ def test_np_swapaxes():
|
|||
return tensor_x[1, 1, 0], tensor_y[1, 1, 0]
|
||||
x, y = np_swapaxes()
|
||||
assert x == 6 and y == 3
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_broadcast():
    """
    Feature: JIT Fallback
    Description: Test numpy.broadcast() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_broadcast():
        col = np.array([[1], [2], [3]])
        row = np.array([4, 5, 6])
        broadcast_obj = np.broadcast(col, row)
        return Tensor(broadcast_obj.shape)

    assert np.all(np_broadcast().asnumpy() == np.array([3, 3]))
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_broadcast_to():
    """
    Feature: JIT Fallback
    Description: Test numpy.broadcast_to() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_broadcast_to():
        row = np.arange(4).reshape(1, 4)
        return Tensor(np.broadcast_to(row, (2, 4)))

    expected = np.array([[0, 1, 2, 3], [0, 1, 2, 3]])
    assert np.all(np_broadcast_to().asnumpy() == expected)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_expand_dims():
    """
    Feature: JIT Fallback
    Description: Test numpy.expand_dims() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_expand_dims():
        mat = np.array(([1, 2], [3, 4]))
        return Tensor(np.expand_dims(mat, axis=0))

    expected = np.array([[[1, 2], [3, 4]]])
    assert np.all(np_expand_dims().asnumpy() == expected)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_squeeze():
    """
    Feature: JIT Fallback
    Description: Test numpy.squeeze() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_squeeze():
        boxed = np.arange(4).reshape(1, 2, 2)
        return Tensor(np.squeeze(boxed))

    assert np.all(np_squeeze().asnumpy() == np.array([[0, 1], [2, 3]]))
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_concat():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_concat():
        first = np.array([[1, 2], [3, 4]])
        second = np.array([[5, 6], [7, 8]])
        return (Tensor(np.concatenate((first, second))),
                Tensor(np.stack((first, second), 0)),
                Tensor(np.hstack((first, second))),
                Tensor(np.vstack((first, second))))

    outputs = np_concat()
    expected = [
        np.array([[1, 2], [3, 4], [5, 6], [7, 8]]),
        np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
        np.array([[1, 2, 5, 6], [3, 4, 7, 8]]),
        np.array([[1, 2], [3, 4], [5, 6], [7, 8]]),
    ]
    for out, exp in zip(outputs, expected):
        assert np.all(out.asnumpy() == exp)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_split():
    """
    Feature: JIT Fallback
    Description: Test numpy split method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_split():
        grid = np.arange(4).reshape(2, 2)
        return (Tensor(np.split(grid, 2)),
                Tensor(np.hsplit(grid, 2)),
                Tensor(np.vsplit(grid, 2)))

    outputs = np_split()
    expected = [
        np.array([[[0, 1]], [[2, 3]]]),
        np.array([[[0], [2]], [[1], [3]]]),
        np.array([[[0, 1]], [[2, 3]]]),
    ]
    for out, exp in zip(outputs, expected):
        assert np.all(out.asnumpy() == exp)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_element():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_element():
        resized = np.resize(np.array([[1, 2, 3], [4, 5, 6]]), (3, 2))
        appended = np.append(np.array([[1, 2, 3], [4, 5, 6]]), [[7, 8, 9]], axis=0)
        inserted = np.insert(np.array([[1, 2], [3, 4], [5, 6]]), 3, [7, 8], axis=0)
        deleted = np.delete(np.arange(6).reshape(2, 3), 0, axis=0)
        deduped = np.unique(np.array([5, 2, 6, 2, 7, 5, 6, 8, 2, 9]))
        return Tensor(resized), Tensor(appended), Tensor(inserted), Tensor(deleted), Tensor(deduped)

    outputs = np_element()
    expected = [
        np.array([[1, 2], [3, 4], [5, 6]]),
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
        np.array([[1, 2], [3, 4], [5, 6], [7, 8]]),
        np.array([3, 4, 5]),
        np.array([2, 5, 6, 7, 8, 9]),
    ]
    for out, exp in zip(outputs, expected):
        assert np.all(out.asnumpy() == exp)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_bitwise():
    """
    Feature: JIT Fallback
    Description: Test numpy bitwise method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_bitwise():
        conj = np.bitwise_and(13, 17)
        disj = np.bitwise_or(13, 17)
        # invert of uint8 13 is 255 - 13 = 242
        flipped = np.invert(np.array([13], dtype=np.uint8))
        shifted_left = np.left_shift(10, 2)
        shifted_right = np.right_shift(40, 2)
        return Tensor(conj), Tensor(disj), Tensor(flipped), Tensor(shifted_left), Tensor(shifted_right)

    conj, disj, flipped, shifted_left, shifted_right = np_bitwise()
    assert conj.asnumpy() == 1
    assert disj.asnumpy() == 29
    assert np.all(flipped.asnumpy() == np.array([242]))
    assert shifted_left.asnumpy() == 40
    assert shifted_right.asnumpy() == 10
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_char_1():
    """
    Feature: JIT Fallback
    Description: Test numpy char method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_char():
        added = np.char.add(['MindSpore'], [' fallback'])
        repeated = np.char.multiply('fallback ', 3)
        centered = np.char.center('fallback', 10, fillchar='*')
        capitalized = np.char.capitalize('fallback')
        titled = np.char.title('fallback')
        lowered = np.char.lower('FALLBACK')
        uppered = np.char.upper('fallback')
        return (Tensor(added), Tensor(repeated), Tensor(centered), Tensor(capitalized),
                Tensor(titled), Tensor(lowered), Tensor(uppered))

    outputs = np_char()
    expected = [
        'MindSpore fallback',
        'fallback fallback fallback ',
        '*fallback*',
        'Fallback',
        'Fallback',
        'fallback',
        'FALLBACK',
    ]
    for out, exp in zip(outputs, expected):
        assert out.asnumpy() == exp
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_char_2():
    """
    Feature: JIT Fallback
    Description: Test numpy char method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_char():
        words = np.char.split('MindSpore fallback')
        rejoined = np.char.join(' ', words)

        lines = np.char.splitlines('MindSpore\nfallback')
        joined_lines = np.char.join(',', lines)

        stripped = np.char.strip('abc acd', 'a')
        replaced = np.char.replace('faooback', 'oo', 'll')
        encoded = np.char.encode('runoob', 'cp500')
        decoded = np.char.decode(encoded, 'cp500')
        return Tensor(rejoined), Tensor(joined_lines), Tensor(stripped), Tensor(replaced), Tensor(decoded)

    outputs = np_char()
    expected = [
        'MindSpore fallback',
        'MindSpore,fallback',
        'bc acd',
        'fallback',
        'runoob',
    ]
    for out, exp in zip(outputs, expected):
        assert out.asnumpy() == exp
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_degree():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_degree():
        # Each trig result is inverted back and converted to degrees.
        sin_val = np.sin(30 * np.pi / 180)
        asin_deg = np.degrees(np.arcsin(sin_val))
        cos_val = np.cos(60 * np.pi / 180)
        acos_deg = np.degrees(np.arccos(cos_val))
        tan_val = np.tan(45 * np.pi / 180)
        atan_deg = np.degrees(np.arctan(tan_val))
        return (Tensor(sin_val), Tensor(asin_deg), Tensor(cos_val),
                Tensor(acos_deg), Tensor(tan_val), Tensor(atan_deg))

    outputs = np_degree()
    expected = [0.5, 30, 0.5, 60, 1, 45]
    for out, exp in zip(outputs, expected):
        assert np.isclose(out.asnumpy(), exp)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_math_1():
    """
    Feature: JIT Fallback
    Description: Test numpy math method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_math():
        lhs = np.array([6, 12])
        rhs = np.array([3, 5])
        return (Tensor(np.add(lhs, rhs)), Tensor(np.subtract(lhs, rhs)),
                Tensor(np.multiply(lhs, rhs)), Tensor(np.divide(lhs, rhs)),
                Tensor(np.mod(lhs, rhs)), Tensor(np.remainder(lhs, rhs)))

    out_add, out_subtract, out_multiply, out_divide, out_mod, out_remainder = np_math()
    assert np.all(out_add.asnumpy() == np.array([9, 17]))
    assert np.all(out_subtract.asnumpy() == np.array([3, 7]))
    assert np.all(out_multiply.asnumpy() == np.array([18, 60]))
    assert np.allclose(out_divide.asnumpy(), np.array([2, 2.4]))
    assert np.all(out_mod.asnumpy() == np.array([0, 2]))
    assert np.all(out_remainder.asnumpy() == np.array([0, 2]))
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_math_2():
    """
    Feature: JIT Fallback
    Description: Test numpy math method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_math():
        values = np.array([0.1, 1.4, 2.51, 3.3])
        rounded = np.around(values)
        floored = np.floor(values)
        ceiled = np.ceil(values)
        reciprocals = np.reciprocal(np.array([0.25, 1, 2]))
        powered = np.power(np.array([1.0, 2.0, 3.0]), 2)
        return Tensor(rounded), Tensor(floored), Tensor(ceiled), Tensor(reciprocals), Tensor(powered)

    outputs = np_math()
    expected = [
        np.array([0, 1, 3, 3]),
        np.array([0, 1, 2, 3]),
        np.array([1, 2, 3, 4]),
        np.array([4, 1, 0.5]),
        np.array([1, 4, 9]),
    ]
    for out, exp in zip(outputs, expected):
        assert np.allclose(out.asnumpy(), exp)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_statistic():
    """
    Feature: JIT Fallback
    Description: Test numpy statistic method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_statistic():
        sample = np.array([1, 2, 3, 4, 5])
        return (Tensor(np.amin(sample)), Tensor(np.amax(sample)), Tensor(np.ptp(sample)),
                Tensor(np.percentile(sample, 50)), Tensor(np.median(sample)),
                Tensor(np.mean(sample)), Tensor(np.average(sample)),
                Tensor(np.std(sample)), Tensor(np.var(sample)))

    out_amin, out_amax, out_ptp, out_percentile, out_median, out_mean, out_average, out_std, out_var = np_statistic()
    assert out_amin.asnumpy() == 1
    assert out_amax.asnumpy() == 5
    assert out_ptp.asnumpy() == 4
    assert np.isclose(out_percentile.asnumpy(), 3.0)
    assert out_median.asnumpy() == 3
    assert out_mean.asnumpy() == 3
    assert out_average.asnumpy() == 3
    assert np.allclose(out_std.asnumpy(), np.array([1.41421356]))
    assert np.isclose(out_var.asnumpy(), 2.0)
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_sort():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_sort():
        values = np.array([3, 1, 2, 4, 5])
        return (Tensor(np.sort(values)), Tensor(np.argsort(values)),
                Tensor(np.argmax(values)), Tensor(np.argmin(values)),
                Tensor(np.nonzero(values)), Tensor(np.where(values > 4)))

    out_sort, out_argsort, out_argmax, out_argmin, out_nonzero, out_where = np_sort()
    assert np.all(out_sort.asnumpy() == np.array([1, 2, 3, 4, 5]))
    assert np.all(out_argsort.asnumpy() == np.array([1, 2, 0, 3, 4]))
    assert out_argmax.asnumpy() == 4
    assert out_argmin.asnumpy() == 1
    assert np.all(out_nonzero.asnumpy() == np.array([0, 1, 2, 3, 4]))
    assert np.all(out_where.asnumpy() == np.array([4]))
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_extract():
    """
    Feature: JIT Fallback
    Description: Test numpy extract method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_extract():
        values = np.array([3, 1, 2, 4, 5])
        is_even = values % 2 == 0
        return Tensor(np.extract(is_even, values))

    assert np.all(np_extract().asnumpy() == np.array([2, 4]))
|
||||
|
||||
|
||||
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_matrix():
    """
    Feature: JIT Fallback
    Description: Test numpy matrix method in graph mode.
    Expectation: No exception.
    """
    @jit
    def np_matrix():
        lhs = np.arange(4).reshape(2, 2)
        rhs = np.array([[2, 2], [3, 3]])
        return (Tensor(lhs.T), Tensor(np.dot(lhs, rhs)), Tensor(np.vdot(lhs, rhs)),
                Tensor(np.inner(lhs, rhs)), Tensor(np.matmul(lhs, rhs)),
                Tensor(np.linalg.det(lhs)), Tensor(np.linalg.inv(lhs)))

    out_t, out_dot, out_vdot, out_inner, out_matmul, out_det, out_inv = np_matrix()
    assert np.all(out_t.asnumpy() == np.array([[0, 2], [1, 3]]))
    assert np.all(out_dot.asnumpy() == np.array([[3, 3], [13, 13]]))
    assert out_vdot.asnumpy() == 17
    assert np.all(out_inner.asnumpy() == np.array([[2, 3], [10, 15]]))
    assert np.all(out_matmul.asnumpy() == np.array([[3, 3], [13, 13]]))
    assert np.isclose(out_det.asnumpy(), -2.0)
    assert np.allclose(out_inv.asnumpy(), np.array([[-1.5, 0.5], [1, 0]]))
|
||||
|
|
|
@ -0,0 +1,576 @@
|
|||
# Copyright 2021 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
""" test graph fallback """
|
||||
import numpy as np
|
||||
from mindspore import jit, context, Tensor
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
||||
def test_np_linspace():
    """
    Feature: JIT Fallback
    Description: Test numpy with linspace in graph mode.
    Expectation: No exception.
    """
    @jit
    def linspace_cases():
        # Interpreted-side numpy linspace results, wrapped into Tensors.
        a = Tensor(np.linspace(1, 10, 10))
        b = Tensor(np.linspace(1, 1, 10))
        c = Tensor(np.linspace(10, 20, 5, endpoint=False))
        d = Tensor(np.linspace(10, 20, 5, endpoint=True))
        e = Tensor(np.linspace(1, 10, 10).reshape([10, 1]))
        return a, b, c, d, e

    a, b, c, d, e = linspace_cases()
    # Smoke test only: just confirm the values come back out of graph mode.
    for tag, value in zip(("a", "b", "c", "d", "e"), (a, b, c, d, e)):
        print(tag + ":", value)
|
||||
|
||||
|
||||
def test_np_arange_slice_1():
    """
    Feature: JIT Fallback
    Description: Test numpy with arange slice in graph mode.
    Expectation: No exception.
    """
    @jit
    def arange_slicing():
        data = np.arange(10)
        step_slice = slice(2, 7, 2)
        # Exercise slice objects, extended slices, scalar and open-ended indexing.
        return (Tensor(data[step_slice]), Tensor(data[2:7:2]), Tensor(data[5]),
                Tensor(data[2:]), Tensor(data[2:5]))

    a, b, c, d, e = arange_slicing()
    assert np.all(a.asnumpy() == np.array([2, 4, 6]))
    assert np.all(b.asnumpy() == np.array([2, 4, 6]))
    assert np.all(c.asnumpy() == np.array([5]))
    assert np.all(d.asnumpy() == np.array([2, 3, 4, 5, 6, 7, 8, 9]))
    assert np.all(e.asnumpy() == np.array([2, 3, 4]))
|
||||
|
||||
|
||||
def test_np_arange_slice_2():
    """
    Feature: JIT Fallback
    Description: Test numpy with arange slice in graph mode.
    Expectation: No exception.
    """
    @jit
    def ellipsis_slicing():
        grid = np.array([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
        # Row slice, then three Ellipsis-based selections.
        return (Tensor(grid[1:]), Tensor(grid[..., 1]),
                Tensor(grid[1, ...]), Tensor(grid[..., 1:]))

    a, b, c, d = ellipsis_slicing()
    assert np.all(a.asnumpy() == np.array([[3, 4, 5], [4, 5, 6]]))
    assert np.all(b.asnumpy() == np.array([2, 4, 5]))
    assert np.all(c.asnumpy() == np.array([3, 4, 5]))
    assert np.all(d.asnumpy() == np.array([[2, 3], [4, 5], [5, 6]]))
|
||||
|
||||
|
||||
def test_np_array_advanced_index():
    """
    Feature: JIT Fallback
    Description: Test numpy with array advanced index in graph mode.
    Expectation: No exception.
    """
    @jit
    def fancy_indexing():
        grid = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
        # Integer-array indexing picks individual elements.
        picked = Tensor(grid[[0, 1, 2], [0, 1, 0]])
        row_idx = np.array([[0, 0], [3, 3]])
        col_idx = np.array([[0, 2], [0, 2]])
        corners = Tensor(grid[row_idx, col_idx])
        # Basic, mixed and Ellipsis slicing.
        center = Tensor(grid[1:3, 1:3])
        mixed = Tensor(grid[1:3, [1, 2]])
        tail_cols = Tensor(grid[..., 1:])
        return picked, corners, center, mixed, tail_cols

    a, b, c, d, e = fancy_indexing()
    assert np.all(a.asnumpy() == np.array([0, 4, 6]))
    assert np.all(b.asnumpy() == np.array([[0, 2], [9, 11]]))
    assert np.all(c.asnumpy() == np.array([[4, 5], [7, 8]]))
    assert np.all(d.asnumpy() == np.array([[4, 5], [7, 8]]))
    assert np.all(e.asnumpy() == np.array([[1, 2], [4, 5], [7, 8], [10, 11]]))
|
||||
|
||||
|
||||
def test_np_array_advanced_index_2():
    """
    Feature: JIT Fallback
    Description: Test numpy with array advanced index in graph mode.
    Expectation: No exception.
    """
    @jit
    def row_selection():
        # Positive row list, negative row list, and np.ix_ cross-indexing.
        first = np.arange(32).reshape((8, 4))
        pos_rows = Tensor(first[[4, 2, 1, 7]])
        second = np.arange(32).reshape((8, 4))
        neg_rows = Tensor(second[[-4, -2, -1, -7]])
        third = np.arange(32).reshape((8, 4))
        cross = Tensor(third[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])])
        return pos_rows, neg_rows, cross

    a, b, c = row_selection()
    # Smoke test only: confirm the selections survive the graph boundary.
    print("a:", a)
    print("b:", b)
    print("c:", c)
|
||||
|
||||
|
||||
def test_np_reshape():
    """
    Feature: JIT Fallback
    Description: Test numpy.reshape() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def reshape_case():
        flat = np.arange(8)
        return Tensor(flat.reshape(2, 4))

    expected = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
    assert np.all(reshape_case().asnumpy() == expected)
|
||||
|
||||
|
||||
def test_np_ndarray_flatten():
    """
    Feature: JIT Fallback
    Description: Test numpy.flatten() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def flatten_case():
        matrix = np.arange(8).reshape(2, 4)
        return Tensor(matrix.flatten())

    expected = np.array([0, 1, 2, 3, 4, 5, 6, 7])
    assert np.all(flatten_case().asnumpy() == expected)
|
||||
|
||||
|
||||
def test_np_ravel():
    """
    Feature: JIT Fallback
    Description: Test numpy.ravel() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def ravel_case():
        matrix = np.arange(8).reshape(2, 4)
        # Fortran (column-major) order flattening.
        return Tensor(matrix.ravel(order='F'))

    expected = np.array([0, 4, 1, 5, 2, 6, 3, 7])
    assert np.all(ravel_case().asnumpy() == expected)
|
||||
|
||||
|
||||
def test_np_transpose():
    """
    Feature: JIT Fallback
    Description: Test numpy.transpose() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def transpose_case():
        column = np.arange(4).reshape(4, 1)
        return Tensor(np.transpose(column))

    # Transposed (4, 1) compares element-wise against the flat expectation.
    assert np.all(transpose_case().asnumpy() == np.array([0, 1, 2, 3]))
|
||||
|
||||
|
||||
|
||||
def test_np_broadcast():
    """
    Feature: JIT Fallback
    Description: Test numpy.broadcast() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def broadcast_case():
        column = np.array([[1], [2], [3]])
        row = np.array([4, 5, 6])
        # Only the broadcast result's shape is checked.
        joined = np.broadcast(column, row)
        return Tensor(joined.shape)

    assert np.all(broadcast_case().asnumpy() == np.array([3, 3]))
|
||||
|
||||
|
||||
def test_np_broadcast_to():
    """
    Feature: JIT Fallback
    Description: Test numpy.broadcast_to() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def broadcast_to_case():
        row = np.arange(4).reshape(1, 4)
        return Tensor(np.broadcast_to(row, (2, 4)))

    expected = np.array([[0, 1, 2, 3], [0, 1, 2, 3]])
    assert np.all(broadcast_to_case().asnumpy() == expected)
|
||||
|
||||
|
||||
def test_np_expand_dims():
    """
    Feature: JIT Fallback
    Description: Test numpy.expand_dims() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def expand_dims_case():
        matrix = np.array(([1, 2], [3, 4]))
        return Tensor(np.expand_dims(matrix, axis=0))

    expected = np.array([[[1, 2], [3, 4]]])
    assert np.all(expand_dims_case().asnumpy() == expected)
|
||||
|
||||
|
||||
def test_np_squeeze():
    """
    Feature: JIT Fallback
    Description: Test numpy.squeeze() method in graph mode.
    Expectation: No exception.
    """
    @jit
    def squeeze_case():
        cube = np.arange(4).reshape(1, 2, 2)
        return Tensor(np.squeeze(cube))

    expected = np.array([[0, 1], [2, 3]])
    assert np.all(squeeze_case().asnumpy() == expected)
|
||||
|
||||
|
||||
def test_np_concat():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def concat_cases():
        upper = np.array([[1, 2], [3, 4]])
        lower = np.array([[5, 6], [7, 8]])
        # Four joining flavours over the same pair of blocks.
        cat = np.concatenate((upper, lower))
        stacked = np.stack((upper, lower), 0)
        horizontal = np.hstack((upper, lower))
        vertical = np.vstack((upper, lower))
        return Tensor(cat), Tensor(stacked), Tensor(horizontal), Tensor(vertical)

    out_concatenate, out_stack, out_hstack, out_vstack = concat_cases()
    assert np.all(out_concatenate.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
    assert np.all(out_stack.asnumpy() == np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
    assert np.all(out_hstack.asnumpy() == np.array([[1, 2, 5, 6], [3, 4, 7, 8]]))
    assert np.all(out_vstack.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
|
||||
|
||||
|
||||
def test_np_split():
    """
    Feature: JIT Fallback
    Description: Test numpy split method in graph mode.
    Expectation: No exception.
    """
    @jit
    def split_cases():
        square = np.arange(4).reshape(2, 2)
        # Plain, horizontal and vertical splits into two pieces each.
        halves = np.split(square, 2)
        columns = np.hsplit(square, 2)
        rows = np.vsplit(square, 2)
        return Tensor(halves), Tensor(columns), Tensor(rows)

    out_split, out_hsplit, out_vsplit = split_cases()
    assert np.all(out_split.asnumpy() == np.array([[[0, 1]], [[2, 3]]]))
    assert np.all(out_hsplit.asnumpy() == np.array([[[0], [2]], [[1], [3]]]))
    assert np.all(out_vsplit.asnumpy() == np.array([[[0, 1]], [[2, 3]]]))
|
||||
|
||||
|
||||
def test_np_element():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def element_cases():
        # Element-level array editing helpers.
        resized = np.resize(np.array([[1, 2, 3], [4, 5, 6]]), (3, 2))
        appended = np.append(np.array([[1, 2, 3], [4, 5, 6]]), [[7, 8, 9]], axis=0)
        inserted = np.insert(np.array([[1, 2], [3, 4], [5, 6]]), 3, [7, 8], axis=0)
        deleted = np.delete(np.arange(6).reshape(2, 3), 0, axis=0)
        deduped = np.unique(np.array([5, 2, 6, 2, 7, 5, 6, 8, 2, 9]))
        return Tensor(resized), Tensor(appended), Tensor(inserted), Tensor(deleted), Tensor(deduped)

    out_resize, out_append, out_insert, out_delete, out_unique = element_cases()
    assert np.all(out_resize.asnumpy() == np.array([[1, 2], [3, 4], [5, 6]]))
    assert np.all(out_append.asnumpy() == np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
    assert np.all(out_insert.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
    assert np.all(out_delete.asnumpy() == np.array([3, 4, 5]))
    assert np.all(out_unique.asnumpy() == np.array([2, 5, 6, 7, 8, 9]))
|
||||
|
||||
|
||||
def test_np_bitwise():
    """
    Feature: JIT Fallback
    Description: Test numpy bitwise method in graph mode.
    Expectation: No exception.
    """
    @jit
    def bitwise_cases():
        and_res = np.bitwise_and(13, 17)
        or_res = np.bitwise_or(13, 17)
        # uint8 so ~13 wraps to 242 instead of a negative value.
        not_res = np.invert(np.array([13], dtype=np.uint8))
        lshift_res = np.left_shift(10, 2)
        rshift_res = np.right_shift(40, 2)
        return Tensor(and_res), Tensor(or_res), Tensor(not_res), Tensor(lshift_res), Tensor(rshift_res)

    bitwise_and, bitwise_or, invert, left_shift, right_shift = bitwise_cases()
    assert bitwise_and.asnumpy() == 1
    assert bitwise_or.asnumpy() == 29
    assert np.all(invert.asnumpy() == np.array([242]))
    assert left_shift.asnumpy() == 40
    assert right_shift.asnumpy() == 10
|
||||
|
||||
|
||||
def test_np_char_1():
    """
    Feature: JIT Fallback
    Description: Test numpy char method in graph mode.
    Expectation: No exception.
    """
    @jit
    def char_cases():
        # Vectorized string operations from np.char.
        added = np.char.add(['MindSpore'], [' fallback'])
        repeated = np.char.multiply('fallback ', 3)
        centered = np.char.center('fallback', 10, fillchar='*')
        capitalized = np.char.capitalize('fallback')
        titled = np.char.title('fallback')
        lowered = np.char.lower('FALLBACK')
        uppered = np.char.upper('fallback')
        return Tensor(added), Tensor(repeated), Tensor(centered), Tensor(capitalized), \
               Tensor(titled), Tensor(lowered), Tensor(uppered)

    char_add, char_multiply, char_center, char_capitalize, char_title, char_lower, char_upper = char_cases()
    assert char_add.asnumpy() == 'MindSpore fallback'
    assert char_multiply.asnumpy() == 'fallback fallback fallback '
    assert char_center.asnumpy() == '*fallback*'
    assert char_capitalize.asnumpy() == 'Fallback'
    assert char_title.asnumpy() == 'Fallback'
    assert char_lower.asnumpy() == 'fallback'
    assert char_upper.asnumpy() == 'FALLBACK'
|
||||
|
||||
|
||||
def test_np_char_2():
    """
    Feature: JIT Fallback
    Description: Test numpy char method in graph mode.
    Expectation: No exception.
    """
    @jit
    def char_cases():
        # split + join round-trips the original sentence.
        words = np.char.split('MindSpore fallback')
        joined_words = np.char.join(' ', words)

        # splitlines + join turns the newline into a comma.
        lines = np.char.splitlines('MindSpore\nfallback')
        joined_lines = np.char.join(',', lines)

        stripped = np.char.strip('abc acd', 'a')
        replaced = np.char.replace('faooback', 'oo', 'll')
        encoded = np.char.encode('runoob', 'cp500')
        decoded = np.char.decode(encoded, 'cp500')
        return Tensor(joined_words), Tensor(joined_lines), Tensor(stripped), Tensor(replaced), Tensor(decoded)

    char_split, char_splitlines, char_strip, char_replace, char_decode = char_cases()
    assert char_split.asnumpy() == 'MindSpore fallback'
    assert char_splitlines.asnumpy() == 'MindSpore,fallback'
    assert char_strip.asnumpy() == 'bc acd'
    assert char_replace.asnumpy() == 'fallback'
    assert char_decode.asnumpy() == 'runoob'
|
||||
|
||||
|
||||
def test_np_degree():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def degree_cases():
        # Each trig value is inverted and converted back to degrees.
        sin_val = np.sin(30 * np.pi / 180)
        arcsin_deg = np.degrees(np.arcsin(sin_val))
        cos_val = np.cos(60 * np.pi / 180)
        arccos_deg = np.degrees(np.arccos(cos_val))
        tan_val = np.tan(45 * np.pi / 180)
        arctan_deg = np.degrees(np.arctan(tan_val))
        return Tensor(sin_val), Tensor(arcsin_deg), Tensor(cos_val), \
               Tensor(arccos_deg), Tensor(tan_val), Tensor(arctan_deg)

    out_sin, out_arcsin, out_cos, out_arccos, out_tan, out_arctan = degree_cases()
    assert np.isclose(out_sin.asnumpy(), 0.5)
    assert np.isclose(out_arcsin.asnumpy(), 30)
    assert np.isclose(out_cos.asnumpy(), 0.5)
    assert np.isclose(out_arccos.asnumpy(), 60)
    assert np.isclose(out_tan.asnumpy(), 1)
    assert np.isclose(out_arctan.asnumpy(), 45)
|
||||
|
||||
|
||||
def test_np_math_1():
    """
    Feature: JIT Fallback
    Description: Test numpy math method in graph mode.
    Expectation: No exception.
    """
    @jit
    def arithmetic_cases():
        lhs = np.array([6, 12])
        rhs = np.array([3, 5])
        added = np.add(lhs, rhs)
        subtracted = np.subtract(lhs, rhs)
        multiplied = np.multiply(lhs, rhs)
        divided = np.divide(lhs, rhs)
        modded = np.mod(lhs, rhs)
        remaindered = np.remainder(lhs, rhs)
        return Tensor(added), Tensor(subtracted), Tensor(multiplied), \
               Tensor(divided), Tensor(modded), Tensor(remaindered)

    out_add, out_subtract, out_multiply, out_divide, out_mod, out_remainder = arithmetic_cases()
    assert np.all(out_add.asnumpy() == np.array([9, 17]))
    assert np.all(out_subtract.asnumpy() == np.array([3, 7]))
    assert np.all(out_multiply.asnumpy() == np.array([18, 60]))
    assert np.allclose(out_divide.asnumpy(), np.array([2, 2.4]))
    assert np.all(out_mod.asnumpy() == np.array([0, 2]))
    assert np.all(out_remainder.asnumpy() == np.array([0, 2]))
|
||||
|
||||
|
||||
def test_np_math_2():
    """
    Feature: JIT Fallback
    Description: Test numpy math method in graph mode.
    Expectation: No exception.
    """
    @jit
    def rounding_cases():
        values = np.array([0.1, 1.4, 2.51, 3.3])
        rounded = np.around(values)
        floored = np.floor(values)
        ceiled = np.ceil(values)
        reciprocals = np.reciprocal(np.array([0.25, 1, 2]))
        powered = np.power(np.array([1.0, 2.0, 3.0]), 2)
        return Tensor(rounded), Tensor(floored), Tensor(ceiled), Tensor(reciprocals), Tensor(powered)

    out_around, out_floor, out_ceil, out_reciprocal, out_power = rounding_cases()
    assert np.allclose(out_around.asnumpy(), np.array([0, 1, 3, 3]))
    assert np.allclose(out_floor.asnumpy(), np.array([0, 1, 2, 3]))
    assert np.allclose(out_ceil.asnumpy(), np.array([1, 2, 3, 4]))
    assert np.allclose(out_reciprocal.asnumpy(), np.array([4, 1, 0.5]))
    assert np.allclose(out_power.asnumpy(), np.array([1, 4, 9]))
|
||||
|
||||
|
||||
def test_np_statistic():
    """
    Feature: JIT Fallback
    Description: Test numpy statistic method in graph mode.
    Expectation: No exception.
    """
    @jit
    def statistic_cases():
        samples = np.array([1, 2, 3, 4, 5])
        minimum = np.amin(samples)
        maximum = np.amax(samples)
        value_range = np.ptp(samples)
        pct50 = np.percentile(samples, 50)
        median = np.median(samples)
        mean = np.mean(samples)
        average = np.average(samples)
        std_dev = np.std(samples)
        variance = np.var(samples)
        return Tensor(minimum), Tensor(maximum), Tensor(value_range), Tensor(pct50), \
               Tensor(median), Tensor(mean), Tensor(average), Tensor(std_dev), Tensor(variance)

    out_amin, out_amax, out_ptp, out_percentile, out_median, out_mean, out_average, out_std, out_var = \
        statistic_cases()
    assert out_amin.asnumpy() == 1
    assert out_amax.asnumpy() == 5
    assert out_ptp.asnumpy() == 4
    assert np.isclose(out_percentile.asnumpy(), 3.0)
    assert out_median.asnumpy() == 3
    assert out_mean.asnumpy() == 3
    assert out_average.asnumpy() == 3
    assert np.allclose(out_std.asnumpy(), np.array([1.41421356]))
    assert np.isclose(out_var.asnumpy(), 2.0)
|
||||
|
||||
|
||||
def test_np_sort():
    """
    Feature: JIT Fallback
    Description: Test numpy method in graph mode.
    Expectation: No exception.
    """
    @jit
    def sort_cases():
        samples = np.array([3, 1, 2, 4, 5])
        sorted_vals = np.sort(samples)
        sort_order = np.argsort(samples)
        max_pos = np.argmax(samples)
        min_pos = np.argmin(samples)
        nonzero_pos = np.nonzero(samples)
        above_four = np.where(samples > 4)
        return Tensor(sorted_vals), Tensor(sort_order), Tensor(max_pos), \
               Tensor(min_pos), Tensor(nonzero_pos), Tensor(above_four)

    out_sort, out_argsort, out_argmax, out_argmin, out_nonzero, out_where = sort_cases()
    assert np.all(out_sort.asnumpy() == np.array([1, 2, 3, 4, 5]))
    assert np.all(out_argsort.asnumpy() == np.array([1, 2, 0, 3, 4]))
    assert out_argmax.asnumpy() == 4
    assert out_argmin.asnumpy() == 1
    assert np.all(out_nonzero.asnumpy() == np.array([0, 1, 2, 3, 4]))
    assert np.all(out_where.asnumpy() == np.array([4]))
|
||||
|
||||
|
||||
def test_np_extract():
    """
    Feature: JIT Fallback
    Description: Test numpy extract method in graph mode.
    Expectation: No exception.
    """
    @jit
    def extract_evens():
        samples = np.array([3, 1, 2, 4, 5])
        is_even = samples % 2 == 0
        return Tensor(np.extract(is_even, samples))

    out_extract = extract_evens()
    assert np.all(out_extract.asnumpy() == np.array([2, 4]))
|
||||
|
||||
|
||||
def test_np_matrix():
    """
    Feature: JIT Fallback
    Description: Test numpy matrix method in graph mode.
    Expectation: No exception.
    """
    @jit
    def matrix_cases():
        lhs = np.arange(4).reshape(2, 2)
        rhs = np.array([[2, 2], [3, 3]])
        transposed = lhs.T
        dot_res = np.dot(lhs, rhs)
        vdot_res = np.vdot(lhs, rhs)
        inner_res = np.inner(lhs, rhs)
        matmul_res = np.matmul(lhs, rhs)
        det_res = np.linalg.det(lhs)
        inv_res = np.linalg.inv(lhs)
        return Tensor(transposed), Tensor(dot_res), Tensor(vdot_res), Tensor(inner_res), \
               Tensor(matmul_res), Tensor(det_res), Tensor(inv_res)

    out_t, out_dot, out_vdot, out_inner, out_matmul, out_det, out_inv = matrix_cases()
    assert np.all(out_t.asnumpy() == np.array([[0, 2], [1, 3]]))
    assert np.all(out_dot.asnumpy() == np.array([[3, 3], [13, 13]]))
    assert out_vdot.asnumpy() == 17
    assert np.all(out_inner.asnumpy() == np.array([[2, 3], [10, 15]]))
    assert np.all(out_matmul.asnumpy() == np.array([[3, 3], [13, 13]]))
    assert np.isclose(out_det.asnumpy(), -2.0)
    assert np.allclose(out_inv.asnumpy(), np.array([[-1.5, 0.5], [1, 0]]))
|
|
@ -16,7 +16,8 @@
|
|||
import pytest
|
||||
import numpy as np
|
||||
import mindspore.nn as nn
|
||||
from mindspore import context, Tensor
|
||||
from mindspore import context, Tensor, jit
|
||||
from mindspore.ops import Primitive
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
@ -163,6 +164,34 @@ def test_parse_slice():
|
|||
assert out[0].asnumpy() == 11 and out[1].asnumpy() == 22
|
||||
|
||||
|
||||
def test_list_count():
    """
    Feature: Fallback feature
    Description: support attr/method of builtin type.
    Expectation: No exception.
    """
    @jit
    def count_ones():
        # list(...) call kept on purpose: the builtin constructor is part of what
        # the fallback path must support.
        values = list([1, 2, 3])
        return values.count(1)

    assert count_ones() == 1
|
||||
|
||||
|
||||
def test_list_append():
    """
    Feature: Fallback feature
    Description: support attr/method of builtin type.
    Expectation: No exception.
    """
    @jit
    def append_four():
        # list(...) call kept on purpose: the builtin constructor is part of what
        # the fallback path must support.
        values = list([1, 2, 3])
        values.append(4)
        return Tensor(values)

    assert np.all(append_four().asnumpy() == np.array([1, 2, 3, 4]))
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='Not support graph fallback feature yet')
|
||||
def test_parse_subscript():
|
||||
"""
|
||||
|
@ -256,3 +285,36 @@ def test_parse_ifexpr():
|
|||
net = Network()
|
||||
out = net()
|
||||
assert out == 0
|
||||
|
||||
|
||||
def test_fallback_tensor_array_astype():
    """
    Feature: JIT Fallback
    Description: Test Tensor(array) with astype() in graph mode.
    Expectation: No exception.
    """
    @jit
    def foo():
        converted = Tensor([1.1, -2.1]).astype("float32")
        return converted

    # Smoke test: construction and dtype conversion must not raise.
    print(foo())
|
||||
|
||||
|
||||
def test_fallback_tuple_with_mindspore_function():
    """
    Feature: JIT Fallback
    Description: Test fallback when local input has tuple with mindspore function type, such as Cell, Primitive.
    Expectation: No exception.
    """
    def isinstance_by_mro(obj, candidate_types):
        # Hand-rolled isinstance over the MRO so the tuple of mindspore
        # function types is consumed inside the fallback path.
        mro = type(obj).mro()
        for candidate in candidate_types:
            if candidate in mro:
                return True
        return False

    @jit
    def foo():
        return isinstance_by_mro(np.array(1), (np.ndarray, nn.Cell, Primitive))

    assert foo()
|
||||
|
|
|
@ -15,8 +15,7 @@
|
|||
""" test_parse_numpy """
|
||||
import pytest
|
||||
import numpy as np
|
||||
from mindspore import nn
|
||||
from mindspore import context
|
||||
from mindspore import nn, context, jit, Tensor
|
||||
|
||||
context.set_context(mode=context.GRAPH_MODE)
|
||||
|
||||
|
@ -67,3 +66,18 @@ def test_use_numpy_module():
|
|||
with pytest.raises(RuntimeError) as err:
|
||||
net()
|
||||
assert "Should not use Python object in runtime" in str(err.value)
|
||||
|
||||
|
||||
def test_np_calculate():
    """
    Feature: Fallback feature.
    Description: Support numpy calculation.
    Expectation: No exception.
    """
    @jit
    def modulo_case():
        values = np.array([3, 1, 2, 4, 5])
        parity = values % 2
        return Tensor(parity)

    expected = np.array([1, 1, 0, 0, 1])
    assert np.all(modulo_case().asnumpy() == expected)
|
||||
|
|
Loading…
Reference in New Issue