forked from mindspore-Ecosystem/mindspore

Add prim name to error message for nn_ops.py

commit 6dd72f654a
parent 475f62f680
@@ -117,10 +117,12 @@ class Validator:
         """Integer value judgment."""
         rel_fn = Rel.get_fns(rel)
-        if not rel_fn(arg_value, value):
+        type_mismatch = not isinstance(arg_value, int) or isinstance(arg_value, bool)
+        excp_cls = TypeError if type_mismatch else ValueError
+        if type_mismatch or not rel_fn(arg_value, value):
             rel_str = Rel.get_strs(rel).format(value)
             msg_prefix = f'For \'{prim_name}\' the' if prim_name else "The"
-            raise ValueError(f'{msg_prefix} `{arg_name}` should be an int and must {rel_str}, but got {arg_value}.')
+            raise excp_cls(f'{msg_prefix} `{arg_name}` should be an int and must {rel_str}, but got `{arg_value}`'
+                           f' with type `{type(arg_value).__name__}`.')
         return arg_value
 
     @staticmethod
@@ -137,10 +139,11 @@ class Validator:
         """Method for checking whether an int value is in some range."""
         rel_fn = Rel.get_fns(rel)
-        if not rel_fn(arg_value, lower_limit, upper_limit):
+        type_mismatch = not isinstance(arg_value, int)
+        excp_cls = TypeError if type_mismatch else ValueError
+        if type_mismatch or not rel_fn(arg_value, lower_limit, upper_limit):
             rel_str = Rel.get_strs(rel).format(lower_limit, upper_limit)
-            raise ValueError(f'For \'{prim_name}\' the `{arg_name}` should be an int in range {rel_str},'
-                             f' but got {arg_value}.')
+            raise excp_cls(f'For \'{prim_name}\' the `{arg_name}` should be an int in range {rel_str},'
+                           f' but got `{arg_value}` with type `{type(arg_value).__name__}`.')
         return arg_value
 
     @staticmethod
@@ -192,19 +195,23 @@ class Validator:
 
     @staticmethod
     def check_const_input(arg_name, arg_value, prim_name):
-        """Check valid value."""
+        """Checks valid value."""
         if arg_value is None:
             raise ValueError(f'For \'{prim_name}\' the `{arg_name}` must be a const input, but got {arg_value}.')
 
     @staticmethod
-    def check_scalar_type_same(args, valid_values, prim_name):
-        """check whether the types of inputs are the same."""
+    def check_type_same(args, valid_values, prim_name):
+        """Checks whether the types of inputs are the same."""
         def _check_tensor_type(arg):
             arg_key, arg_val = arg
             elem_type = arg_val
+            type_names = []
             if not elem_type in valid_values:
-                raise TypeError(f'For \'{prim_name}\' type of `{arg_key}` should be in {valid_values},'
-                                f' but `{arg_key}` is {elem_type}.')
+                for t in valid_values:
+                    type_names.append(str(t))
+                types_info = '[' + ", ".join(type_names) + ']'
+                raise TypeError(f'For \'{prim_name}\' type of `{arg_key}` should be in {types_info},'
+                                f' but got {elem_type}.')
             return (arg_key, elem_type)
 
         def _check_types_same(arg1, arg2):
@@ -212,7 +219,7 @@ class Validator:
             arg2_name, arg2_type = arg2
             if arg1_type != arg2_type:
                 raise TypeError(f'For \'{prim_name}\' type of `{arg2_name}` should be same as `{arg1_name}`,'
-                                f' but `{arg1_name}` is {arg1_type} and `{arg2_name}` is {arg2_type}.')
+                                f' but `{arg1_name}` with type {arg1_type} and `{arg2_name}` with type {arg2_type}.')
             return arg1
 
         elem_types = map(_check_tensor_type, args.items())
@@ -221,25 +228,8 @@ class Validator:
     @staticmethod
     def check_tensor_type_same(args, valid_values, prim_name):
         """Checks whether the element types of input tensors are the same."""
-        def _check_tensor_type(arg):
-            arg_key, arg_val = arg
-            Validator.check_subclass(arg_key, arg_val, mstype.tensor, prim_name)
-            elem_type = arg_val.element_type()
-            if not elem_type in valid_values:
-                raise TypeError(f'For \'{prim_name}\' element type of `{arg_key}` should be in {valid_values},'
-                                f' but element type of `{arg_key}` is {elem_type}.')
-            return (arg_key, elem_type)
-
-        def _check_types_same(arg1, arg2):
-            arg1_name, arg1_type = arg1
-            arg2_name, arg2_type = arg2
-            if arg1_type != arg2_type:
-                raise TypeError(f'For \'{prim_name}\' element type of `{arg2_name}` should be same as `{arg1_name}`,'
-                                f' but `{arg1_name}` is {arg1_type} and `{arg2_name}` is {arg2_type}.')
-            return arg1
-
-        elem_types = map(_check_tensor_type, args.items())
-        reduce(_check_types_same, elem_types)
+        tensor_types = [mstype.tensor_type(t) for t in valid_values]
+        Validator.check_type_same(args, tensor_types, prim_name)
 
     @staticmethod
     def check_scalar_or_tensor_type_same(args, valid_values, prim_name, allow_mix=False):
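
Note: the thread running through these Validator hunks is that a wrong *type* now raises TypeError while a wrong *value* still raises ValueError, with the prim name and the offending type spelled out in the message. A minimal standalone sketch of the excp_cls pattern (the relation check is hardcoded to "> 0" here for brevity; the real validator dispatches through Rel.get_fns):

    def check_positive_int(arg_name, arg_value, prim_name=None):
        """Sketch of the excp_cls pattern: TypeError for bad types, ValueError otherwise."""
        type_mismatch = not isinstance(arg_value, int) or isinstance(arg_value, bool)
        excp_cls = TypeError if type_mismatch else ValueError
        if type_mismatch or arg_value <= 0:
            msg_prefix = f"For '{prim_name}' the" if prim_name else "The"
            raise excp_cls(f'{msg_prefix} `{arg_name}` should be an int and must be > 0,'
                           f' but got `{arg_value}` with type `{type(arg_value).__name__}`.')
        return arg_value

    check_positive_int('ksize', 3, 'MaxPool')      # ok, returns 3
    # check_positive_int('ksize', 1.5, 'MaxPool')  # raises TypeError
    # check_positive_int('ksize', -1, 'MaxPool')   # raises ValueError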
@@ -34,7 +34,7 @@ GRAPH_MODE = 0
 PYNATIVE_MODE = 1
 
 
-def _make_directory(path: str):
+def _make_directory(path):
     """Make directory."""
     real_path = None
     if path is None or not isinstance(path, str) or path.strip() == "":
File diff suppressed because it is too large
@@ -41,7 +41,7 @@ class TestInputs:
         dr.piecewise_constant_lr(milestone1, learning_rates)
 
         milestone2 = [1.0, 2.0, True]
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.piecewise_constant_lr(milestone2, learning_rates)
 
     def test_learning_rates1(self):
@@ -92,13 +92,13 @@ class TestInputs:
 
     def test_total_step1(self):
         total_step1 = 2.0
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.exponential_decay_lr(learning_rate, decay_rate, total_step1, step_per_epoch, decay_epoch)
 
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.cosine_decay_lr(min_lr, max_lr, total_step1, step_per_epoch, decay_epoch)
 
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step1, step_per_epoch, decay_epoch, power)
 
     def test_total_step2(self):
@@ -114,13 +114,13 @@ class TestInputs:
 
     def test_step_per_epoch1(self):
         step_per_epoch1 = True
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch1, decay_epoch)
 
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch1, decay_epoch)
 
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch1, decay_epoch, power)
 
     def test_step_per_epoch2(self):
@@ -136,13 +136,13 @@ class TestInputs:
 
     def test_decay_epoch1(self):
         decay_epoch1 = 'm'
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch1)
 
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
             dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch1)
 
-        with pytest.raises(ValueError):
+        with pytest.raises(TypeError):
            dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch1, power)
 
     def test_decay_epoch2(self):
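
Note the milestone2 and step_per_epoch1 cases above: `True` must fail the int check even though `isinstance(True, int)` is true in Python, which is why the validator pairs the isinstance test with an explicit bool exclusion. A minimal sketch of the check these tests exercise (the helper name is illustrative, not the real API):

    def _expect_non_bool_int(name, value):
        """bool is a subclass of int, so it has to be excluded explicitly."""
        if not isinstance(value, int) or isinstance(value, bool):
            raise TypeError(f'The `{name}` should be an int, but got `{value}`'
                            f' with type `{type(value).__name__}`.')
        return value

    _expect_non_bool_int('step_per_epoch', 3)       # ok
    # _expect_non_bool_int('step_per_epoch', True)  # TypeError, not ValueError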
@@ -60,7 +60,7 @@ def test_ssim_max_val_zero():
         net = SSIMNet(max_val)
 
 def test_ssim_filter_size_float():
-    with pytest.raises(ValueError):
+    with pytest.raises(TypeError):
         net = SSIMNet(filter_size=1.1)
 
 def test_ssim_filter_size_zero():
@@ -516,7 +516,7 @@ test_cases = [
 
 test_cases_for_verify_exception = [
     ('Conv2d_ValueError_1', {
-        'block': (lambda _: P.Conv2D(3, 4, mode=-2.0), {'exception': ValueError}),
+        'block': (lambda _: P.Conv2D(3, 4, mode=-2.0), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('Conv2d_ValueError_2', {
@@ -528,7 +528,7 @@ test_cases_for_verify_exception = [
         'desc_inputs': [0],
     }),
     ('MaxPoolWithArgmax_ValueError_2', {
-        'block': (lambda _: P.MaxPoolWithArgmax(ksize='1'), {'exception': ValueError}),
+        'block': (lambda _: P.MaxPoolWithArgmax(ksize='1'), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('MaxPoolWithArgmax_ValueError_3', {
@@ -540,7 +540,7 @@ test_cases_for_verify_exception = [
         'desc_inputs': [0],
     }),
     ('FusedBatchNorm_ValueError_1', {
-        'block': (lambda _: P.FusedBatchNorm(mode="1", epsilon=1e-5, momentum=0.1), {'exception': ValueError}),
+        'block': (lambda _: P.FusedBatchNorm(mode="1", epsilon=1e-5, momentum=0.1), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('FusedBatchNorm_ValueError_2', {
@@ -560,31 +560,31 @@ test_cases_for_verify_exception = [
         'desc_inputs': [0],
     }),
     ('Softmax_ValueError_1', {
-        'block': (lambda _: P.Softmax("1"), {'exception': ValueError}),
+        'block': (lambda _: P.Softmax("1"), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('Softmax_ValueError_2', {
-        'block': (lambda _: P.Softmax(1.1), {'exception': ValueError}),
+        'block': (lambda _: P.Softmax(1.1), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('Softmax_ValueError_3', {
-        'block': (lambda _: P.Softmax(axis="1"), {'exception': ValueError}),
+        'block': (lambda _: P.Softmax(axis="1"), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('DropoutGenMask_ValueError_1', {
-        'block': (lambda _: P.DropoutGenMask(Seed0="seed0"), {'exception': ValueError}),
+        'block': (lambda _: P.DropoutGenMask(Seed0="seed0"), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('DropoutGenMask_ValueError_2', {
-        'block': (lambda _: P.DropoutGenMask(Seed0=1.0), {'exception': ValueError}),
+        'block': (lambda _: P.DropoutGenMask(Seed0=1.0), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('DropoutGenMask_ValueError_3', {
-        'block': (lambda _: P.DropoutGenMask(Seed1="seed1"), {'exception': ValueError}),
+        'block': (lambda _: P.DropoutGenMask(Seed1="seed1"), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('DropoutGenMask_ValueError_4', {
-        'block': (lambda _: P.DropoutGenMask(Seed1=2.0), {'exception': ValueError}),
+        'block': (lambda _: P.DropoutGenMask(Seed1=2.0), {'exception': TypeError}),
         'desc_inputs': [0],
     }),
     ('MaxPool2d_ValueError_1', {
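
Each entry above pairs a constructor thunk with the exception the test pipeline should observe while building the block. A minimal sketch of what a harness presumably does with one entry (a hypothetical runner, not the actual mindspore_test_framework pipeline):

    import pytest
    from mindspore.ops import operations as P

    def run_exception_case(case):
        """Build the block and assert the declared exception is raised."""
        build, expect = case['block']
        with pytest.raises(expect['exception']):
            build(None)  # for these cases, construction itself validates args

    run_exception_case({'block': (lambda _: P.Softmax(1.1), {'exception': TypeError}),
                        'desc_inputs': [0]})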
@@ -0,0 +1,463 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+""" test ops """
+import functools
+import numpy as np
+from mindspore import ops
+from mindspore.ops import functional as F
+from mindspore.ops import operations as P
+from mindspore.ops import composite as C
+from mindspore.ops.operations import _grad_ops as G
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.common import dtype as mstype
+from mindspore.common.parameter import Parameter
+from ..ut_filter import non_graph_engine
+from mindspore.common.api import _executor
+
+from ....mindspore_test_framework.mindspore_test import mindspore_test
+from ....mindspore_test_framework.pipeline.forward.compile_forward\
+    import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config,
+            pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
+from ....mindspore_test_framework.pipeline.gradient.compile_gradient\
+    import pipeline_for_compile_grad_ge_graph_for_case_by_case_config
+
+
+class Conv2DBackpropInputNet(nn.Cell):
+    def __init__(self, net, x_shape):
+        super(Conv2DBackpropInputNet, self).__init__()
+        self.net = net
+        self.x_shape = x_shape
+
+    def construct(self, dout, w):
+        return self.net(dout, w, self.x_shape)
+
+
+class TopKNet(nn.Cell):
+    def __init__(self, net, k):
+        super(TopKNet, self).__init__()
+        self.net = net
+        self.k = k
+
+    def construct(self, x):
+        return self.net(x, self.k)
+
+
+raise_set = [
+    # input is scalar
+    ('Flatten0', {
+        'block': (P.Flatten(), {'exception': TypeError, 'error_keywords': ['Flatten']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # dim of input is zero
+    ('Flatten1', {
+        'block': (P.Flatten(), {'exception': ValueError, 'error_keywords': ['Flatten']}),
+        'desc_inputs': [F.scalar_to_tensor(5.0)],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('Softmax0', {
+        'block': (P.Softmax(), {'exception': TypeError, 'error_keywords': ['Softmax']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # axis is empty tuple
+    ('Softmax1', {
+        'block': (P.Softmax(axis=()), {'exception': ValueError, 'error_keywords': ['Softmax']}),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # axis value is not in range
+    ('Softmax2', {
+        'block': (P.Softmax(axis=2), {'exception': ValueError, 'error_keywords': ['Softmax']}),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('LogSoftmax0', {
+        'block': (P.LogSoftmax(), {'exception': TypeError, 'error_keywords': ['LogSoftmax']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # axis value is not in range
+    ('LogSoftmax1', {
+        'block': (P.LogSoftmax(axis=2), {'exception': ValueError, 'error_keywords': ['LogSoftmax']}),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('ReLU0', {
+        'block': (P.ReLU(), {'exception': TypeError, 'error_keywords': ['ReLU']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # input is Tensor(Bool)
+    ('ReLU1', {
+        'block': (P.ReLU(), {'exception': TypeError, 'error_keywords': ['ReLU']}),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('ReLU60', {
+        'block': (P.ReLU6(), {'exception': TypeError, 'error_keywords': ['ReLU6']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # input is Tensor(int32)
+    ('ReLU61', {
+        'block': (P.ReLU6(), {'exception': TypeError, 'error_keywords': ['ReLU6']}),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('Elu0', {
+        'block': (P.Elu(), {'exception': TypeError, 'error_keywords': ['Elu']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # input is Tensor(int32)
+    ('Elu1', {
+        'block': (P.Elu(alpha=0.9), {'exception': TypeError, 'error_keywords': ['Elu']}),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('Sigmoid0', {
+        'block': (P.Sigmoid(), {'exception': TypeError, 'error_keywords': ['Sigmoid']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # input is Tensor(int32)
+    ('Sigmoid1', {
+        'block': (P.Sigmoid(), {'exception': TypeError, 'error_keywords': ['Sigmoid']}),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('Tanh0', {
+        'block': (P.Tanh(), {'exception': TypeError, 'error_keywords': ['Tanh']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('BatchNorm0', {
+        'block': (P.BatchNorm(is_training=False), {'exception': TypeError, 'error_keywords': ['BatchNorm']}),
+        'desc_inputs': [5.0, 5.0, 5.0, 5.0, 5.0],
+        'skip': ['backward']}),
+    # is_training=False and mean=None
+    ('BatchNorm1', {
+        'block': (P.BatchNorm(is_training=False), {'exception': TypeError, 'error_keywords': ['BatchNorm']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)),
+                        Tensor(np.ones([5, 3]).astype(np.float32)), None, None],
+        'skip': ['backward']}),
+    # is_training=True and mean=None
+    ('BatchNorm2', {
+        'block': (P.BatchNorm(is_training=True), {'exception': TypeError, 'error_keywords': ['BatchNorm']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
+                        Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float16)),
+                        Tensor(np.ones([3]).astype(np.float32))],
+        'skip': ['backward']}),
+    # scale and bias rank > 1
+    ('BatchNorm3', {
+        'block': (P.BatchNorm(is_training=True), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)),
+                        Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
+                        Tensor(np.ones([3]).astype(np.float32))],
+        'skip': ['backward']}),
+    # scale and bias shape not match
+    ('BatchNorm4', {
+        'block': (P.BatchNorm(is_training=True), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
+                        Tensor(np.ones([7]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
+                        Tensor(np.ones([3]).astype(np.float32))],
+        'skip': ['backward']}),
+    # is_training=False, mean and variance shape not match
+    ('BatchNorm5', {
+        'block': (P.BatchNorm(is_training=False), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
+                        Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
+                        Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+    # is_training=False, mean and scale shape not match
+    ('BatchNorm6', {
+        'block': (P.BatchNorm(is_training=False), {'exception': ValueError, 'error_keywords': ['BatchNorm']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)),
+                        Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32)),
+                        Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('Conv2D0', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [5.0, 5.0],
+        'skip': ['backward']}),
+    # input is Tensor(bool)
+    ('Conv2D1', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # input x and w type mismatch
+    ('Conv2D2', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float16))],
+        'skip': ['backward']}),
+    # rank of x is not 4
+    ('Conv2D3', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))],
+        'skip': ['backward']}),
+    # rank of 2 is not 4
+    ('Conv2D4', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))],
+        'skip': ['backward']}),
+    # x_shape[1] / group != w_shape[1]
+    ('Conv2D5', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))],
+        'skip': ['backward']}),
+    # out_channel != w_shape[0]
+    ('Conv2D6', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))],
+        'skip': ['backward']}),
+    # kernel_size != w_shape[2:4]
+    ('Conv2D7', {
+        'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}),
+        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('DepthwiseConv2dNative0', {
+        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
+                  {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}),
+        'desc_inputs': [5.0, 5.0],
+        'skip': ['backward']}),
+    # input is Tensor(bool)
+    ('DepthwiseConv2dNative1', {
+        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
+                  {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # input x and w type mismatch
+    ('DepthwiseConv2dNative2', {
+        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
+                  {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float16))],
+        'skip': ['backward']}),
+    # rank of x is not 4
+    ('DepthwiseConv2dNative3', {
+        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
+                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
+        'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))],
+        'skip': ['backward']}),
+    # rank of 2 is not 4
+    ('DepthwiseConv2dNative4', {
+        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
+                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
+        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))],
+        'skip': ['backward']}),
+    # x_shape[1] != w_shape[1]
+    ('DepthwiseConv2dNative5', {
+        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
+                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
+        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))],
+        'skip': ['backward']}),
+    # kernel_size != w_shape[2:4]
+    ('DepthwiseConv2dNative6', {
+        'block': (P.DepthwiseConv2dNative(2, (5, 5)),
+                  {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}),
+        'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('MaxPoolWithArgmax0', {
+        'block': (P.MaxPoolWithArgmax(), {'exception': TypeError, 'error_keywords': ['MaxPoolWithArgmax']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # input is Tensor(bool)
+    ('MaxPoolWithArgmax1', {
+        'block': (P.MaxPoolWithArgmax(), {'exception': TypeError, 'error_keywords': ['MaxPoolWithArgmax']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # rank of x is not 4
+    ('MaxPoolWithArgmax2', {
+        'block': (P.MaxPoolWithArgmax(), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}),
+        'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))],
+        'skip': ['backward']}),
+    # kernel size is invalid(very large)
+    ('MaxPoolWithArgmax3', {
+        'block': (P.MaxPoolWithArgmax(ksize=50), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}),
+        'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('MaxPool0', {
+        'block': (P.MaxPool(), {'exception': TypeError, 'error_keywords': ['MaxPool']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # rank of x is not 4
+    ('MaxPool1', {
+        'block': (P.MaxPool(), {'exception': ValueError, 'error_keywords': ['MaxPool']}),
+        'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))],
+        'skip': ['backward']}),
+    # rank of x is not 4
+    ('MaxPool2', {
+        'block': (P.MaxPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['MaxPool']}),
+        'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('AvgPool0', {
+        'block': (P.AvgPool(), {'exception': TypeError, 'error_keywords': ['AvgPool']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # rank of x is not 4
+    ('AvgPool1', {
+        'block': (P.AvgPool(), {'exception': ValueError, 'error_keywords': ['AvgPool']}),
+        'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))],
+        'skip': ['backward']}),
+    # rank of x is not 4
+    ('AvgPool2', {
+        'block': (P.AvgPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['AvgPool']}),
+        'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('Conv2DBackpropInput0', {
+        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)),
+                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
+        'desc_inputs': [5.0, 5.0],
+        'skip': ['backward']}),
+    # input is Tensor(bool)
+    ('Conv2DBackpropInput1', {
+        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)),
+                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # types of doutput and w mismatch
+    ('Conv2DBackpropInput2', {
+        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)),
+                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+    # types x_size is not tuple
+    ('Conv2DBackpropInput3', {
+        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), 2),
+                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+    # types x_size is not tuple(int,...)
+    ('Conv2DBackpropInput4', {
+        'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3.0)),
+                  {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('BiasAdd0', {
+        'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}),
+        'desc_inputs': [5.0, 5.0],
+        'skip': ['backward']}),
+    # input is Tensor(bool)
+    ('BiasAdd1', {
+        'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # types of x and bias mismatch
+    ('BiasAdd2', {
+        'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+    # rank of x less than 2
+    ('BiasAdd3', {
+        'block': (P.BiasAdd(), {'exception': ValueError, 'error_keywords': ['BiasAdd']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+    # rank of bias is not equal to 1
+    ('BiasAdd4', {
+        'block': (P.BiasAdd(), {'exception': ValueError, 'error_keywords': ['BiasAdd']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32))],
+        'skip': ['backward']}),
+    # b_shape[0] != x_shape[1]
+    ('BiasAdd5', {
+        'block': (P.BiasAdd(), {'exception': ValueError, 'error_keywords': ['BiasAdd']}),
+        'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input x is scalar
+    ('TopK0', {
+        'block': (TopKNet(P.TopK(), 5), {'exception': TypeError, 'error_keywords': ['TopK']}),
+        'desc_inputs': [5.0],
+        'skip': ['backward']}),
+    # input x is Tensor(bool)
+    ('TopK1', {
+        'block': (TopKNet(P.TopK(), 5), {'exception': TypeError, 'error_keywords': ['TopK']}),
+        'desc_inputs': [Tensor(np.ones([10]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # k is not integer
+    ('TopK2', {
+        'block': (TopKNet(P.TopK(), 5.0), {'exception': TypeError, 'error_keywords': ['TopK']}),
+        'desc_inputs': [Tensor(np.ones([10]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('SoftmaxCrossEntropyWithLogits0', {
+        'block': (P.SoftmaxCrossEntropyWithLogits(),
+                  {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [5.0, 5.0],
+        'skip': ['backward']}),
+    # input is Tensor(bool)
+    ('SoftmaxCrossEntropyWithLogits1', {
+        'block': (P.SoftmaxCrossEntropyWithLogits(),
+                  {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # types of logits and labels mismatch
+    ('SoftmaxCrossEntropyWithLogits2', {
+        'block': (P.SoftmaxCrossEntropyWithLogits(),
+                  {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.float16)), Tensor(np.ones([5]).astype(np.float32))],
+        'skip': ['backward']}),
+    # shapes of logits and labels mismatch
+    ('SoftmaxCrossEntropyWithLogits3', {
+        'block': (P.SoftmaxCrossEntropyWithLogits(),
+                  {'exception': ValueError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32))],
+        'skip': ['backward']}),
+
+    # input is scalar
+    ('SparseSoftmaxCrossEntropyWithLogits0', {
+        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
+                  {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [5.0, 5.0],
+        'skip': ['backward']}),
+    # logits is Tensor(bool)
+    ('SparseSoftmaxCrossEntropyWithLogits1', {
+        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
+                  {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # labels is Tensor(bool)
+    ('SparseSoftmaxCrossEntropyWithLogits2', {
+        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
+                  {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.bool_))],
+        'skip': ['backward']}),
+    # logits_shape[0] != labels_shape[0]
+    ('SparseSoftmaxCrossEntropyWithLogits3', {
+        'block': (P.SparseSoftmaxCrossEntropyWithLogits(),
+                  {'exception': ValueError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}),
+        'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([3]).astype(np.int32))],
+        'skip': ['backward']}),
+]
+
+
+@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
+def test_check_exception():
+    return raise_set
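
Besides the exception class, every case in the new file lists 'error_keywords' — here always the primitive name that this commit adds to the messages. A hedged sketch of how such keyword matching can be checked (the pipeline's actual matching logic is not shown in this diff; the helper below is illustrative):

    import pytest

    def assert_error_mentions(excinfo, keywords):
        """Check that each expected keyword (e.g. the prim name) appears in the message."""
        msg = str(excinfo.value)
        missing = [kw for kw in keywords if kw not in msg]
        assert not missing, f'error message {msg!r} is missing keywords {missing}'

    with pytest.raises(TypeError) as excinfo:
        raise TypeError("For 'Flatten' the input should be a Tensor, but got float.")
    assert_error_mentions(excinfo, ['Flatten'])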