clean pylint

jinyaohui 2020-05-18 10:31:46 +08:00
parent 93f6fc0ab0
commit bcfaff97f9
408 changed files with 4048 additions and 2340 deletions
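All of the hunks below are mechanical pylint cleanups with no behavior changes. The recurring fixes are: two blank lines between top-level classes and functions, a blank line after class docstrings, spaces around binary operators, a space before line-continuation backslashes, and splitting over-long import lines. A schematic before/after of these patterns (the names here are placeholders, not code from this commit):

```python
tid = 'case'

# Before: `pipeline =\`, `result_id = tid+'-'+tid`, and a single blank line
# between top-level definitions -- all of which pylint flags.

# After:
pipeline = \
    ['DataComponent', 'ExecutorComponent']  # space before the continuation backslash
result_id = tid + '-' + tid                 # spaces around binary operators


class MyComponent:                          # two blank lines above a top-level class
    """Docstring."""

    def __call__(self):                     # blank line after the class docstring
        return result_id
```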

View File

@@ -166,7 +166,7 @@ class BertAttentionMask(nn.Cell):
         super(BertAttentionMask, self).__init__()
         self.has_attention_mask = has_attention_mask
-        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
+        self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
         self.multiply = P.Mul()
         if self.has_attention_mask:
@@ -189,6 +189,7 @@ class BertAttentionMask(nn.Cell):
         return attention_scores

+
 class BertAttentionMaskBackward(nn.Cell):
     def __init__(self,
                  attention_mask_shape,
@@ -196,7 +197,7 @@ class BertAttentionMaskBackward(nn.Cell):
                  dtype=mstype.float32):
         super(BertAttentionMaskBackward, self).__init__()
         self.has_attention_mask = has_attention_mask
-        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
+        self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
         self.multiply = P.Mul()
         self.attention_mask = Tensor(np.ones(shape=attention_mask_shape).astype(np.float32))
         if self.has_attention_mask:
@@ -218,6 +219,7 @@ class BertAttentionMaskBackward(nn.Cell):
         attention_scores = self.add(adder, attention_scores)
         return attention_scores

+
 class BertAttentionSoftmax(nn.Cell):
     def __init__(self,
                  batch_size,

View File

@@ -20,7 +20,7 @@ import numpy as np
 from mindspore.model_zoo.Bert_NEZHA import GetNextSentenceOutput, BertNetworkWithLoss
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \
     EmbeddingLookup, EmbeddingPostprocessor, BertOutput, \
-    BertAttention, BertSelfAttention, SaturateCast, TruncatedNormal,\
+    BertAttention, BertSelfAttention, SaturateCast, TruncatedNormal, \
     BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel
 from mindspore import context, nn
@@ -373,9 +373,9 @@ verification_set = {
         'id': 'BertDense_CICase',
         'group': 'bert',
         'block': BertDense(
             hidden_size=8,
             intermediate_size=8,
             initializer_range=0.02),
         'reduce_output': False
     },
     {

View File

@@ -19,17 +19,18 @@ import numpy as np
 import mindspore.common.dtype as mstype
 from mindspore import context
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertAttention, SaturateCast, \
     EmbeddingLookup, BertModel, \
     BertConfig, EmbeddingPostprocessor, \
     BertTransformer, BertEncoderCell, \
     BertSelfAttention, CreateAttentionMaskFromInputMask, \
     RelaPosMatrixGenerator, BertOutput, \
     RelaPosEmbeddingsGenerator
 from ..mindspore_test import mindspore_test
-from ..pipeline.forward.compare_forward import pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy
+from ..pipeline.forward.compare_forward import \
+    pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy
 from .bert_attention_submodules import BertAttentionQueryKeyMul, BertAttentionRelativePositionKeys, BertAttentionMask, \
     BertAttentionSoftmax, BertAttentionRelativePositionValues, BertDense

 verification_set = {
     'inputs': [

View File

@@ -22,6 +22,7 @@ from ..pipeline.gradient.compare_gradient import \
     pipeline_for_compare_inputs_grad_with_numerical_diff_for_group_by_group_config, \
     pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_group_by_group_config
 from ..mindspore_test import mindspore_test

+
 # from ...vm_impl import *

View File

@@ -18,9 +18,11 @@
 from ..mindspore_test import mindspore_test
 from ..pipeline.forward.verify_exception import pipeline_for_verify_exception_for_case_by_case_config

+
 def func_raise_exception(x, y):
     raise ValueError()

+
 verification_set = [
     ('func_raise_exception', {
         'block': (func_raise_exception, {'exception': ValueError}),
@@ -28,6 +30,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
 def test_check_exception():
     return verification_set

View File

@@ -42,6 +42,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_check_model_loss_for_case_by_case_config)
 def test_lamb_loss():
     context.set_context(mode=context.GRAPH_MODE)

View File

@@ -40,6 +40,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_check_model_loss_for_case_by_case_config)
 def test_model_loss():
     context.set_context(mode=context.GRAPH_MODE)

View File

@@ -21,6 +21,8 @@ import numpy as np
 from ..mindspore_test import mindspore_test
 from ..pipeline.forward.verify_shapetype import pipeline_for_verify_shapetype_for_group_by_group_config

+
 # from ...vm_impl import *

+
 # functions could be operations or NN cell

View File

@@ -53,6 +53,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config)
 def test_reid_check_gradient():
     context.set_context(mode=context.PYNATIVE_MODE)

View File

@@ -21,6 +21,7 @@ import pytest
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword

+
 class CheckExceptionsEC(IExectorComponent):
     """
     Check if the function raises the expected Exception and the error message contains specified keywords if not None.
@@ -32,6 +33,7 @@ class CheckExceptionsEC(IExectorComponent):
             'error_keywords': ['TensorAdd', 'shape']
         }
     """
+
     def __call__(self):
         f = self.function[keyword.block]
         args = self.inputs[keyword.desc_inputs]

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_gradient, ScalarGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckGradientForScalarFunctionEC(IExectorComponent):
     """
     Check gradient against numeric with respect to inputs for scalar function, execute and verify.
@@ -26,6 +27,7 @@ class CheckGradientForScalarFunctionEC(IExectorComponent):
     Examples:
         'block': scalar_function
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, sampling_times, _ = \
             get_grad_checking_options(self.function, self.inputs)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_gradient, OperationGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckGradientWrtInputsEC(IExectorComponent):
     """
     Check gradient against numeric with respect to inputs, execute and verify.
@@ -35,6 +36,7 @@ class CheckGradientWrtInputsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, \
             sampling_times, reduce_output = get_grad_checking_options(self.function, self.inputs)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_gradient, NNGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckGradientWrtParamsEC(IExectorComponent):
     """
     Check gradient against numeric with respect to params, execute and verify.
@@ -35,6 +36,7 @@ class CheckGradientWrtParamsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, \
             sampling_times, reduce_output = get_grad_checking_options(self.function, self.inputs)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_jacobian, ScalarGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckJacobianForScalarFunctionEC(IExectorComponent):
     """
     Check jacobian against numeric with respect to inputs for scalar_func, execute and verify.
@@ -26,6 +27,7 @@ class CheckJacobianForScalarFunctionEC(IExectorComponent):
     Examples:
         'block': scalar_function
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, _, _ = \
             get_grad_checking_options(self.function, self.inputs)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_jacobian, OperationGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckJacobianWrtInputsEC(IExectorComponent):
     """
     Check jacobian against numeric with respect to inputs, execute and verify.
@@ -35,6 +36,7 @@ class CheckJacobianWrtInputsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, _, _ = \
             get_grad_checking_options(self.function, self.inputs)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_jacobian, NNGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckJacobianWrtParamsEC(IExectorComponent):
     """
     Check jacobian against numeric with respect to params, execute and verify.
@@ -35,6 +36,7 @@ class CheckJacobianWrtParamsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, _, _ = \
             get_grad_checking_options(self.function, self.inputs)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IExectorComponent
 from ...utils.model_util import Model
 from ...utils import keyword

+
 class LossVerifierEC(IExectorComponent):
     """
     Verify if the model can converge to expected loss.
@@ -32,6 +33,7 @@ class LossVerifierEC(IExectorComponent):
         'loss_upper_bound': 0.03,
     }
     """
+
     def __call__(self):
         model = self.function[keyword.block][keyword.model]
         loss = self.function[keyword.block][keyword.loss]

View File

@@ -18,10 +18,12 @@
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword

+
 class IdentityEC(IExectorComponent):
     """
     Execute function/inputs.
     """
+
     def __call__(self):
         result_id = self.function[keyword.id] + '-' + self.inputs[keyword.id]
         group = self.function[keyword.group] + '-' + self.inputs[keyword.group]

View File

@@ -18,10 +18,12 @@
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword

+
 class IdentityBackwardEC(IExectorComponent):
     """
     Execute function/inputs, with all bprops attached, the bprop function created by BC should handle these bprops.
     """
+
     def __call__(self):
         result_id = self.function[keyword.id] + '-' + self.inputs[keyword.id]
         group = self.function[keyword.group] + '-' + self.inputs[keyword.group]

View File

@@ -18,10 +18,12 @@
 from ...components.icomponent import IERPolicyComponent
 from ...utils import keyword

+
 class GroupCartesianProductERPC(IERPolicyComponent):
     """
     Combine expect/result by do cartesian product on group.
     """
+
     def __call__(self):
         ret = [(s1, s2) for s1 in self.expect for s2 in self.result if s1[keyword.group] == s2[keyword.group]]
         return ret

View File

@@ -18,10 +18,12 @@
 from ...components.icomponent import IERPolicyComponent
 from ...utils import keyword

+
 class IdCartesianProductERPC(IERPolicyComponent):
     """
     Combine expect/result by do cartesian product on id.
     """
+
     def __call__(self):
         ret = [(s1, s2) for s1 in self.expect for s2 in self.result if s1[keyword.id] == s2[keyword.id]]
         return ret

View File

@@ -21,6 +21,7 @@ from ...components.icomponent import IFacadeComponent
 from ...utils.facade_util import get_block_config, fill_block_config
 from ...utils import keyword

+
 class MeFacadeFC(IFacadeComponent):
     """
     Transform ME style config to mindspore_test_framework style.
@@ -47,6 +48,7 @@ class MeFacadeFC(IFacadeComponent):
             }
         })
     """
+
     def __call__(self):
         ret = get_block_config()
         for config in self.verification_set:

View File

@@ -18,6 +18,7 @@
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import compile_block, gen_net, create_funcs

+
 class CompileBlockBC(IBuilderComponent):
     """
     Build a function that do mindspore compile.
@@ -42,5 +43,6 @@ class CompileBlockBC(IBuilderComponent):
             dtype=mstype.float32,
             compute_type=mstype.float32)
     """
+
     def __call__(self):
         return create_funcs(self.verification_set, gen_net, compile_block)

View File

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import compile_block, gen_grad_net, create_funcs

+
 class CompileBackwardBlockWrtInputsBC(IBuilderComponent):
     """
     Build a function that do mindspore gradient compile with respect to inputs.
@@ -43,6 +44,7 @@ class CompileBackwardBlockWrtInputsBC(IBuilderComponent):
             dtype=mstype.float32,
             compute_type=mstype.float32)
     """
+
     def __call__(self):
         grad_op = GradOperation('grad', get_all=True, sens_param=True)
         return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op)

View File

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import compile_block, gen_grad_net, create_funcs

+
 class CompileBackwardBlockWrtParamsBC(IBuilderComponent):
     """
     Build a function that do mindspore gradient compile with respect to params.
@@ -43,6 +44,7 @@ class CompileBackwardBlockWrtParamsBC(IBuilderComponent):
             dtype=mstype.float32,
             compute_type=mstype.float32)
     """
+
     def __call__(self, verification_set):
         grad_op = GradOperation('grad', get_by_list=True, sens_param=True)
         return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op)

View File

@@ -18,6 +18,7 @@
 from ...components.icomponent import IBuilderComponent
 from ...utils import keyword

+
 class IdentityBC(IBuilderComponent):
     """
     Return function.
@@ -25,5 +26,6 @@ class IdentityBC(IBuilderComponent):
     Examples:
         'function': Add
     """
+
     def __call__(self):
         return self.verification_set[keyword.function]

View File

@@ -18,6 +18,7 @@
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, get_uniform_with_shape, gen_net, create_funcs

+
 class RunBlockWithRandParamBC(IBuilderComponent):
     """
     Build a function with uniformed params that run mindspore pipeline.
@@ -42,5 +43,6 @@ class RunBlockWithRandParamBC(IBuilderComponent):
             dtype=mstype.float32,
             compute_type=mstype.float32)
     """
+
     def __call__(self):
         return create_funcs(self.verification_set, gen_net, run_block, default_rand_func=get_uniform_with_shape)

View File

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_uniform_with_shape

+
 class RunBackwardBlockWrtInputsWithRandParamBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_all=True, sens_param=True)

View File

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_uniform_with_shape

+
 class RunBackwardBlockWrtParamsWithRandParamBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_by_list=True, sens_param=True)

View File

@@ -18,6 +18,7 @@
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_net, create_funcs

+
 class RunBlockBC(IBuilderComponent):
     """
     Build a function that run mindspore pipeline.
@@ -42,5 +43,6 @@ class RunBlockBC(IBuilderComponent):
             dtype=mstype.float32,
             compute_type=mstype.float32)
     """
+
     def __call__(self):
         return create_funcs(self.verification_set, gen_net, run_block)

View File

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs

+
 class RunBackwardBlockWrtInputsBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_all=True, sens_param=True)

View File

@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs

+
 class RunBackwardBlockWrtParamsBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_by_list=True, sens_param=True)
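The WrtInputs and WrtParams builders above differ only in the GradOperation flags: get_all=True differentiates with respect to the network inputs, get_by_list=True with respect to a parameter list, and sens_param=True means the caller supplies the output sensitivity explicitly. A minimal sketch of the two modes against this era's API (TinyNet is illustrative, not code from this commit):

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter, ParameterTuple
from mindspore.ops.composite import GradOperation


class TinyNet(nn.Cell):
    """Illustrative cell: out = w * x."""

    def __init__(self):
        super(TinyNet, self).__init__()
        self.w = Parameter(Tensor(np.array([2.0], np.float32)), name='w')

    def construct(self, x):
        return self.w * x


net = TinyNet()
x = Tensor(np.array([3.0], np.float32))
sens = Tensor(np.array([1.0], np.float32))  # output sensitivity, supplied by the caller

grad_wrt_inputs = GradOperation('grad', get_all=True, sens_param=True)
print(grad_wrt_inputs(net)(x, sens))          # d(out)/dx == w

params = ParameterTuple(net.trainable_params())
grad_wrt_params = GradOperation('grad', get_by_list=True, sens_param=True)
print(grad_wrt_params(net, params)(x, sens))  # d(out)/dw == x
```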

View File

@@ -23,6 +23,7 @@ class GroupCartesianProductFIPC(IFIPolicyComponent):
     """
     Combine function/inputs by do cartesian product on group.
     """
+
     def __call__(self):
         ret = [(s1, s2) for s1 in self.function for s2 in self.inputs if s1[keyword.group] == s2[keyword.group]]
         return ret

View File

@@ -18,10 +18,12 @@
 from ...components.icomponent import IFIPolicyComponent
 from ...utils import keyword

+
 class IdCartesianProductFIPC(IFIPolicyComponent):
     """
     Combine function/inputs by do cartesian product on id.
     """
+
     def __call__(self):
         ret = [(s1, s2) for s1 in self.function for s2 in self.inputs if s1[keyword.id] == s2[keyword.id]]
         return ret

View File

@@ -15,8 +15,10 @@
 """Component interfaces."""

+
 class IComponent:
     """Component interfaces."""
+
     def __init__(self, verification_set):
         self.verification_set = verification_set
@@ -26,18 +28,21 @@ class IComponent:
 class IDataComponent(IComponent):
     """Create inputs for verification_set."""
+
     def __call__(self):
         raise NotImplementedError


 class IBuilderComponent(IComponent):
     """Build system under test."""
+
     def __call__(self):
         raise NotImplementedError


 class IExectorComponent(IComponent):
     """Execute sut, take (function, input) pairs as input."""
+
     def __init__(self, verification_set, function, inputs):
         super(IExectorComponent, self).__init__(verification_set)
         self.function = function
@@ -49,6 +54,7 @@ class IExectorComponent(IComponent):
 class IVerifierComponent(IComponent):
     """Verify sut result, take (expect, result) pairs as input."""
+
     def __init__(self, verification_set, expect, result):
         super(IVerifierComponent, self).__init__(verification_set)
         self.expect = expect
@@ -60,6 +66,7 @@ class IVerifierComponent(IComponent):
 class IFIPolicyComponent(IComponent):
     """Combine functions/inputs."""
+
     def __init__(self, verification_set, function, inputs):
         super(IFIPolicyComponent, self).__init__(verification_set)
         self.function = function
@@ -71,6 +78,7 @@ class IFIPolicyComponent(IComponent):
 class IERPolicyComponent(IComponent):
     """Combine expects and results."""
+
     def __init__(self, verification_set, expect, result):
         super(IERPolicyComponent, self).__init__(verification_set)
         self.expect = expect
@@ -82,5 +90,6 @@ class IERPolicyComponent(IComponent):
 class IFacadeComponent(IComponent):
     """Adapt verification_set."""
+
     def __call__(self):
         raise NotImplementedError
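These interfaces are the extension points of the test framework: a pipeline is an ordered list of component classes that the mindspore_test decorator (shown further down) drives stage by stage. A sketch of how the pieces compose, using only class names that appear elsewhere in this commit (this particular combination is illustrative, not a pipeline shipped by the framework):

```python
# Illustrative only: a pipeline is just an ordered list of component classes.
my_pipeline = [
    GenerateFromShapeDC,      # IDataComponent: build inputs from shape specs
    RunBlockBC,               # IBuilderComponent: build the function under test
    IdCartesianProductFIPC,   # IFIPolicyComponent: pair functions with inputs by id
    IdentityEC,               # IExectorComponent: run each (function, inputs) pair
    IdCartesianProductERPC,   # IERPolicyComponent: pair expects with results by id
    ShapeTypeVC,              # IVerifierComponent: check result shapes and dtypes
]


@mindspore_test(my_pipeline)
def test_example():
    return verification_set  # a config dict like the ones in the test files above
```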

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IDataComponent
 from ...utils.dataset_util import generate_dataset_for_linear_regression
 from ...utils import keyword

+
 class GenerateDataSetForLRDC(IDataComponent):
     """
     Create dataset for linear regression, with salt from normal distribution.
@@ -30,6 +31,7 @@ class GenerateDataSetForLRDC(IDataComponent):
         'batch_size': 20,
     }
     """
+
     def __call__(self):
         result = []
         for config in self.verification_set[keyword.inputs]:

View File

@@ -23,6 +23,7 @@ from ...utils.other_util import shape2tensor
 from ...utils.config_util import get_input_config
 from ...utils import keyword

+
 class GenerateFromShapeDC(IDataComponent):
     """
     Generate inputs from shape, desc_inputs must be configured, desc_bprop is optional.
@@ -41,6 +42,7 @@ class GenerateFromShapeDC(IDataComponent):
         ([1, 16, 128, 64], np.float32, 6), # (inputs, dtype, scale)
     ]
     """
+
     def __call__(self):
         result = []
         for config in self.verification_set[keyword.inputs]:

View File

@@ -17,6 +17,7 @@
 from ...components.icomponent import IDataComponent

+
 class IdentityDC(IDataComponent):
     """
     Return inputs.
@@ -26,5 +27,6 @@ class IdentityDC(IDataComponent):
         np.array([[2, 2], [2, 2]]).astype(np.float32)
     ]
     """
+
     def __call__(self):
         return self.verification_set['inputs']

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IDataComponent
 from ...utils.npy_util import load_data_from_npy_or_shape
 from ...utils import keyword

+
 class LoadFromNpyDC(IDataComponent):
     """
     Load inputs from npy data, inputs could be shape/tensor/np.ndarray/file path.
@@ -43,6 +44,7 @@ class LoadFromNpyDC(IDataComponent):
         ([2, 2], np.float32, 6)
     ]
     """
+
     def __call__(self):
         result = []
         for config in self.verification_set[keyword.inputs]:

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IVerifierComponent
 from ...utils.compare_util import compare
 from ...utils import keyword

+
 class CompareWithVC(IVerifierComponent):
     """
     Compare the result with baseline functions configured in 'compare' config item.
@@ -41,5 +42,6 @@ class CompareWithVC(IVerifierComponent):
         'max_error': 1e-3
     }
     """
+
     def __call__(self):
         compare(self.expect, self.func_result, baseline=keyword.compare_with)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IVerifierComponent
 from ...utils.compare_util import compare
 from ...utils import keyword

+
 class CompareGradientWithVC(IVerifierComponent):
     """
     Compare the result with baseline functions configured in 'compare_gradient_with' config item.
@@ -35,5 +36,6 @@ class CompareGradientWithVC(IVerifierComponent):
         'max_error': 1e-3
     }
     """
+
     def __call__(self):
         compare(self.expect, self.func_result, baseline=keyword.compare_gradient_with)

View File

@@ -22,6 +22,7 @@ from ...utils.npy_util import load_data_from_npy_or_shape
 from ...utils.verifier_util import tolerance_assert
 from ...utils import keyword

+
 class LoadFromNpyVC(IVerifierComponent):
     """
     Verify if the results are like expects from npy data, expects could be shape/tensor/np.ndarray/file path.
@@ -37,6 +38,7 @@ class LoadFromNpyVC(IVerifierComponent):
         ([2, 2], np.float32, 6, 1e-3) # (shape, dtype, scale, max_error)
     ]
     """
+
     def __call__(self):
         dpaths = self.expect.get(keyword.desc_expect)
         expects = load_data_from_npy_or_shape(dpaths, False)

View File

@@ -19,6 +19,7 @@ from ...components.icomponent import IVerifierComponent
 from ...utils.other_util import to_numpy_list
 from ...utils import keyword

+
 class ShapeTypeVC(IVerifierComponent):
     """
     Verify if the result's shape and type are correct.
@@ -33,6 +34,7 @@ class ShapeTypeVC(IVerifierComponent):
         ]
     }
     """
+
     def __call__(self):
         results = to_numpy_list(self.func_result[keyword.result])
         expects = self.expect[keyword.desc_expect][keyword.shape_type]

View File

@@ -18,10 +18,11 @@
 import logging
 import pytest

 from .components.icomponent import IDataComponent, IBuilderComponent, IExectorComponent, \
     IVerifierComponent, IFIPolicyComponent, IERPolicyComponent, IComponent, \
     IFacadeComponent
 from .utils import keyword

+
 def mindspore_test(verification_pipeline):
     """
     Run verification pipeline.
@@ -31,6 +32,7 @@ def mindspore_test(verification_pipeline):
     Returns:
     """
+
     def decorate(get_verification_set):
         verification_set = get_verification_set()

View File

@@ -107,7 +107,7 @@ Example:
     ]
 }
 """
-pipeline_for_compare_forward_with_npy_for_group_by_group_config =\
+pipeline_for_compare_forward_with_npy_for_group_by_group_config = \
    [LoadFromNpyDC, RunBlockWithRandParamBC, IdCartesianProductFIPC,
     IdentityEC, IdCartesianProductERPC, LoadFromNpyVC]
@@ -161,7 +161,7 @@ Example:
     ]
 }
 """
-pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy =\
+pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy = \
    [LoadFromNpyDC, RunBlockWithRandParamBC,
     GroupCartesianProductFIPC, IdentityEC,
     IdCartesianProductERPC, LoadFromNpyVC]

View File

@@ -49,7 +49,7 @@ Example:
     })
 ]
 """
-pipeline_for_compare_inputs_grad_with_user_defined_for_case_by_case_config =\
+pipeline_for_compare_inputs_grad_with_user_defined_for_case_by_case_config = \
    [MeFacadeFC, GenerateFromShapeDC,
     RunBackwardBlockWrtInputsBC, IdCartesianProductFIPC,
     IdentityBackwardEC, IdCartesianProductERPC,
@@ -89,7 +89,7 @@ Example:
     })
 ]
 """
-pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config =\
+pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config = \
    [MeFacadeFC, LoadFromNpyDC, RunBackwardBlockWrtInputsBC,
     IdCartesianProductFIPC, IdentityBackwardEC,
     IdCartesianProductERPC, LoadFromNpyVC]
@@ -128,7 +128,7 @@ Example:
     })
 ]
 """
-pipeline_for_compare_params_grad_with_npy_for_case_by_case_config =\
+pipeline_for_compare_params_grad_with_npy_for_case_by_case_config = \
    [MeFacadeFC, LoadFromNpyDC, RunBackwardBlockWrtParamsBC,
     IdCartesianProductFIPC, IdentityBackwardEC,
     IdCartesianProductERPC, LoadFromNpyVC]
@@ -146,7 +146,7 @@ Example:
     })
 ]
 """
-pipeline_for_compare_inputs_grad_with_numerical_diff_for_case_by_case_config =\
+pipeline_for_compare_inputs_grad_with_numerical_diff_for_case_by_case_config = \
    [MeFacadeFC, GenerateFromShapeDC, IdentityBC,
     IdCartesianProductFIPC,
     CheckGradientWrtInputsEC]
@@ -244,7 +244,7 @@ Example:
     }),
 ]
 """
-pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_case_by_case_config =\
+pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_case_by_case_config = \
    [MeFacadeFC, GenerateFromShapeDC, IdentityBC,
     IdCartesianProductFIPC,
     CheckJacobianWrtInputsEC]

View File

@@ -35,7 +35,7 @@ Example:
     })
 ]
 """
-pipeline_for_compile_grad_anf_graph_for_case_by_case_config =\
+pipeline_for_compile_grad_anf_graph_for_case_by_case_config = \
    [MeFacadeFC, GenerateFromShapeDC, CompileBackwardBlockWrtInputsBC,
     IdCartesianProductFIPC, IdentityBackwardEC]
@@ -51,6 +51,6 @@ Example:
     })
 ]
 """
-pipeline_for_compile_grad_ge_graph_for_case_by_case_config =\
+pipeline_for_compile_grad_ge_graph_for_case_by_case_config = \
    [MeFacadeFC, GenerateFromShapeDC, RunBackwardBlockWrtInputsBC,
     IdCartesianProductFIPC, IdentityBackwardEC]

View File

@@ -28,10 +28,12 @@ from mindspore.ops import operations as P
 from mindspore import ParameterTuple
 from . import keyword

+
 def get_uniform_with_shape(shape):
     np.random.seed(1)
     return np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32)

+
 def set_block_param_with_rand(net, rand_func=None):
     if not isinstance(net, nn.Cell) or rand_func is None:
         return
@@ -39,11 +41,13 @@ def set_block_param_with_rand(net, rand_func=None):
     for param in net.trainable_params():
         param.default_input = Tensor(rand_func(param.default_input.asnumpy().shape))

+
 def compile_block(net, *inputs, rand_func=None, training=True):
     set_block_training(net, training)
     set_block_param_with_rand(net, rand_func)
     return _executor.compile(net, *inputs)

+
 def run_block(net, *inputs, rand_func=None, training=True):
     set_block_training(net, training)
     set_block_param_with_rand(net, rand_func)
@@ -52,10 +56,13 @@ def run_block(net, *inputs, rand_func=None, training=True):
             @ms_function
             def _func_pynative(*inputs):
                 return net(*inputs)
+
             return _func_pynative(*inputs)
+
         return func_pynative(*inputs)
     return net(*inputs)

+
 class IthOutputCell(nn.Cell):
     def __init__(self, network, output_index):
         if isinstance(network, nn.Cell):
@@ -69,12 +76,14 @@ class IthOutputCell(nn.Cell):
         predict = self.network(*inputs)[self.output_index]
         return predict

+
 def get_output_cell(network, num_input, output_index, training=True):
     _ = num_input
     net = IthOutputCell(network, output_index)
     set_block_training(net, training)
     return net

+
 class OutputReduceSumCell(nn.Cell):
     def __init__(self, network, output_num):
         super(OutputReduceSumCell, self).__init__()
@@ -92,11 +101,13 @@ class OutputReduceSumCell(nn.Cell):
             ret = ret + F.make_tuple(predict_reduce)
         return ret

+
 def get_output_reduce_cell(network, output_num, training=True):
     net = OutputReduceSumCell(network, output_num)
     set_block_training(net, training)
     return net

+
 class InputOpNet(nn.Cell):
     def __init__(self, op, c1=None, c2=None, c3=None, c4=None):
         super(InputOpNet, self).__init__()
@@ -112,6 +123,7 @@ class InputOpNet(nn.Cell):
     def construct0_c0_fake(self, data):
         x = self.op() + data
         return x
+
     def construct0_c1_fake(self, data):
         x = self.op(self.c1) + data
         return x
@@ -212,6 +224,7 @@ class InputOpNet(nn.Cell):
         x = self.op(x1, x2, x3, x4, x5, self.c1, self.c2, self.c3, self.c4)
         return x

+
 def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False):
     if isinstance(op, nn.Cell):
         return op
@@ -227,6 +240,7 @@ def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False):
     set_block_training(net, training)
     return net

+
 class OperationBackward(nn.Cell):
     def __init__(self, network, grad_op, sens):
         if isinstance(network, nn.Cell):
@@ -240,6 +254,7 @@ class OperationBackward(nn.Cell):
     def construct(self, *inputs):
         return self.grad(self.network)(*inputs, self.sens)

+
 class OperationBackwardWithNoSens(nn.Cell):
     def __init__(self, network, grad_op):
         if isinstance(network, nn.Cell):
@@ -252,6 +267,7 @@ class OperationBackwardWithNoSens(nn.Cell):
     def construct(self, *inputs):
         return self.grad(self.network)(*inputs)

+
 class NNBackward(nn.Cell):
     def __init__(self, network, grad_op, sens):
         if isinstance(network, nn.Cell):
@@ -266,6 +282,7 @@ class NNBackward(nn.Cell):
     def construct(self, *inputs):
         return self.grad(self.network, self.params)(*inputs, self.sens)

+
 class NNBackwardWithNoSens(nn.Cell):
     def __init__(self, network, grad_op):
         if isinstance(network, nn.Cell):
@@ -279,6 +296,7 @@ class NNBackwardWithNoSens(nn.Cell):
     def construct(self, *inputs):
         return self.grad(self.network, self.params)(*inputs)

+
 def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(),
                  const_first=False, add_fake_input=False):
     if not isinstance(net, nn.Cell):
@@ -296,14 +314,17 @@ def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(),
     set_block_training(net, training)
     return net

+
 def set_block_training(net, training=True):
     if isinstance(net, nn.Cell):
         net.set_train(training)

+
 def set_block_phase(net, phase='train'):
     if isinstance(net, nn.Cell):
         net.phase = phase

+
 def create_funcs(verification_set, block_generator, block_runner, grad_op=None, default_rand_func=None):
     def create_func(block, num_outputs, rand_func, desc_const, const_first, add_fake_input, split_outputs):
         def function(*inputs):
@@ -347,6 +368,7 @@ def create_funcs(verification_set, block_generator, block_runner, grad_op=None,
             b = block_generator(block, inputs_num, desc_const=desc_const, const_first=const_first,
                                 add_fake_input=add_fake_input)
             return block_runner(b, *inputs, rand_func=rand_func)
+
         return function

     bc_configs = verification_set[keyword.function]

View File

@@ -21,10 +21,12 @@ from mindspore.nn import Cell
 from mindspore.common import ParameterTuple
 from mindspore.common.api import ms_function

+
 class Bprop(Cell):
     """
     The gradient wraper.
     """
+
     def __init__(self, func, wrt_params, params, grad_op, sens):
         super(Bprop, self).__init__(auto_prefix=False)
         self.func = func
@@ -50,6 +52,7 @@ class Bprop(Cell):
         else:
             return self.grad(self.func)(*inputs)

+
 def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list = None):
     """
     Compute gradients of function.
@@ -90,6 +93,8 @@ def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list = None):
             @ms_function
             def _func_pynative(*inputs):
                 return grad(*inputs)
+
             return _func_pynative(*inputs)
+
         return func_pynative(*inputs)
     return grad(*inputs)

View File

@@ -27,6 +27,7 @@ import mindspore._c_expression as _c_expression
 from .block_util import get_output_cell, gen_net, gen_grad_net, \
     get_uniform_with_shape, set_block_phase, get_output_reduce_cell, set_block_param_with_rand

+
 class _GradChecker:
     """
     Check the theoretical Jacobian against numeric
@@ -130,6 +131,7 @@ class _GradChecker:
             @ms_function
             def _func_pynative(*inputs):
                 return net(*inputs)
+
             return _func_pynative(*inputs)

         return func_forward_pynative
@@ -277,7 +279,7 @@ class _GradChecker:
         print('GradChecker.compute_theoretical.args', args)
         gout = self.wrap(self.gfns[out_index](*args))
         gout = [self.to_numpy_and_scale(g) if isinstance(g, _c_expression.Tensor) \
                 else self.to_numpy_and_scale(np.array(g)) for g in gout]
         print('GradChecker.compute_theoretical.gout', gout)
         dy_mask.ravel().view()[jacobian_col] = 0.0
@@ -433,6 +435,7 @@ def check_gradient(fn, *args, delta=1e-3, max_error=1e-3,
                            reduce_output=reduce_output)
     grad_checker.assert_match()

+
 def check_jacobian(fn, *args, delta=1e-3, max_error=1e-3,
                    grad_checker_class=OperationGradChecker,
                    input_selector=None,
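check_gradient and check_jacobian drive the _GradChecker above, which compares the framework's backward pass against a finite-difference estimate; delta is the perturbation step and max_error the tolerance. A hedged usage sketch against the keyword signature visible in this hunk (the operation under test and the tolerances are arbitrary choices):

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

# Arbitrary example: check the gradient of elementwise multiply numerically.
check_gradient(P.Mul(),
               Tensor(np.random.rand(2, 2).astype(np.float32)),
               Tensor(np.random.rand(2, 2).astype(np.float32)),
               delta=1e-3,
               max_error=1e-3,
               grad_checker_class=OperationGradChecker)
```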

View File

@@ -19,6 +19,7 @@ import numpy as np
 from .other_util import to_numpy_list
 from . import keyword

+
 def compare(expect, func_result, baseline):
     """
     Compare results of function with baseline functions.

View File

@@ -20,6 +20,7 @@ import numpy as np
 from . import keyword
 from .other_util import select_from_config_tuple

+
 def get_input_config(d):
     """
     Get input config.
@@ -38,6 +39,7 @@ def get_input_config(d):
     scale = ext_config.get(keyword.scale, 1)
     return s, dtype, scale

+
 def get_expect_config(d):
     """
     Get input config.
@@ -66,6 +68,7 @@ def get_expect_config(d):
     absolute_tolerance = ext_config.get(keyword.absolute_tolerance, 0.0)
     return s, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance

+
 def get_function_config(function):
     """
     Get input config.
@@ -91,6 +94,7 @@ def get_function_config(function):
     return delta, max_error, input_selector, output_selector, sampling_times, \
         reduce_output, init_param_with, split_outputs, exception, error_keywords

+
 def get_grad_checking_options(function, inputs):
     """
     Get input config.

View File

@@ -19,6 +19,7 @@ import random
 import numpy as np
 from mindspore import Tensor

+
 def generate_dataset_for_linear_regression(true_w, true_b, num_samples, batch_size):
     features = np.random.normal(scale=1, size=(num_samples, len(true_w)))
     labels = np.matmul(features, np.reshape(np.array(true_w), (-1, 1))) + true_b

View File

@@ -24,9 +24,10 @@ from mindspore.ops._grad.grad_base import bprop_getters
 from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer

 logging.basicConfig(level=logging.DEBUG, format=
                     '[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
 logger = logging.getLogger(__name__)

+
 class PrintShapeType(PrimitiveWithInfer):
     """
     PrintShapeType input's shape and type.
@@ -78,14 +79,17 @@ class PrintShapeType(PrimitiveWithInfer):
 @bprop_getters.register(PrintShapeType)
 def get_bprop_print_shape_type(self):
     """Generate bprop for PrintShapeType"""
+
     def bprop(x, out, dout):
         return (dout,)

     return bprop

+
 class PrintShapeTypeCell(nn.Cell):
     def __init__(self):
         super(PrintShapeTypeCell, self).__init__()
+
     def construct(self, msg, x):
         PrintShapeType(msg)(x)
         return x
@@ -18,6 +18,7 @@
from . import keyword
from .config_util import get_function_config


def get_block_config():
    """
    Get Empty function config.
@@ -28,6 +29,7 @@ def get_block_config():
    ret[keyword.expect] = []
    return ret


def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, expect,
                      desc_const, const_first, add_fake_input, fake_input_type):
    """
@@ -95,7 +97,7 @@ def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, ex
    if expect:
        expect_list.append({
            keyword.id: tid + '-' + tid,
            keyword.group: group + '-' + group,
            keyword.desc_expect: expect
        })
@@ -17,11 +17,14 @@
import sys


class _MindsporeTestFrameworkkeyword:
    def __setattr__(self, name, value):
        if name in self.__dict__:
            raise TypeError("can not rebind keyword (%s)" % name)
        self.__dict__[name] = value


keyword = _MindsporeTestFrameworkkeyword()

keyword.function = "function"
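A quick illustration (added, not from the commit) of why this write-once singleton is useful: assigning the same attribute twice raises, so test keywords cannot be silently redefined:

class WriteOnce:
    def __setattr__(self, name, value):
        if name in self.__dict__:
            raise TypeError("can not rebind keyword (%s)" % name)
        self.__dict__[name] = value

kw = WriteOnce()
kw.function = "function"   # first binding is fine
try:
    kw.function = "other"  # second binding is rejected
except TypeError as e:
    print(e)               # -> can not rebind keyword (function)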
@@ -24,8 +24,10 @@ from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C


class SquaredLoss(nn.Cell):
    """Squared loss function."""

    def __init__(self):
        super(SquaredLoss, self).__init__()
        self.reshape = P.Reshape()
@@ -37,7 +39,10 @@ class SquaredLoss(nn.Cell):
        ret = y_hat - self.reshape(y, self.shape(y_hat))
        return self.reduce_sum((ret * ret) / self.two, (0,))


opt_step = C.MultitypeFuncGraph("opt_step")


@opt_step.register("Tensor", "Tensor",
                   "Tensor", "Tensor")
def update_opt_step(learning_rate, batch_size, parameter, gradient):
@@ -56,8 +61,10 @@ def update_opt_step(learning_rate, batch_size, parameter, gradient):
    F.assign(parameter, next_param)
    return next_param


class SGD(nn.Cell):
    """SGD optimizer."""

    def __init__(self, parameters, learning_rate=0.001, batch_size=1):
        super(SGD, self).__init__()
        self.parameters = ParameterTuple(parameters)
@@ -73,8 +80,10 @@ class SGD(nn.Cell):
                                     self.parameters, gradients)
        return success


class Linreg(nn.Cell):
    """Linear regression model."""

    def __init__(self, num_features):
        super(Linreg, self).__init__()
        self.matmul = P.MatMul()
@@ -84,8 +93,10 @@ class Linreg(nn.Cell):
    def construct(self, x):
        return self.matmul(x, self.w) + self.b


class Model:
    """Simplified model."""

    def __init__(self, network, loss_fn, optimizer):
        self.optimizer = optimizer
        self.step = nn.TrainOneStepCell(nn.WithLossCell(network, loss_fn), self.optimizer)
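For reference, the update that update_opt_step applies is plain mini-batch SGD; a NumPy sketch of the same rule (the division by batch_size matches the squared-loss scaling above):

import numpy as np

def sgd_step(param, grad, learning_rate, batch_size):
    # parameter <- parameter - lr * grad / batch_size
    return param - learning_rate * grad / batch_size

w = np.array([2.0, -3.4])
g = np.array([0.5, 1.0])
print(sgd_step(w, g, learning_rate=0.1, batch_size=10))  # [1.995, -3.41]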
@@ -22,6 +22,7 @@ from mindspore.common.tensor import Tensor
from .other_util import shape2tensor
from .config_util import get_expect_config


def load_npy(p):
    s, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance = get_expect_config(p)
    if isinstance(s, str):
@@ -33,6 +34,7 @@ def load_npy(p):
        ret = shape2tensor(s, dtype, scale)
    return ret, max_error, check_tolerance, relative_tolerance, absolute_tolerance


def load_data_from_npy_or_shape(dpaths, skip_expect_config=True):
    ret = []
    for p in dpaths:
@@ -20,11 +20,13 @@ import numpy as np
import mindspore._c_expression as _c_expression
from mindspore.common.tensor import Tensor


def wrap(x):
    if isinstance(x, (tuple, list)):
        return x
    return (x,)


def to_numpy_list(tl):
    tl = wrap(tl)
    ret = []
@@ -35,11 +37,13 @@ def to_numpy_list(tl):
        ret.append(x)
    return ret


def to_numpy(x):
    if isinstance(x, (Tensor, _c_expression.Tensor)):
        return x.asnumpy()
    return x


def shape2tensor(shp, dtype=np.float32, scale=6):
    if isinstance(shp, list):
        if not shp:
@@ -47,11 +51,12 @@ def shape2tensor(shp, dtype=np.float32, scale=6):
        return Tensor((np.random.rand(*shp) * scale).astype(dtype))
    return shp


def select_from_config_tuple(t, index, default):
    if not isinstance(t, tuple):
        return default
    if not isinstance(t[-1], dict):
        return default
    if index > len(t) - 1:
        return default
    return t[index]
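To make the contract concrete (example added, not in the commit): select_from_config_tuple only indexes tuples whose last element is a dict, the framework's convention for a trailing config, and otherwise returns the default:

def select_from_config_tuple(t, index, default):
    if not isinstance(t, tuple):
        return default
    if not isinstance(t[-1], dict):
        return default
    if index > len(t) - 1:
        return default
    return t[index]

print(select_from_config_tuple(([2, 3], {'scale': 1}), 0, None))  # [2, 3]
print(select_from_config_tuple([2, 3], 0, 'fallback'))            # 'fallback' (not a tuple)
print(select_from_config_tuple((1, 2), 0, 'fallback'))            # 'fallback' (no trailing dict)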
@@ -17,6 +17,7 @@
import numpy as np


def count_unequal_element(expect, result, rtol, atol):
    """
    Count unequal element.
@@ -33,15 +34,16 @@ def count_unequal_element(expect, result, rtol, atol):
        raise ValueError(f'expect.shape {expect.shape}, result.shape {result.shape}')
    total_count = len(expect.flatten())
    error = np.abs(expect - result)
    count = np.count_nonzero(np.less_equal(error, atol + np.abs(result) * rtol))
    if ((total_count - count) / total_count) >= rtol:
        raise ValueError(f'expect {expect}, but got {result}, '
                         f'{total_count - count} / {total_count} elements out of tolerance, '
                         f'absolute_tolerance {atol}, relative_tolerance {rtol}')
    print(f'expect {expect}, got {result}, '
          f'{total_count - count} / {total_count} elements out of tolerance, '
          f'absolute_tolerance {atol}, relative_tolerance {rtol}')


def tolerance_assert(expect, result, rtol, atol):
    """
    Verify if results are in expected tolerance.
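The element-wise criterion here is the same one np.isclose uses, |expect - result| <= atol + rtol * |result|; a compact sketch (added for illustration) of the check:

import numpy as np

def fraction_out_of_tolerance(expect, result, rtol, atol):
    # Same per-element test as above: |e - r| <= atol + rtol * |r|
    ok = np.abs(expect - result) <= atol + rtol * np.abs(result)
    return 1.0 - np.count_nonzero(ok) / ok.size

e = np.array([1.0, 2.0, 3.0])
r = np.array([1.0, 2.1, 3.0])
print(fraction_out_of_tolerance(e, r, rtol=1e-3, atol=1e-3))  # ~0.333: one of three elements fails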
@@ -21,8 +21,10 @@ import mindspore.ops.operations as P
from mindspore import Tensor
from mindspore.common.api import _executor


class InputBackward(nn.Cell):
    """ InputBackward definition """

    def __init__(self, network, c1=None, c2=None):
        super(InputBackward, self).__init__()
        self.network = network
@@ -58,6 +60,7 @@ class InputBackward(nn.Cell):
class InputOpNet(nn.Cell):
    """ InputOpNet definition """

    def __init__(self, op, get_first=False,
                 c1=None, c2=None, c3=None, c4=None):
        super(InputOpNet, self).__init__()
@@ -76,6 +79,7 @@ class InputOpNet(nn.Cell):
        if self.get_first:
            x = x[0]
        return x

    def construct0_c1_fack(self, data):
        x = self.op(self.c1) + data
        if self.get_first:
@@ -148,7 +152,6 @@ class InputOpNet(nn.Cell):
            x = x[0]
        return x

    def construct2_c1(self, x1, x2):
        x = self.op(x1, x2, self.c1)
        if self.get_first:
@@ -203,8 +206,10 @@ class InputOpNet(nn.Cell):
            x = x[0]
        return x


class NetOutputAsLoss(nn.Cell):
    """ NetOutputAsLoss definition """

    def __init__(self, network, output_index):
        super(NetOutputAsLoss, self).__init__()
        self.network = network
@@ -233,18 +238,21 @@ class NetOutputAsLoss(nn.Cell):
        predict = self.network(x1, x2, x3, x4, x5)[self.output_index]
        return predict


def get_loss_fun(construct_net, num_input, output_index):
    net = NetOutputAsLoss(construct_net, output_index)
    f = getattr(net, 'construct%d' % num_input)
    setattr(net, "construct", f)
    return net


def build_construct_graph(net, *inputs, execute=True):
    net.set_train()
    _executor.compile(net, *inputs)
    if execute:
        _executor(net, inputs)


def build_backward_graph(net, output_shapes, inputs, execute=True):
    inputs = append_sens_to_inputs(output_shapes, inputs)
    net = gen_backward_net(net, len(inputs) - 1)
@@ -253,6 +261,7 @@ def build_backward_graph(net, output_shapes, inputs, execute=True):
    if execute:
        _executor(net, inputs)


def convert(shp, dtype=np.float32, scale=6):
    if isinstance(shp, list):
        if not shp:
@@ -260,12 +269,14 @@ def convert(shp, dtype=np.float32, scale=6):
        return Tensor((np.random.rand(*shp) * scale).astype(dtype))
    return shp


def gen_inputs(input_shapes, config):
    add_fack_input = config.get('add_fack_input', False)
    if not input_shapes and add_fack_input:
        return [Tensor(np.array([1.0]).astype(config.get('fack_input_type', np.float32)))]
    return [convert(shp) for shp in input_shapes]


def gen_backward_inputs(input_shapes, output_shapes, config):
    add_fack_input = config.get('add_fack_input', False)
    if not input_shapes and add_fack_input:
@@ -276,11 +287,13 @@ def gen_backward_inputs(input_shapes, output_shapes, config):
    sens = convert(sens_shape)
    return inputs + [sens]


def append_sens_to_inputs(output_shapes, inputs):
    inputs = inputs
    sens = Tensor(np.random.normal(0, 1, output_shapes).astype(np.float32))
    return inputs + [sens]


def gen_net(shapes, config, get_first=False):
    """
    gen_net function
@@ -313,14 +326,17 @@ def gen_backward_net(construct_net, input_num):
    setattr(net, "construct", f)
    return net


def batch_tuple_tensor(data, batch_size):
    ret = [Tensor(np.tile(d.asnumpy(), (batch_size, 1))) for d in data]
    return tuple(ret)


class OutPutWrap(nn.Cell):
    """
    OutPutWrap definition
    """

    def __init__(self, network, num_output, output_is_tuple):
        super(OutPutWrap, self).__init__()
        self.network = network
@@ -387,6 +403,7 @@ class OutPutWrap(nn.Cell):
        ret = ret + F.make_tuple(predict[i] * self.cast(self.one, self.dtype(predict[i])))
        return ret


def get_output_wrap(network, num_input, num_output, output_is_tuple=0):
    net = OutPutWrap(network, num_output, output_is_tuple)
    f = getattr(net, 'construct%d' % num_input)
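Aside (sketch, not from the commit): get_loss_fun and get_output_wrap rely on rebinding construct to the variant whose name encodes the input count; a stand-alone illustration of that getattr/setattr pattern:

class Multi:
    def construct1(self, a):
        return a

    def construct2(self, a, b):
        return a + b

    def __call__(self, *args):
        return self.construct(*args)

net = Multi()
setattr(net, "construct", getattr(net, 'construct%d' % 2))  # pick the 2-input variant
print(net(3, 4))  # 7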
@@ -23,4 +23,4 @@ import pytest
def test_expand_loss():
    sh_path = os.path.split(os.path.realpath(__file__))[0]
    ret = os.system(f"sh {sh_path}/run_auto_parallel_loss_expand.sh")
    assert (ret == 0)
@@ -19,4 +19,4 @@ import pytest
def test_expand_loss():
    ret = os.system("sh run_onehot_model_parallel.sh")
    assert (ret == 0)
@@ -18,6 +18,7 @@ import numpy as np
from mindspore.nn import Cell
from mindspore import Tensor, Model, context


def run_test(netclass, count, dev):
    context.set_context(mode=context.GRAPH_MODE, device_target=dev)
    net = netclass()
@@ -25,10 +26,11 @@ def run_test(netclass, count, dev):
    for _ in range(count):
        input_np = np.random.randn(2, 3).astype(np.float32)
        input_ms = Tensor(input_np)
        output_np = net.construct(input_np)  # run python
        output_ms = model.predict(input_ms)  # run graph
        np.testing.assert_array_almost_equal(output_np, output_ms.asnumpy(), decimal=3)


class for_loop_with_break(Cell):
    def __init__(self):
        super().__init__()
@@ -42,6 +44,7 @@ class for_loop_with_break(Cell):
            pass
        return x


class for_loop_with_continue(Cell):
    def __init__(self):
        super().__init__()
@@ -54,6 +57,7 @@ class for_loop_with_continue(Cell):
            x = x * 2
        return x


class for_loop_with_cont_break(Cell):
    def __init__(self):
        super().__init__()
@@ -71,6 +75,7 @@ class for_loop_with_cont_break(Cell):
            pass
        return x


class for_nested_loop_with_break(Cell):
    def __init__(self):
        super().__init__()
@@ -84,6 +89,7 @@ class for_nested_loop_with_break(Cell):
            x = x * 1.5
        return x


class while_with_break(Cell):
    def __init__(self):
        super().__init__()
@@ -98,6 +104,7 @@ class while_with_break(Cell):
            i += 1
        return x


class while_with_continue(Cell):
    def __init__(self):
        super().__init__()
@@ -113,6 +120,7 @@ class while_with_continue(Cell):
            i += 1
        return x


class while_for_nested(Cell):
    def __init__(self):
        super().__init__()
@@ -131,6 +139,7 @@ class while_for_nested(Cell):
            i += 1
        return x


class pass_branch(Cell):
    def __init__(self):
        super().__init__()
@@ -145,6 +154,7 @@ class pass_branch(Cell):
            i += 1
        return x


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@@ -159,4 +169,3 @@ def test_cont_break():
    run_test(while_with_continue, count, dev)
    run_test(while_for_nested, count, dev)
    run_test(pass_branch, count, dev)
@@ -64,6 +64,7 @@ class GNNFeatureTransform(nn.Cell):
        [[ 2.5246444   2.2738023   0.5711005  -3.9399147 ]
         [ 1.0739875   4.0155234   0.94188046 -5.459526  ]]
    """

    @cell_attr_register
    def __init__(self,
                 in_channels,
@@ -78,7 +79,7 @@ class GNNFeatureTransform(nn.Cell):
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
                    weight_init.shape()[1] != in_channels:
                raise ValueError("weight_init shape error")
        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
@@ -104,7 +105,7 @@ class GNNFeatureTransform(nn.Cell):
    def extend_repr(self):
        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
            .format(self.in_channels, self.out_channels, self.weight, self.has_bias)
        if self.has_bias:
            str_info = str_info + ', bias={}'.format(self.bias)
@@ -136,6 +137,7 @@ class _BaseAggregator(nn.Cell):
    >>>     def construct(self, x):
    >>>         return self.reduce_mean(x, 1)
    """

    def __init__(self,
                 feature_in_dim,
                 feature_out_dim,
@@ -191,6 +193,7 @@ class MeanAggregator(_BaseAggregator):
        >>> input_data = Tensor(np.array(np.random.rand(32, 3, 32), dtypy=np.float32))
        >>> output = net(input_data)
    """

    def __init__(self,
                 feature_in_dim,
                 feature_out_dim,
@@ -349,6 +352,7 @@ class AttentionAggregator(nn.Cell):
            8)
    >>> net(input_data, biases)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
@@ -27,6 +27,7 @@ context.set_context(mode=context.GRAPH_MODE)
class MeanAggregatorGrad(nn.Cell):
    """Backward of MeanAggregator"""

    def __init__(self, network):
        super(MeanAggregatorGrad, self).__init__()
        self.grad_op = C.grad_all_with_sens
@@ -21,7 +21,7 @@ import pytest
@pytest.mark.env_single
def test_nccl_lenet():
    return_code = os.system("mpirun -n 8 pytest -s test_nccl_lenet.py")
    assert (return_code == 0)


@pytest.mark.level0
@@ -29,7 +29,7 @@ def test_nccl_lenet():
@pytest.mark.env_single
def test_nccl_all_reduce_op():
    return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_reduce_op.py")
    assert (return_code == 0)


@pytest.mark.level0
@@ -37,7 +37,7 @@ def test_nccl_all_reduce_op():
@pytest.mark.env_single
def test_nccl_all_gather_op():
    return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_gather_op.py")
    assert (return_code == 0)


@pytest.mark.level0
@@ -45,4 +45,4 @@ def test_nccl_all_gather_op():
@pytest.mark.env_single
def test_nccl_reduce_scatter_op():
    return_code = os.system("mpirun -n 8 pytest -s test_nccl_reduce_scatter_op.py")
    assert (return_code == 0)
@@ -71,7 +71,7 @@ class LeNet(nn.Cell):
def multisteplr(total_steps, gap, base_lr=0.9, gamma=0.1, dtype=mstype.float32):
    lr = []
    for step in range(total_steps):
        lr_ = base_lr * gamma ** (step // gap)
        lr.append(lr_)
    return Tensor(np.array(lr), dtype)
@@ -104,4 +104,4 @@ def test_lenet_nccl():
    with open("ms_loss.txt", "w") as fo2:
        fo2.write("loss:")
        fo2.write(str(losses[-5:]))
    assert (losses[-1] < 0.01)
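multisteplr above builds a step-decay schedule: the rate is multiplied by gamma every gap steps. A quick worked example (plain Python, added for illustration) with total_steps=6 and gap=2:

base_lr, gamma, gap = 0.9, 0.1, 2
lr = [base_lr * gamma ** (step // gap) for step in range(6)]
print(lr)  # [0.9, 0.9, 0.09, 0.09, 0.009, 0.009] (up to float rounding)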
@@ -20,6 +20,7 @@ from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size

context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
init('nccl')
@@ -30,10 +30,12 @@ from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepWithLossScaleCell
from mindspore.nn.optim import Momentum
from mindspore import log as logger

_current_dir = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]
SCHEMA_DIR = "/home/workspace/mindspore_dataset/bert/example/datasetSchema.json"


def get_config(version='base', batch_size=1):
    """get config"""
    if version == 'base':
@@ -80,13 +82,14 @@ def get_config(version='base', batch_size=1):
        bert_config = BertConfig(batch_size=batch_size)
    return bert_config


def me_de_train_dataset():
    """test me de train dataset"""
    # apply repeat operations
    repeat_count = 1
    ds = de.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids",
                                                                "next_sentence_labels", "masked_lm_positions",
                                                                "masked_lm_ids", "masked_lm_weights"], shuffle=False)
    type_cast_op = C.TypeCast(mstype.int32)
    ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
    ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
@@ -100,12 +103,14 @@ def me_de_train_dataset():
    ds = ds.repeat(repeat_count)
    return ds


def weight_variable(shape):
    """weight variable"""
    np.random.seed(1)
    ones = np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32)
    return Tensor(ones)


class ModelCallback(Callback):
    def __init__(self):
        super(ModelCallback, self).__init__()
@@ -120,6 +125,7 @@ class ModelCallback(Callback):
        self.lossscale_list.append(cb_params.net_outputs[2].asnumpy())
        print("epoch: {}, outputs are: {}".format(cb_params.cur_epoch_num, str(cb_params.net_outputs)))


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@@ -134,8 +140,9 @@ def test_bert_tdt():
    netwithloss = BertNetworkWithLoss(config, True)
    optimizer = Momentum(netwithloss.trainable_params(), learning_rate=2e-5, momentum=0.9)
    scale_window = 3
    scale_manager = DynamicLossScaleManager(2 ** 16, 2, scale_window)
    netwithgrads = BertTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer,
                                                     scale_update_cell=scale_manager.get_update_cell())
    netwithgrads.set_train(True)
    model = Model(netwithgrads)
    callback = ModelCallback()
@@ -162,10 +169,11 @@ def test_bert_tdt():
    # assertion occurs while the loss value, overflow state or loss_scale value is wrong
    loss_value = np.array(callback.loss_list)
    expect_loss_value = [12.1918125, 11.966035, 11.972114, 11.982189, 11.973948, 12.610932, 12.17564, 12.840248,
                         12.40294, 12.621653]
    print("loss value: {}".format(loss_value))
    assert np.allclose(loss_value, expect_loss_value, 0.00001, 0.00001)

    overflow = np.array(callback.overflow_list)
    expect_overflow = [True, True, False, False, False, True, False, False, False, True]
    print("overflow: {}".format(overflow))
@@ -176,5 +184,6 @@ def test_bert_tdt():
    print("loss scale: {}".format(loss_scale))
    assert np.allclose(loss_scale, expect_loss_scale, 0.00001, 0.00001)


if __name__ == '__main__':
    test_bert_tdt()
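For orientation (generic sketch, not MindSpore's implementation): dynamic loss scaling as configured here starts at 2 ** 16, shrinks by the factor on overflow, and grows again after scale_window consecutive clean steps; the exact policy of DynamicLossScaleManager may differ in details:

class DynamicScale:
    def __init__(self, init_scale=2 ** 16, factor=2, window=3):
        self.scale, self.factor, self.window = init_scale, factor, window
        self.good_steps = 0

    def update(self, overflow):
        if overflow:
            self.scale = max(1, self.scale // self.factor)  # shrink on overflow
            self.good_steps = 0
        else:
            self.good_steps += 1
            if self.good_steps >= self.window:              # grow after a clean window
                self.scale *= self.factor
                self.good_steps = 0
        return self.scale

s = DynamicScale()
print([s.update(o) for o in [True, True, False, False, False, True]])
# [32768, 16384, 16384, 16384, 32768, 16384]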
@@ -42,7 +42,7 @@ class AlexNet(nn.Cell):
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
        self.flatten = nn.Flatten()
        self.fc1 = nn.Dense(6 * 6 * 256, 4096)
        self.fc2 = nn.Dense(4096, 4096)
        self.fc3 = nn.Dense(4096, num_classes)
@@ -87,4 +87,4 @@ def test_trainTensor(num_classes=10, epoch=15, batch_size=32):
        label = Tensor(np.ones([batch_size]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)
    assert (losses[-1].asnumpy() < 0.01)
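The 6 * 6 * 256 in fc1 is the flattened size of AlexNet's final feature map (256 channels at 6x6 spatial resolution); a one-line check:

channels, height, width = 256, 6, 6   # shape of the last conv/pool output
print(channels * height * width)      # 9216, the input width of fc1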
@@ -25,7 +25,6 @@ from mindspore.nn import Dense
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
@@ -104,6 +103,8 @@ class SentimentNet(nn.Cell):
batch_size = 64


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -117,9 +118,9 @@ def test_LSTM():
    vocab_size = 252193
    max_len = 500

    weight = np.ones((vocab_size + 1, embed_size)).astype(np.float32)
    net = SentimentNet(vocab_size=(vocab_size + 1), embed_size=embed_size,
                       num_hiddens=num_hiddens, num_layers=num_layers,
                       bidirectional=bidirectional, weight=weight,
                       labels=labels, batch_size=batch_size)
@@ -140,4 +141,4 @@ def test_LSTM():
        loss = train_network(train_features, train_labels)
        losses.append(loss)
        print("loss:", loss.asnumpy())
    assert (losses[-1].asnumpy() < 0.01)
@@ -340,7 +340,8 @@ def test_trainTensor(num_classes=10, epoch=8, batch_size=1):
        label = Tensor(np.ones([batch_size]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)
    assert (losses[-1].asnumpy() < 1)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@@ -359,6 +360,6 @@ def test_trainTensor_amp(num_classes=10, epoch=18, batch_size=16):
        label = Tensor(np.ones([batch_size]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)
    assert (losses[-1][0].asnumpy() < 1)
    assert (losses[-1][1].asnumpy() == False)
    assert (losses[-1][2].asnumpy() > 1)
@@ -25,27 +25,27 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class NetArgmax(nn.Cell):
    def __init__(self):
        super(NetArgmax, self).__init__()
        self.argmax = P.Argmax(output_type=mstype.int32)
        x = Tensor(np.array([[1., 20., 5.],
                             [67., 8., 9.],
                             [130., 24., 15.]]).astype(np.float32))
        self.x = Parameter(initializer(x, x.shape()), name='x')

    def construct(self):
        return self.argmax(self.x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_argmax():
    Argmax = NetArgmax()
    output = Argmax()
    print("================================")
    expect = np.array([1, 0, 0]).astype(np.float32)
    print(output)
    assert (output.asnumpy() == expect).all()
@@ -18,8 +18,10 @@ from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
@@ -28,26 +30,28 @@ class Net(nn.Cell):
    def construct(self, x, b):
        return self.bias_add(x, b)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add1():
    x = np.ones([2, 3, 4, 4]).astype(np.float32)
    b = np.array([1, 1, 1]).astype(np.float32)
    bias_add = Net()
    output = bias_add(Tensor(x), Tensor(b))
    expect_output = np.ones([2, 3, 4, 4]).astype(np.float32) * 2
    print(output)
    assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add2():
    x = np.ones([2, 3]).astype(np.float32)
    b = np.array([1, 1, 1]).astype(np.float32)
    bias_add = Net()
    output = bias_add(Tensor(x), Tensor(b))
    expect_output = np.ones([2, 3]).astype(np.float32) * 2
    print(output)
    assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
@@ -19,8 +19,10 @@ from mindspore.ops.operations import _grad_ops as G
import mindspore.nn as nn
import numpy as np
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
@@ -29,24 +31,26 @@ class Net(nn.Cell):
    def construct(self, dout):
        return self.bias_add_grad(dout)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add_grad1():
    dout = np.ones([2, 3]).astype(np.float32)
    bias_add_grad = Net()
    output = bias_add_grad(Tensor(dout))
    expect_output = np.array([2., 2., 2.]).astype(np.float32)
    print(output.asnumpy())
    assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bias_add_grad2():
    dout = np.ones([2, 3, 4, 4]).astype(np.float32)
    bias_add_grad = Net()
    output = bias_add_grad(Tensor(dout))
    expect_output = np.array([32., 32., 32.]).astype(np.float32)
    print(output.asnumpy())
    assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
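The expected values follow from BiasAddGrad summing dout over every axis except the channel axis; a NumPy sketch (added) of the same reduction:

import numpy as np

dout = np.ones([2, 3, 4, 4], dtype=np.float32)   # NCHW
print(dout.sum(axis=(0, 2, 3)))                  # [32. 32. 32.]: 2*4*4 ones per channel

dout2d = np.ones([2, 3], dtype=np.float32)       # NC
print(dout2d.sum(axis=0))                        # [2. 2. 2.]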
@@ -25,32 +25,35 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net4(nn.Cell):
    def __init__(self):
        super(Net4, self).__init__()
        out_channel = 4
        kernel_size = 1
        self.conv_filter = G.Conv2DBackpropFilter(out_channel,
                                                  kernel_size,
                                                  pad_mode="valid",
                                                  pad=0,
                                                  mode=1,
                                                  stride=(1, 1),
                                                  dilation=(1, 1, 1, 1),
                                                  group=1)
        self.w = Parameter(
            initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]),
            name='w')
        self.x = Parameter(initializer(Tensor(np.array([[[
            [3, 0, 1, 2, 7, 4],
            [1, 5, 8, 9, 3, 1],
            [2, 7, 2, 5, 1, 3],
            [0, 1, 3, 1, 7, 8],
            [4, 2, 1, 6, 2, 8],
            [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x')
        self.out = Parameter(initializer(Tensor(np.array([[[
            [-5, -4, 0, 8],
            [-10, -2, 2, 3],
            [0, -2, -4, -7],
            [-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y')
        self.get_shape = P.Shape()

    def construct(self):
@@ -70,8 +73,8 @@ def test_conv2d_backprop_filter():
     [-104, -211, -322]
     [-102, -144, -248]]]]
    """
    expect = np.array([[[[-60, -142, -265],
                         [-104, -211, -322],
                         [-102, -144, -248]]]]).astype(np.float32)
    print(output)
    assert (output.asnumpy() == expect).all()
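The filter gradient of a valid, stride-1 convolution is the cross-correlation of the input with the output gradient: dW[i, j] = sum over (m, n) of x[i+m, j+n] * dout[m, n]. A NumPy sketch (added) that reproduces the expected 3x3 block from the arrays above:

import numpy as np

x = np.array([[3, 0, 1, 2, 7, 4],
              [1, 5, 8, 9, 3, 1],
              [2, 7, 2, 5, 1, 3],
              [0, 1, 3, 1, 7, 8],
              [4, 2, 1, 6, 2, 8],
              [2, 4, 5, 2, 3, 9]], dtype=np.float32)
dout = np.array([[-5, -4, 0, 8],
                 [-10, -2, 2, 3],
                 [0, -2, -4, -7],
                 [-3, -2, -3, -16]], dtype=np.float32)
dw = np.zeros((3, 3), dtype=np.float32)
for i in range(3):
    for j in range(3):
        dw[i, j] = (x[i:i + 4, j:j + 4] * dout).sum()
print(dw)  # [[-60, -142, -265], [-104, -211, -322], [-102, -144, -248]]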
@@ -24,32 +24,35 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net5(nn.Cell):
    def __init__(self):
        super(Net5, self).__init__()
        out_channel = 4
        kernel_size = 1
        self.conv_input = P.Conv2DBackpropInput(out_channel,
                                                kernel_size,
                                                pad_mode="valid",
                                                pad=0,
                                                mode=1,
                                                stride=1,
                                                dilation=1,
                                                group=1)
        self.w = Parameter(
            initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]),
            name='w')
        self.x = Parameter(initializer(Tensor(np.array([[[
            [3, 0, 1, 2, 7, 4],
            [1, 5, 8, 9, 3, 1],
            [2, 7, 2, 5, 1, 3],
            [0, 1, 3, 1, 7, 8],
            [4, 2, 1, 6, 2, 8],
            [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x')
        self.out = Parameter(initializer(Tensor(np.array([[[
            [-5, -4, 0, 8],
            [-10, -2, 2, 3],
            [0, -2, -4, -7],
            [-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y')
        self.get_shape = P.Shape()

    def construct(self):
@@ -72,11 +75,11 @@ def test_conv2d_backprop_input():
     [ -3, -4, -4, -19, 7, 23]
     [ -3, -2, 0, -14, 3, 16]]]]
    """
    expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                         [-15, -6, 17, 17, -2, -11],
                         [-15, -8, 13, 12, 2, -4],
                         [-13, -6, 8, -14, 5, 20],
                         [-3, -4, -4, -19, 7, 23],
                         [-3, -2, 0, -14, 3, 16]]]]).astype(np.float32)
    print(output)
    assert (output.asnumpy() == expect).all()
@@ -24,8 +24,9 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetConv2d(nn.Cell):
    def __init__(self):
        super(NetConv2d, self).__init__()
        out_channel = 2
        kernel_size = 1
@@ -42,7 +43,6 @@ class NetConv2d(nn.Cell):
        self.x = Parameter(initializer(
            Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32)), [1, 3, 3, 3]), name='x')

    def construct(self):
        return self.conv(self.x, self.w)
@@ -64,9 +64,9 @@ def test_conv2d():
      [162. 174. 186.]
      [198. 210. 222.]]]]
    """
    expect = np.array([[[[45, 48, 51],
                         [54, 57, 60],
                         [63, 66, 69]],
                        [[126, 138, 150],
                         [162, 174, 186],
                         [198, 210, 222]]]]).astype(np.float32)
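A 1x1 convolution is just a per-pixel weighted sum over input channels; the sketch below (added) reproduces the expected values. Note the arange weight initialization is inferred from the expected output, since the weight definition is not shown in this hunk:

import numpy as np

x = np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32)
w = np.arange(2 * 3 * 1 * 1).reshape(2, 3, 1, 1).astype(np.float32)  # inferred, not shown above
out = np.tensordot(w[:, :, 0, 0], x[0], axes=([1], [0]))  # shape (2, 3, 3)
print(out[0])  # [[45, 48, 51], [54, 57, 60], [63, 66, 69]]
print(out[1])  # [[126, 138, 150], [162, 174, 186], [198, 210, 222]]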
@@ -24,14 +24,15 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class NetEqualCount(nn.Cell):
    def __init__(self):
        super(NetEqualCount, self).__init__()
        self.equalcount = P.EqualCount()
        x = Tensor(np.array([1, 20, 5]).astype(np.int32))
        y = Tensor(np.array([2, 20, 5]).astype(np.int32))
        self.x = Parameter(initializer(x, x.shape()), name='x')
        self.y = Parameter(initializer(y, y.shape()), name='y')

    def construct(self):
        return self.equalcount(self.x, self.y)
@@ -41,11 +42,9 @@ class NetEqualCount(nn.Cell):
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_equalcount():
    EqualCount = NetEqualCount()
    output = EqualCount()
    print("================================")
    expect = np.array([2]).astype(np.int32)
    print(output)
    assert (output.asnumpy() == expect).all()
@@ -25,6 +25,7 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net_Pool_Grad(nn.Cell):
    def __init__(self):
        super(Net_Pool_Grad, self).__init__()
@@ -56,7 +57,6 @@ class Net_Pool_Grad(nn.Cell):
            [31, 33, 35]
        ]]]).astype(np.float32)), [1, 1, 3, 3]), name='d')

    def construct(self):
        return self.maxpool_grad_fun(self.x, self.a, self.d)
@@ -78,4 +78,3 @@ def test_maxpool2d_grad():
            [0, 31, 0, 33, 0, 35]
        ]]]))
    assert (output.asnumpy() == expect_result).all()
@@ -21,20 +21,25 @@ import mindspore.context as context
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class Net_Pool(nn.Cell):
    def __init__(self):
        super(Net_Pool, self).__init__()
        self.maxpool_fun = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="VALID")

    def construct(self, x):
        return self.maxpool_fun(x)


class Net_Pool2(nn.Cell):
    def __init__(self):
        super(Net_Pool2, self).__init__()
        self.maxpool_fun = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME")

    def construct(self, x):
        return self.maxpool_fun(x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@@ -25,6 +25,7 @@ import mindspore.context as context
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class MomentumNet(nn.Cell):
    def __init__(self):
        super(MomentumNet, self).__init__()
@@ -39,6 +40,7 @@ class MomentumNet(nn.Cell):
        output = self.fc1(output)
        return output


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@@ -46,7 +48,7 @@ def test_momentum():
    epoch = 13
    net = MomentumNet()
    learning_rate = 0.1
    momentum = 0.9
    optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
@@ -55,11 +57,11 @@ def test_momentum():
    train_network.set_train()
    losses = []
    for i in range(epoch):
        data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01)
        label = Tensor(np.array([0]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)

    print("================================")
    print(losses)
    """
@ -1,17 +1,17 @@
#Copyright 2019 Huawei Technologies Co., Ltd # Copyright 2019 Huawei Technologies Co., Ltd
# #
#Licensed under the Apache License, Version 2.0(the "License"); # Licensed under the Apache License, Version 2.0(the "License");
#you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
#You may obtain a copy of the License at # You may obtain a copy of the License at
# #
#http: // www.apache.org/licenses/LICENSE-2.0 # http: // www.apache.org/licenses/LICENSE-2.0
# #
#Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
#limitations under the License. # limitations under the License.
#== == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == # == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == ==
import pytest import pytest
from mindspore import Tensor from mindspore import Tensor
@ -23,13 +23,14 @@ import mindspore.context as context
from mindspore.common.initializer import initializer from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter from mindspore.common.parameter import Parameter
x = np.random.uniform(-2, 2, (2,3,4,4)).astype(np.float32) x = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32)
y = np.random.uniform(-2, 2, (1,1,1,1)).astype(np.float32) y = np.random.uniform(-2, 2, (1, 1, 1, 1)).astype(np.float32)
context.set_context(device_target='CPU') context.set_context(device_target='CPU')
class Net(nn.Cell): class Net(nn.Cell):
def __init__( self): def __init__(self):
super(Net, self).__init__() super(Net, self).__init__()
self.mul = P.Mul() self.mul = P.Mul()
self.x = Parameter(initializer(Tensor(x), x.shape), name='x3') self.x = Parameter(initializer(Tensor(x), x.shape), name='x3')
@ -39,6 +40,7 @@ class Net(nn.Cell):
def construct(self): def construct(self):
return self.mul(self.x, self.y) return self.mul(self.x, self.y)
@pytest.mark.level0 @pytest.mark.level0
@pytest.mark.platform_x86_cpu @pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard @pytest.mark.env_onecard
@@ -25,6 +25,7 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetReluGrad(nn.Cell):
    def __init__(self):
        super(NetReluGrad, self).__init__()
@@ -35,16 +36,18 @@ class NetReluGrad(nn.Cell):
        self.dy = Parameter(initializer(Tensor(np.array([[[[1, 0, 1],
                                                           [0, 1, 0],
                                                           [1, 1, 1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='dy')

    def construct(self):
        return self.rekuGrad(self.dy, self.x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_relu_grad():
    relu_grad = NetReluGrad()
    output = relu_grad()
    expect = np.array([[[[0, 0, 1, ], [0, 0, 0, ], [1, 1, 0.]]]]).astype(np.float32)
    error = np.ones(shape=[3, 3]) * 1.0e-6
    diff = output.asnumpy() - expect
    assert np.all(diff < error)
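ReLU's backward pass masks the incoming gradient where the forward input was non-positive, dy * (x > 0). A NumPy one-liner (added) reproducing the expected result, assuming this test reuses the same x as the NetRelu test that follows:

import numpy as np

x = np.array([[-1, 1, 10], [1, -1, 1], [10, 1, -1]], dtype=np.float32)
dy = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 1]], dtype=np.float32)
print(dy * (x > 0))  # [[0, 0, 1], [0, 0, 0], [1, 1, 0]] -- matches the expected gradient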
@@ -24,6 +24,7 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetRelu(nn.Cell):
    def __init__(self):
        super(NetRelu, self).__init__()
@@ -31,17 +32,19 @@ class NetRelu(nn.Cell):
        self.x = Parameter(initializer(Tensor(np.array([[[[-1, 1, 10],
                                                          [1, -1, 1],
                                                          [10, 1, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='x')

    def construct(self):
        return self.relu(self.x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_relu():
    relu = NetRelu()
    output = relu()
    expect = np.array([[[[0, 1, 10, ],
                         [1, 0, 1, ],
                         [10, 1, 0.]]]]).astype(np.float32)
    print(output)
    assert (output.asnumpy() == expect).all()
View File
@@ -24,18 +24,20 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetSoftmax(nn.Cell):
    def __init__(self):
        super(NetSoftmax, self).__init__()
        self.softmax = P.Softmax()
        x = Tensor(np.array([[0.1, 0.3, 0.6],
                             [0.2, -0.6, 0.8],
                             [0.6, 1, 0.4]]).astype(np.float32))
        self.x = Parameter(initializer(x, x.shape()), name='x')

    def construct(self):
        return self.softmax(self.x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@@ -49,4 +51,3 @@ def test_softmax():
    diff = np.abs(outputSum - expect)
    print(diff)
    assert np.all(diff < error)
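The body of test_softmax is truncated in this hunk, but the comparison of outputSum against expect appears to check the defining invariant of softmax: each row of the output sums to 1. A minimal NumPy sketch of that invariant on the same input:

import numpy as np
x = np.array([[0.1, 0.3, 0.6],
              [0.2, -0.6, 0.8],
              [0.6, 1, 0.4]], dtype=np.float32)
e = np.exp(x - x.max(axis=-1, keepdims=True))  # stabilized exponentials
softmax = e / e.sum(axis=-1, keepdims=True)
assert np.allclose(softmax.sum(axis=-1), 1.0, atol=1e-6)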
View File
@@ -24,15 +24,16 @@ from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class NetSoftmaxWithCrossEntropy(nn.Cell):
    def __init__(self):
        super(NetSoftmaxWithCrossEntropy, self).__init__()
        logits = Tensor(np.array([[1, 1, 10],
                                  [1, 10, 1],
                                  [10, 1, 1]]).astype(np.float32))
        self.logits = Parameter(initializer(logits, logits.shape()), name='logits')
        labels = Tensor(np.array([2, 1, 0]).astype(np.int32))
        self.labels = Parameter(initializer(labels, labels.shape()), name='labels')
        self.SoftmaxWithCrossEntropy = P.SparseSoftmaxCrossEntropyWithLogits(True)

    def construct(self):
@@ -45,9 +46,9 @@ class NetSoftmaxWithCrossEntropy(nn.Cell):
def test_net():
    SoftmaxWithCrossEntropy = NetSoftmaxWithCrossEntropy()
    output = SoftmaxWithCrossEntropy()
    expect = np.array([[4.1126452e-05, 4.1126452e-05, -8.2234539e-05],
                       [4.1126452e-05, -8.2234539e-05, 4.1126452e-05],
                       [-8.2234539e-05, 4.1126452e-05, 4.1126452e-05]]).astype(np.float32)
    print(output)
    error = np.ones(shape=[3, 3]) * 1.0e-6
    diff = output.asnumpy() - expect
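The expect matrix is the gradient of the mean sparse softmax cross-entropy loss: softmax(logits) minus the one-hot labels, divided by the batch size of 3 (the True flag requests the gradient path rather than the scalar loss). A NumPy reproduction, assuming that standard formula:

import numpy as np
logits = np.array([[1, 1, 10], [1, 10, 1], [10, 1, 1]], dtype=np.float32)
labels = np.array([2, 1, 0])
e = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs = e / e.sum(axis=-1, keepdims=True)
onehot = np.eye(3, dtype=np.float32)[labels]
grad = (probs - onehot) / len(labels)  # mean reduction over the batch
# grad matches `expect` above within the test's 1e-6 tolerance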
View File
@@ -21,6 +21,8 @@ from topi.cce import util
from te import platform as cce

Nonetype = type(None)


# pylint: disable=unused-argument, no-value-for-parameter, too-many-branches
@fusion_manager.register("conv2d")
def conv2d_compute(inputs, weights, bias, outputs, strides, pad_list, dilations,
@@ -103,6 +105,7 @@ def conv2d_compute(inputs, weights, bias, outputs, strides, pad_list, dilations,
    return res


@util.check_input_type(dict, dict, (dict, Nonetype), dict, (tuple, list), (tuple, list), (tuple, list),
                       str)
def conv2d(inputs, weights, bias, outputs, strides, pad_list, dilations,
@@ -189,7 +192,7 @@ def conv2d(inputs, weights, bias, outputs, strides, pad_list, dilations,
    if cce.CceProductParams().cce_product == "5.10":
        conv_layer_fast_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype,
                            padh, padw, strideh, stridew, bias=use_bias,
                            kernel_name=kernel_name, need_build=True, need_print=False)
    else:
        conv_layer_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype,
                       padh, padw, strideh, stridew,
View File
@@ -18,14 +18,16 @@ from te.platform import CUBE_MKN
from topi import generic
from topi.cce import util
from topi.cce.util import is_v200_version

# pylint: disable=R0912,R0913,R0914,R0915,E1101
# the dim of shape in conv must be 4
PAD_SHAPE_DIM = 2

NONETYPE = type(None)


@util.check_input_type((list, tuple), (list, tuple), str, str, str, (list, int), (list, int),
                       int, int, (list, tuple), (list, tuple),
                       str, str, str,
                       str, str, str,
                       str, bool, str)
@@ -57,9 +59,9 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p
    if quantize_config[0] == 0:
        if is_v200_version():
            util.check_dtype_rule(in_dtype, ('int8',))
            util.check_dtype_rule(w_dtype, ('int8',))
            util.check_dtype_rule(res_dtype, ('int32',))
        else:
            util.check_dtype_rule(in_dtype, ['float16'])
            util.check_dtype_rule(w_dtype, ['float16'])
@@ -117,7 +119,7 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p
    if isinstance(padh, list):
        if len(padh) != PAD_SHAPE_DIM:
            raise RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM)
        pad_top = padh[0]
        pad_bottom = padh[1]
    else:
@@ -126,7 +128,7 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p
    if isinstance(padw, list):
        if len(padw) != PAD_SHAPE_DIM:
            raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM)
        pad_left = padw[0]
        pad_right = padw[1]
    else:
@@ -134,8 +136,8 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p
        pad_right = padw

    shape_in, shape_w = te.lang.cce.check_conv_shape(shape_in, shape_w, pad_top, pad_bottom, \
                                                     pad_left, pad_right, strideh, \
                                                     stridew, in_dtype, w_dtype, res_dtype)

    return shape_in, shape_w
@@ -248,9 +250,12 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw,
    shape_in = list(shape_in)
    shape_w = list(shape_w)

    shape_in, shape_w = conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh,
                                                  stridew,
                                                  quantize_config, scale_sqrt, scale_q_dtype, offset_q_dtype,
                                                  scale_dq_dtype,
                                                  scale_rq_dtype, offset_rq_dtype, offset_w_dtype, offset_pad_dtype,
                                                  bias, kernel_name)

    # quantize switch on
    if quantize_config[0] == 1:
@@ -338,7 +343,7 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw,
        if is_quantize:
            scale_q = tvm.placeholder(
                (CUBE_MKN[scale_q_dtype]['mac'][1],), name='scaleQ', dtype=scale_q_dtype)
            if quantize_algorithm == 1:
                offset_q = tvm.placeholder(
                    (CUBE_MKN[offset_q_dtype]['mac'][1],), name='offsetQ', dtype=offset_q_dtype)
@@ -353,13 +358,13 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw,
                else (out_channel,)
            scale_rq = tvm.placeholder(
                scale_rq_shape, name='scaleRq', dtype=scale_rq_dtype)
            if quantize_algorithm == 1:
                offset_rq_shape = (CUBE_MKN[offset_rq_dtype]['mac'][1],)
                offset_rq = tvm.placeholder(
                    offset_rq_shape, name='offsetRq', dtype=offset_rq_dtype)

        # need offset_pad, for half offset
        if quantize_algorithm == 1:
            offset_pad = tvm.placeholder(
                (CUBE_MKN[offset_pad_dtype]['mac'][1],), name='offset_pad',
                dtype=offset_pad_dtype)
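The padh/padw handling above normalizes a padding argument that may arrive either as an int (symmetric padding) or as a two-element list. A standalone sketch of that normalization pattern (the helper name is illustrative, not part of this file):

PAD_SHAPE_DIM = 2

def normalize_pad(pad):
    """Return (before, after) padding from an int or a 2-element list."""
    if isinstance(pad, list):
        if len(pad) != PAD_SHAPE_DIM:
            raise RuntimeError("Dimension must be %d when pad is a list." % PAD_SHAPE_DIM)
        return pad[0], pad[1]
    return pad, pad  # an int pads both sides equally

pad_top, pad_bottom = normalize_pad([1, 2])
pad_left, pad_right = normalize_pad(0)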
View File
@@ -17,12 +17,14 @@ from te import tvm
from te.platform import CUBE_MKN
from topi import generic
from topi.cce import util

# pylint: disable=R0913,R0914,R0915,E1101
# the dim of shape in conv must be 4
PAD_SHAPE_DIM = 2

NoneType = type(None)


@util.check_input_type((list, tuple), (list, tuple), str, str, str,
                       (list, int), (list, int), int, int, bool, str)
def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
@@ -40,7 +42,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty
    if isinstance(padh, list):
        if len(padh) != PAD_SHAPE_DIM:
            raise RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM)
        pad_top = padh[0]
        pad_bottom = padh[1]
    else:
@@ -49,7 +51,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty
    if isinstance(padw, list):
        if len(padw) != PAD_SHAPE_DIM:
            raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM)
        pad_left = padw[0]
        pad_right = padw[1]
    else:
@@ -62,6 +64,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty
    return shape_in, shape_w


@util.check_input_type((list, tuple), (list, tuple), str, str, str,
                       (list, int), (list, int), int, int,
                       bool, str, bool, bool)
@@ -112,7 +115,7 @@ def conv_layer_fast_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
    shape_w = list(shape_w)

    shape_in, shape_w = conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
                                                       padh, padw, strideh, stridew, bias, kernel_name)

    batch_size = shape_in[0]
    in_channel = shape_in[1]
View File
@@ -20,6 +20,8 @@ from mindspore import Tensor
from mindspore._checkparam import ParamValidator as validator
from mindspore._checkparam import Rel, check_bool, check_int_positive, twice
from mindspore.common import dtype as mstype


class Cus_Conv2D(PrimitiveWithInfer):
    r"""
    Applies 2D convolution for the input.
@@ -92,13 +94,13 @@ class Cus_Conv2D(PrimitiveWithInfer):
        validator.check_type('kernel_size', kernel_size, [int, tuple])
        if isinstance(kernel_size, int) and kernel_size < 1:
            raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed '
                             + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.')
        if isinstance(kernel_size, tuple) and (len(kernel_size) != 2 or
                                               (not isinstance(kernel_size[0], int)) or
                                               (not isinstance(kernel_size[1], int)) or
                                               kernel_size[0] < 1 or kernel_size[1] < 1):
            raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed '
                             + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.')
        self.stride = validator.check_integer('stride', stride, 1, Rel.GE)
        from .cus_conv2d_impl import Cus_Conv2D
@@ -147,4 +149,4 @@ class Cus_Conv2D(PrimitiveWithInfer):
    def infer_dtype(self, x_dtype, w_dtype):
        args = {'x_dtype': x_dtype, 'w_dtype': w_dtype}
        validator.check_type_same(args, [mstype.int8, mstype.int32, mstype.float16, mstype.float32])
        return x_dtype
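The two kernel_size checks above accept either a positive int or a 2-tuple of positive ints. A compact sketch of the same validation as a reusable helper (the name and error text are illustrative, and expanding an int to (h, w) is presumably what the imported `twice` helper is used for):

def check_kernel_size(kernel_size):
    """Accept a positive int or a 2-tuple of positive ints."""
    if isinstance(kernel_size, int):
        if kernel_size < 1:
            raise ValueError('kernel_size must be >= 1, got %s' % kernel_size)
        return kernel_size, kernel_size  # expand int to (h, w)
    if (isinstance(kernel_size, tuple) and len(kernel_size) == 2
            and all(isinstance(k, int) and k >= 1 for k in kernel_size)):
        return kernel_size
    raise ValueError('kernel_size must be an int or a 2-tuple of ints >= 1')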
View File
@@ -20,6 +20,7 @@ from mindspore import Tensor


# y = x^2
class CusSquare(PrimitiveWithInfer):
    """CusSquare definition"""

    @prim_attr_register
    def __init__(self):
        """init CusSquare"""
View File
@@ -20,31 +20,34 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from .cus_conv2d import Cus_Conv2D

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        out_channel = 64
        kernel_size = 7
        self.conv = Cus_Conv2D(out_channel,
                               kernel_size,
                               mode=1,
                               pad_mode="valid",
                               pad=0,
                               stride=1,
                               dilation=1,
                               group=1)
        self.w = Parameter(initializer(
            'normal', [64, 3, 7, 7]), name='w')

    @ms_function
    def construct(self, x):
        return self.conv(x, self.w)


def test_net():
    np.random.seed(3800)
    x = np.random.randn(32, 3, 224, 224).astype(np.float32)
    conv = Net()
    output = conv(Tensor(x))
    print(output.asnumpy())
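With a 32x3x224x224 input, a 7x7 kernel, stride 1, and pad_mode "valid", the output should be 32x64x218x218, since 224 - 7 + 1 = 218. A small check of that shape arithmetic (the formula only, not the operator itself):

def valid_out_dim(in_dim, kernel, stride=1):
    # "valid" convolution keeps only fully covered window positions
    return (in_dim - kernel) // stride + 1

assert valid_out_dim(224, 7) == 218  # so output shape is (32, 64, 218, 218)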
View File
@@ -18,8 +18,10 @@ import mindspore.context as context
from mindspore import Tensor
from cus_square import CusSquare
import pytest

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    """Net definition"""
@@ -30,6 +32,7 @@ class Net(nn.Cell):
    def construct(self, data):
        return self.square(data)


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@@ -40,5 +43,5 @@ def test_net():
    output = square(Tensor(x))
    print(x)
    print(output.asnumpy())
    expect = np.array([1.0, 16.0, 81.0]).astype(np.float32)
    assert (output.asnumpy() == expect).all()
View File
@@ -33,6 +33,7 @@ class Net(nn.Cell):
    def construct(self, x, y, z):
        return self.add((x, y, z))


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
View File
@@ -21,11 +21,12 @@ from mindspore.common import dtype as mstype
import mindspore.nn as nn
import mindspore.context as context


class NetArgmax(nn.Cell):
    def __init__(self):
        super(NetArgmax, self).__init__()
        axis1 = 0
        axis2 = -1
        self.argmax1 = P.Argmax(axis1, output_type=mstype.int32)
        self.argmax2 = P.Argmax(axis2, output_type=mstype.int32)
        self.argmax3 = P.Argmax(output_type=mstype.int32)
@@ -33,27 +34,28 @@ class NetArgmax(nn.Cell):
    def construct(self, x):
        return (self.argmax1(x), self.argmax2(x), self.argmax3(x))


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_argmax():
    x = Tensor(np.array([[1., 20., 5.],
                         [67., 8., 9.],
                         [130., 24., 15.],
                         [0.3, -0.4, -15.]]).astype(np.float32))
    expect1 = np.array([2, 2, 2]).astype(np.int32)
    expect2 = np.array([1, 0, 0, 0]).astype(np.int32)

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    Argmax = NetArgmax()
    output = Argmax(x)
    assert (output[0].asnumpy() == expect1).all()
    assert (output[1].asnumpy() == expect2).all()
    assert (output[2].asnumpy() == expect2).all()

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    Argmax1 = NetArgmax()
    output1 = Argmax1(x)  # exercise the graph-mode instance, not the pynative one
    assert (output1[0].asnumpy() == expect1).all()
    assert (output1[1].asnumpy() == expect2).all()
    assert (output1[2].asnumpy() == expect2).all()
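The two expectations follow from the axis arguments: axis 0 takes the argmax down each of the three columns (row 2 holds the column maxima 130, 24, and 15), while axis -1 takes it across each of the four rows. A NumPy check of the same values:

import numpy as np
x = np.array([[1., 20., 5.],
              [67., 8., 9.],
              [130., 24., 15.],
              [0.3, -0.4, -15.]], dtype=np.float32)
assert (np.argmax(x, axis=0) == [2, 2, 2]).all()      # column-wise maxima
assert (np.argmax(x, axis=-1) == [1, 0, 0, 0]).all()  # row-wise maxima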
View File
@@ -20,6 +20,7 @@ import mindspore.nn as nn
import numpy as np
import mindspore.context as context


class AssignAdd(nn.Cell):
    def __init__(self, value):
        super(AssignAdd, self).__init__()
@@ -30,21 +31,22 @@ class AssignAdd(nn.Cell):
        res = self.add(self.var, y)
        return res


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_assign_add():
    expect1 = np.array([[[[0, 2, 4.],
                          [6, 8, 10.],
                          [12, 14, 16.]],
                         [[18, 20, 22.],
                          [24, 26, 28.],
                          [30, 32, 34.]],
                         [[36, 38, 40.],
                          [42, 44, 46.],
                          [48, 50, 52.]]]])
    expect2 = np.array([[[[0, 3, 6],
                          [9, 12, 15],
                          [18, 21, 24]],
                         [[27, 30, 33],
                          [36, 39, 42],
View File
def construct(self, value): def construct(self, value):
return self.assign(self.var, value) return self.assign(self.var, value)
x = np.array([[1.2, 1], [1, 0]]).astype(np.float32) x = np.array([[1.2, 1], [1, 0]]).astype(np.float32)
value = np.array([[1, 2], [3, 4.0]]).astype(np.float32) value = np.array([[1, 2], [3, 4.0]]).astype(np.float32)
@pytest.mark.level0 @pytest.mark.level0
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
View File
@@ -24,6 +24,7 @@ import mindspore.nn as nn
import mindspore.context as context
from mindspore.common import dtype as mstype


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -35,6 +36,7 @@ class BatchMatMulNet(nn.Cell):
    def construct(self, x, y):
        return self.batch_matmul(x, y)


def test_4D():
    input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
    input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
@@ -42,15 +44,15 @@ def test_4D():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = BatchMatMulNet()
    output = net(input_x, input_y)
    expect = [[[[20, 23, 26, 29]],
               [[200, 212, 224, 236]],
               [[596, 617, 638, 659]],
               [[1208, 1238, 1268, 1298]]],
              [[[2036, 2075, 2114, 2153]],
               [[3080, 3128, 3176, 3224]],
               [[4340, 4397, 4454, 4511]],
               [[5816, 5882, 5948, 6014]]]]
    assert (output.asnumpy() == expect).all()
@@ -58,21 +60,21 @@ def test_4D():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_a():
    input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
    input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = BatchMatMulNet(transpose_a=True)
    output = net(input_x, input_y)
    expect = [[[[20, 23, 26, 29]],
               [[200, 212, 224, 236]],
               [[596, 617, 638, 659]],
               [[1208, 1238, 1268, 1298]]],
              [[[2036, 2075, 2114, 2153]],
               [[3080, 3128, 3176, 3224]],
               [[4340, 4397, 4454, 4511]],
               [[5816, 5882, 5948, 6014]]]]
    assert (output.asnumpy() == expect).all()
@@ -80,21 +82,21 @@ def test_4D_transpose_a():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_b():
    input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
    input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = BatchMatMulNet(transpose_b=True)
    output = net(input_x, input_y)
    expect = [[[[5, 14, 23, 32]],
               [[158, 194, 230, 266]],
               [[527, 590, 653, 716]],
               [[1112, 1202, 1292, 1382]]],
              [[[1913, 2030, 2147, 2264]],
               [[2930, 3074, 3218, 3362]],
               [[4163, 4334, 4505, 4676]],
               [[5612, 5810, 6008, 6206]]]]
    assert (output.asnumpy() == expect).all()
@@ -102,23 +104,24 @@ def test_4D_transpose_b():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_ab():
    input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
    input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = BatchMatMulNet(transpose_a=True, transpose_b=True)
    output = net(input_x, input_y)
    expect = [[[[5, 14, 23, 32]],
               [[158, 194, 230, 266]],
               [[527, 590, 653, 716]],
               [[1112, 1202, 1292, 1382]]],
              [[[1913, 2030, 2147, 2264]],
               [[2930, 3074, 3218, 3362]],
               [[4163, 4334, 4505, 4676]],
               [[5612, 5810, 6008, 6206]]]]
    assert (output.asnumpy() == expect).all()


class BatchMatMulNet(nn.Cell):
    def __init__(self, transpose_a=False, transpose_b=False):
        super(BatchMatMulNet, self).__init__()
@@ -127,6 +130,7 @@ class BatchMatMulNet(nn.Cell):
    def construct(self, x, y):
        return self.batch_matmul(x, y)


def test_4D_fp16():
    input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16)
    input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16)
@@ -134,13 +138,13 @@ def test_4D_fp16():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = BatchMatMulNet()
    output = net(input_x, input_y)
    expect = [[[[20, 23, 26, 29]],
               [[200, 212, 224, 236]],
               [[596, 617, 638, 659]],
               [[1208, 1238, 1268, 1298]]],
              [[[2036, 2075, 2114, 2153]],
               [[3080, 3128, 3176, 3224]],
               [[4340, 4397, 4454, 4511]],
               [[5816, 5882, 5948, 6014]]]]
    assert (output.asnumpy() == expect).all()
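All of the expected tensors can be reproduced with NumPy's batched matmul, which applies the 1x3 by 3x4 product (or its transposed variants) independently over the leading 2x4 batch dimensions. A sketch for the plain test_4D case, assuming np.matmul's batching matches P.BatchMatMul:

import numpy as np
x = np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3).astype(np.float32)
y = np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4).astype(np.float32)
expect = np.matmul(x, y)  # batched over the leading (2, 4) dims
assert expect[0, 0].tolist() == [[20., 23., 26., 29.]]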
Some files were not shown because too many files have changed in this diff