sync 0807 code to ms-incubator

changzherui 2020-08-07 23:26:12 +08:00
commit 22336c0843
34 changed files with 674 additions and 130 deletions

.gitmodules (vendored): 2 changes

@ -17,7 +17,7 @@
url = https://gitee.com/mindspore/akg.git
[submodule "graphengine"]
path = graphengine
url = https://gitee.com/mindspore/graphengine.git
url = https://gitee.com/ms-incubator/graphengine.git
[submodule "third_party/OpenCL-CLHPP"]
path = third_party/OpenCL-CLHPP
url = https://github.com/KhronosGroup/OpenCL-CLHPP.git

@ -1 +1 @@
Subproject commit 6d12411003164d88eaed62e1ead33761cbfa15ef
Subproject commit e64a1cfc0457c96859bc9be1693443aa14f2e9df


@ -309,12 +309,6 @@ INPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{1, INPUT_DESC(features)}, {2, INPUT
ATTR_MAP(SoftmaxCrossEntropyWithLogits) = EMPTY_ATTR_MAP;
OUTPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(backprop)}};
// MeanGrad
INPUT_MAP(MeanGrad) = {{1, INPUT_DESC(x)}};
INPUT_ATTR_MAP(MeanGrad) = {{2, ATTR_DESC(mean_grad_output_shape_value, kOpFormat_NHWC,
AnyTraits<std::vector<int64_t>>(), AnyTraits<int64_t>())}};
ATTR_MAP(MeanGrad) = {{"mode", ATTR_DESC(mode, AnyTraits<int64_t>())}};
INPUT_MAP(SliceD) = {{1, INPUT_DESC(x)}};
INPUT_ATTR_MAP(SliceD) = {{2, ATTR_DESC(offsets, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
{3, ATTR_DESC(size, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())}};
@ -431,11 +425,6 @@ INPUT_MAP(TopK) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(k)}};
ATTR_MAP(TopK) = {{"sorted", ATTR_DESC(sorted, AnyTraits<bool>())}};
OUTPUT_MAP(TopK) = {{0, OUTPUT_DESC(values)}, {1, OUTPUT_DESC(indices)}};
// Multiply
INPUT_MAP(Multiply) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}};
ATTR_MAP(Multiply) = EMPTY_ATTR_MAP;
OUTPUT_MAP(Multiply) = {{0, OUTPUT_DESC(z)}};
// TileD
INPUT_MAP(TileD) = {{1, INPUT_DESC(x)}};
INPUT_ATTR_MAP(TileD) = {{2, ATTR_DESC(multiples, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())}};


@ -70,8 +70,6 @@ DECLARE_OP_ADAPTER(AssignSub)
DECLARE_OP_USE_OUTPUT(AssignSub)
DECLARE_OP_ADAPTER(ReduceMean)
DECLARE_OP_ADAPTER(Multiply)
DECLARE_OP_USE_OUTPUT(Multiply)
// ** Distributed Operations **
DECLARE_OP_ADAPTER(HcomReduceScatter)
@ -327,9 +325,6 @@ DECLARE_OP_USE_OUTPUT(MatMulV2)
DECLARE_OP_ADAPTER(SoftmaxCrossEntropyWithLogits)
DECLARE_OP_USE_OUTPUT(SoftmaxCrossEntropyWithLogits)
DECLARE_OP_ADAPTER(MeanGrad)
DECLARE_OP_USE_INPUT_ATTR(MeanGrad)
DECLARE_OP_ADAPTER(Assign)
DECLARE_OP_USE_OUTPUT(Assign)
DECLARE_OP_ADAPTER(Constant)


@ -293,12 +293,6 @@ INPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{1, INPUT_DESC(features)}, {2, INPUT
ATTR_MAP(SoftmaxCrossEntropyWithLogits) = EMPTY_ATTR_MAP;
OUTPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(backprop)}};
// MeanGrad
INPUT_MAP(MeanGrad) = {{1, INPUT_DESC(x)}};
INPUT_ATTR_MAP(MeanGrad) = {{2, ATTR_DESC(mean_grad_output_shape_value, kOpFormat_NHWC,
AnyTraits<std::vector<int64_t>>(), AnyTraits<int64_t>())}};
ATTR_MAP(MeanGrad) = {{"mode", ATTR_DESC(mode, AnyTraits<int64_t>())}};
INPUT_MAP(SliceD) = {{1, INPUT_DESC(x)}};
INPUT_ATTR_MAP(SliceD) = {{2, ATTR_DESC(offsets, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
{3, ATTR_DESC(size, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())}};
@ -415,11 +409,6 @@ INPUT_MAP(TopK) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(k)}};
ATTR_MAP(TopK) = {{"sorted", ATTR_DESC(sorted, AnyTraits<bool>())}};
OUTPUT_MAP(TopK) = {{0, OUTPUT_DESC(values)}, {1, OUTPUT_DESC(indices)}};
// Multiply
INPUT_MAP(Multiply) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}};
ATTR_MAP(Multiply) = EMPTY_ATTR_MAP;
OUTPUT_MAP(Multiply) = {{0, OUTPUT_DESC(z)}};
// TileD
INPUT_MAP(TileD) = {{1, INPUT_DESC(x)}};
INPUT_ATTR_MAP(TileD) = {{2, ATTR_DESC(multiples, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())}};


@ -15,6 +15,7 @@
"""aicpu ops"""
from .init_data_set_queue import _init_data_set_queue_aicpu
from .embedding_lookup import _embedding_lookup_aicpu
from .padding import _padding_aicpu
from .dropout_genmask import _dropout_genmask_aicpu
from .get_next import _get_next_aicpu
from .print_tensor import _print_aicpu
@ -43,3 +44,7 @@ from .laplace import _laplace_aicpu
from .strided_slice import _strided_slice_aicpu
from .strided_slice_grad import _strided_slice_grad_aicpu
from .end_of_sequence import _end_of_sequence_aicpu
from .fused_sparse_adam import _fused_sparse_adam_aicpu
from .fused_sparse_lazy_adam import _fused_sparse_lazy_adam_aicpu
from .fused_sparse_ftrl import _fused_sparse_ftrl_aicpu
from .fused_sparse_proximal_adagrad import _fused_sparse_proximal_adagrad_aicpu


@ -0,0 +1,46 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FusedSparseAdam op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
fused_sparse_adam_op_info = AiCPURegOp("FusedSparseAdam") \
.fusion_type("OPAQUE") \
.attr("use_locking", "bool") \
.attr("use_nesterov", "bool") \
.input(0, "var", "required") \
.input(1, "m", "required") \
.input(2, "v", "required") \
.input(3, "beta1_power", "required") \
.input(4, "beta2_power", "required") \
.input(5, "lr", "required") \
.input(6, "beta1", "required") \
.input(7, "beta2", "required") \
.input(8, "epsilon", "required") \
.input(9, "grad", "required") \
.input(10, "indices", "required") \
.output(0, "var", "required") \
.output(1, "m", "required") \
.output(2, "v", "required") \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.I32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(fused_sparse_adam_op_info)
def _fused_sparse_adam_aicpu():
"""FusedSparseAdam aicpu register"""
return
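The registration above (and the three fused-sparse registrations that follow) uses the AiCPURegOp builder: each .dtype_format(...) call supplies one DataType per declared input, in declaration order, followed by one per output; here 11 inputs plus 3 outputs account for the 14 entries. A minimal sketch of the same pattern, with a hypothetical op name:

from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

# Hypothetical "ExampleOp" for illustration: one input and one output, so each
# dtype_format row lists exactly two DataType entries (input first, then output).
example_op_info = AiCPURegOp("ExampleOp") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()

@op_info_register(example_op_info)
def _example_aicpu():
    """ExampleOp aicpu register (hypothetical)"""
    return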


@ -0,0 +1,41 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FusedSparseFtrl op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
fused_sparse_ftrl_op_info = AiCPURegOp("FusedSparseFtrl") \
.fusion_type("OPAQUE") \
.attr("lr", "float") \
.attr("l1", "float") \
.attr("l2", "float") \
.attr("lr_power", "float") \
.attr("use_locking", "bool") \
.input(0, "var", "required") \
.input(1, "accum", "required") \
.input(2, "linear", "required") \
.input(3, "grad", "required") \
.input(4, "indices", "required") \
.output(0, "var", "required") \
.output(1, "accum", "required") \
.output(2, "linear", "required") \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(fused_sparse_ftrl_op_info)
def _fused_sparse_ftrl_aicpu():
"""FusedSparseFtrl aicpu register"""
return


@ -0,0 +1,46 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FusedSparseLazyAdam op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
fused_sparse_lazy_adam_op_info = AiCPURegOp("FusedSparseLazyAdam") \
.fusion_type("OPAQUE") \
.attr("use_locking", "bool") \
.attr("use_nesterov", "bool") \
.input(0, "var", "required") \
.input(1, "m", "required") \
.input(2, "v", "required") \
.input(3, "beta1_power", "required") \
.input(4, "beta2_power", "required") \
.input(5, "lr", "required") \
.input(6, "beta1", "required") \
.input(7, "beta2", "required") \
.input(8, "epsilon", "required") \
.input(9, "grad", "required") \
.input(10, "indices", "required") \
.output(0, "var", "required") \
.output(1, "m", "required") \
.output(2, "v", "required") \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.I32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(fused_sparse_lazy_adam_op_info)
def _fused_sparse_lazy_adam_aicpu():
"""FusedSparseLazyAdam aicpu register"""
return


@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FusedSparseProximalAdagrad op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
fused_sparse_proximal_adagrad_op_info = AiCPURegOp("FusedSparseProximalAdagrad") \
.fusion_type("OPAQUE") \
.attr("use_locking", "bool") \
.input(0, "var", "required") \
.input(1, "accum", "required") \
.input(2, "lr", "required") \
.input(3, "l1", "required") \
.input(4, "l2", "required") \
.input(5, "grad", "required") \
.input(6, "indices", "required") \
.output(0, "var", "required") \
.output(1, "accum", "required") \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.I32_Default, DataType.F32_Default,
DataType.F32_Default) \
.get_op_info()
@op_info_register(fused_sparse_proximal_adagrad_op_info)
def _fused_sparse_proximal_adagrad_aicpu():
"""FusedSparseProximalAdagrad aicpu register"""
return


@ -23,6 +23,7 @@ gamma_op_info = AiCPURegOp("Gamma") \
.input(2, "beta", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.attr("seed2", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW) \
.get_op_info()


@ -0,0 +1,41 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Padding op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
padding_op_info = AiCPURegOp("Padding") \
.fusion_type("OPAQUE") \
.input(0, "x", "required") \
.output(0, "y", "required") \
.attr("pad_dim_size", "int") \
.dtype_format(DataType.I8_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.F64_Default) \
.dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \
.get_op_info()
@op_info_register(padding_op_info)
def _padding_aicpu():
"""Padding AiCPU register"""
return


@ -22,6 +22,7 @@ poisson_op_info = AiCPURegOp("Poisson") \
.input(1, "mean", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.attr("seed2", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.I32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.I32_NCHW) \
.get_op_info()


@ -23,6 +23,7 @@ uniform_int_op_info = AiCPURegOp("UniformInt") \
.input(2, "b", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.attr("seed2", "int") \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.I32_NCHW, DataType.I32_NCHW, DataType.I32_NCHW) \
.get_op_info()


@ -19,12 +19,11 @@ from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
uniform_real_op_info = AiCPURegOp("UniformReal") \
.fusion_type("OPAQUE") \
.input(0, "shape", "required") \
.input(1, "a", "required") \
.input(2, "b", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW) \
.attr("seed2", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW) \
.get_op_info()
@op_info_register(uniform_real_op_info)


@ -27,7 +27,7 @@ from .clip_ops import clip_by_value
from .multitype_ops.add_impl import hyper_add
from .multitype_ops.ones_like_impl import ones_like
from .multitype_ops.zeros_like_impl import zeros_like
from .random_ops import set_seed, normal, multinomial
from .random_ops import set_seed, normal, multinomial, uniform
__all__ = [
@ -49,6 +49,7 @@ __all__ = [
'ones_like',
'zip_operation',
'set_seed',
'uniform',
'normal',
'multinomial',
'clip_by_value',]


@ -13,7 +13,7 @@
# limitations under the License.
# ============================================================================
"""Operations for random number generatos."""
"""Operations for random number generators."""
from .. import operations as P
from .. import functional as F
@ -84,6 +84,7 @@ def normal(shape, mean, stddev, seed=0):
>>> shape = (4, 16)
>>> mean = Tensor(1.0, mstype.float32)
>>> stddev = Tensor(1.0, mstype.float32)
>>> C.set_seed(10)
>>> output = C.normal(shape, mean, stddev, seed=5)
"""
mean_dtype = F.dtype(mean)
@ -144,3 +145,45 @@ def multinomial(inputs, num_sample=None, replacement=True, seed=0):
_, indices = P.TopK()(vals, num_sample)
return indices
return P.Multinomial(seed=seed)(inputs, num_sample)
def uniform(shape, a, b, seed=0, dtype=mstype.float32):
"""
Generates random numbers according to the Uniform random number distribution.
It is defined as:
.. math::
\text{P}(i|a,b) = \frac{1}{b-a},
Args:
shape (tuple): The shape of random tensor to be generated.
a (Tensor): The a distribution parameter.
It defines the minimum possibly generated value. With int32 or float32 data type.
If dtype is int32, only one number is allowed.
b (Tensor): The b distribution parameter.
It defines the maximum possibly generated value. With int32 or float32 data type.
If dtype is int32, only one number is allowed.
seed (int): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
Returns:
Tensor. The shape should be the broadcasted shape of input "shape" and shapes of a and b.
The dtype is the specified `dtype`.
Examples:
>>> shape = (4, 16)
>>> a = Tensor(1.0, mstype.float32)
>>> b = Tensor(4.0, mstype.float32)
>>> C.set_seed(10)
>>> output = C.uniform(shape, a, b, seed=5)
"""
a_dtype = F.dtype(a)
b_dtype = F.dtype(b)
const_utils.check_tensors_dtype_same(a_dtype, dtype, "uniform")
const_utils.check_tensors_dtype_same(b_dtype, dtype, "uniform")
seed1 = get_seed()
seed2 = seed
if const_utils.is_same_type(dtype, mstype.int32):
rnd = P.UniformInt(seed1, seed2)
value = rnd(shape, a, b)
else:
uniform_real = P.UniformReal(seed1, seed2)
rnd = uniform_real(shape)
value = rnd * (b - a) + a
return value
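A hedged usage sketch of the uniform composite above (shapes and bounds are illustrative), covering both dtype branches:

import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import composite as C

C.set_seed(10)
a = Tensor(1.0, mstype.float32)
b = Tensor(5.0, mstype.float32)
# float32 branch: UniformReal samples in [0, 1) are rescaled to [a, b)
floats = C.uniform((3, 2, 4), a, b, seed=5)
# int32 branch: dispatches to P.UniformInt; a and b must be scalar int32 tensors
ints = C.uniform((3, 2, 4), Tensor(1, mstype.int32), Tensor(5, mstype.int32),
                 seed=5, dtype=mstype.int32)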


@ -27,7 +27,7 @@ from .array_ops import (Argmax, Argmin, Cast, Concat, Pack, Unpack,
Rank, Reshape, ResizeNearestNeighbor, ArgMinWithValue,
SameTypeShape, ScatterAdd, ScatterSub, ScatterMul, ScatterDiv, ScatterMax, ScatterMin,
ScatterUpdate, ScalarToArray, ScalarToTensor, ScatterNd, ScatterNdUpdate, Select,
Shape, Size, Slice, Split, TransShape, ParallelConcat,
Shape, Size, Slice, Split, TransShape, ParallelConcat, Padding,
ScatterNdAdd, ScatterNdSub, ScatterNonAliasingAdd, ReverseV2, Rint,
Squeeze, StridedSlice, Tile, TensorScatterUpdate,
Transpose, TruncatedNormal, TupleToArray, UnsortedSegmentMin, UnsortedSegmentProd,
@ -147,6 +147,7 @@ __all__ = [
'GatherV2',
'SparseGatherV2',
'EmbeddingLookup',
'Padding',
'Concat',
'Pack',
'Unpack',


@ -645,6 +645,46 @@ class SparseGatherV2(GatherV2):
"""
class Padding(PrimitiveWithInfer):
"""
Extends the last dimension of the input tensor from 1 to pad_dim_size, filling with zeros.
Args:
pad_dim_size (int): The new size of the last dimension of x; must be positive.
Inputs:
- **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The rank of x should be at least 2.
The last dimension of x should be 1.
Outputs:
Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
Examples:
>>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
>>> pad_dim_size = 4
>>> out = P.Padding(pad_dim_size)(x)
[[8, 0, 0, 0], [10, 0, 0, 0]]
"""
@prim_attr_register
def __init__(self, pad_dim_size=8):
"""init padding"""
validator.check_value_type("pad_dim_size", pad_dim_size, [int], self.name)
validator.check_integer("pad_dim_size", pad_dim_size, 0, Rel.GT, self.name)
self.pad_dim_size = pad_dim_size
def __infer__(self, x):
validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
x_shape = list(x['shape'])
validator.check_integer("rank of x", len(x_shape), 1, Rel.GT, self.name)
validator.check_integer("last dim of x", x_shape[-1], 1, Rel.EQ, self.name)
out_shape = x_shape
out_shape[-1] = self.pad_dim_size
out = {'shape': out_shape,
'dtype': x['dtype'],
'value': None}
return out
class Split(PrimitiveWithInfer):
"""
Splits the input tensor into output_num tensors along the given axis.

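For reference, a minimal NumPy sketch (padding_reference is a hypothetical helper, not part of this change) of what the new Padding primitive computes:

import numpy as np

def padding_reference(x, pad_dim_size):
    # Zero-extend the last dimension (which must be 1) of x to pad_dim_size.
    assert x.shape[-1] == 1 and pad_dim_size > 0
    out = np.zeros(x.shape[:-1] + (pad_dim_size,), dtype=x.dtype)
    out[..., :1] = x
    return out

# padding_reference(np.array([[8], [10]]), 4) -> [[8, 0, 0, 0], [10, 0, 0, 0]]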

@ -3372,6 +3372,7 @@ class FusedSparseProximalAdagrad(PrimitiveWithInfer):
validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
return var_dtype, accum_dtype
class KLDivLoss(PrimitiveWithInfer):
r"""
Computes the Kullback-Leibler divergence between the target and the output.
@ -3443,6 +3444,7 @@ class KLDivLoss(PrimitiveWithInfer):
validator.check_tensor_type_same(args, valid_types, self.name)
return x_type
class BinaryCrossEntropy(PrimitiveWithInfer):
r"""
Computes the Binary Cross Entropy between the target and the output.


@ -34,8 +34,7 @@ class StandardNormal(PrimitiveWithInfer):
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
Outputs:
Tensor. The shape should be the broadcasted shape of Input "shape" and shapes of mean and stddev.
The dtype is float32.
Tensor. The shape that the input 'shape' denotes. The dtype is float32.
Examples:
>>> shape = (4, 16)
@ -126,8 +125,8 @@ class Gamma(PrimitiveWithInfer):
\text{P}(x|α,β) = \frac{\exp(-x/β)}{{β^α}\cdot{\Gamma(α)}}\cdot{x^{α-1}},
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
seed (int): Random seed. Default: 0.
seed2 (int): Second random seed. Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
@ -149,10 +148,11 @@ class Gamma(PrimitiveWithInfer):
"""
@prim_attr_register
def __init__(self, seed=0):
def __init__(self, seed=0, seed2=0):
"""Init Gamma"""
self.init_prim_io_names(inputs=['shape', 'alpha', 'beta'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
validator.check_value_type('seed2', seed2, [int], self.name)
def __infer__(self, shape, alpha, beta):
shape_v = shape["value"]
@ -180,8 +180,8 @@ class Poisson(PrimitiveWithInfer):
\text{P}(i|μ) = \frac{\exp(-μ)μ^{i}}{i!},
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
seed (int): Random seed. Default: 0.
seed2 (int): Second random seed. Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
@ -200,10 +200,11 @@ class Poisson(PrimitiveWithInfer):
"""
@prim_attr_register
def __init__(self, seed=0):
def __init__(self, seed=0, seed2=0):
"""Init Poisson"""
self.init_prim_io_names(inputs=['shape', 'mean'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
validator.check_value_type('seed2', seed2, [int], self.name)
def __infer__(self, shape, mean):
shape_v = shape["value"]
@ -223,7 +224,7 @@ class Poisson(PrimitiveWithInfer):
class UniformInt(PrimitiveWithInfer):
r"""
Produces random integer values i, uniformly distributed on the closed interval [a, b], that is,
Produces random integer values i, uniformly distributed on the half-open interval [a, b), that is,
distributed according to the discrete probability function:
.. math::
@ -233,19 +234,18 @@ class UniformInt(PrimitiveWithInfer):
The number in tensor a should be strictly less than b at any position after broadcasting.
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
seed (int): Random seed. Default: 0.
seed2 (int): Second random seed. Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
- **a** (Tensor) - The a distribution parameter.
It defines the minimum possibly generated value. With int32 data type.
It defines the minimum possibly generated value. With int32 data type. Only one number is supported.
- **b** (Tensor) - The b distribution parameter.
It defines the maximum possibly generated value. With int32 data type.
It defines the maximum possibly generated value. With int32 data type. Only one number is supported.
Outputs:
Tensor. The shape should be the broadcasted shape of Input "shape" and shapes of a and b.
The dtype is int32.
Tensor. The shape that the input 'shape' denotes. The dtype is int32.
Examples:
>>> shape = (4, 16)
@ -256,10 +256,11 @@ class UniformInt(PrimitiveWithInfer):
"""
@prim_attr_register
def __init__(self, seed=0):
def __init__(self, seed=0, seed2=0):
"""Init UniformInt"""
self.init_prim_io_names(inputs=['shape', 'a', 'b'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
validator.check_value_type('seed2', seed2, [int], self.name)
def __infer__(self, shape, a, b):
shape_v = shape["value"]
@ -270,10 +271,12 @@ class UniformInt(PrimitiveWithInfer):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
validator.check_tensor_type_same({"a": a["dtype"]}, [mstype.int32], self.name)
validator.check_tensor_type_same({"b": b["dtype"]}, [mstype.int32], self.name)
broadcast_shape = get_broadcast_shape(a['shape'], b['shape'], self.name)
broadcast_shape = get_broadcast_shape(broadcast_shape, shape_v, self.name)
a_shape = a['shape']
b_shape = b['shape']
validator.check("dim of a", len(a_shape), '0(scalar)', 0, Rel.EQ, self.name)
validator.check("dim of b", len(b_shape), '0(scalar)', 0, Rel.EQ, self.name)
out = {
'shape': broadcast_shape,
'shape': shape_v,
'dtype': mstype.int32,
'value': None}
return out
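A hedged usage sketch of the tightened UniformInt contract above: a and b must now be scalar (0-dimensional) int32 tensors, and the output shape is exactly the `shape` input.

import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P

rnd = P.UniformInt(seed=10, seed2=10)
# Output has shape (3, 2, 4), dtype int32, values drawn from [1, 5)
out = rnd((3, 2, 4), Tensor(1, mstype.int32), Tensor(5, mstype.int32))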
@ -281,54 +284,40 @@ class UniformInt(PrimitiveWithInfer):
class UniformReal(PrimitiveWithInfer):
r"""
Produces random floating-point values i, uniformly distributed on the interval [min(a, b), max(a, b)), that is,\
distributed according to the probability density function:
.. math::
\text{P}(i|a,b) = \frac{1}{b-a},
Produces random floating-point values i, uniformly distributed on the interval [0, 1).
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
seed (int): Random seed. Default: 0.
seed2 (int): Second random seed. Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
- **a** (Tensor) - The a distribution parameter.
It defines the minimum possibly generated value. With float32 data type.
- **b** (Tensor) - The b distribution parameter.
It defines the maximum possibly generated value. With float32 data type.
Outputs:
Tensor. The shape should be the broadcasted shape of Input "shape" and shapes of a and b.
The dtype is float32.
Tensor. The shape that the input 'shape' denotes. The dtype is float32.
Examples:
>>> shape = (4, 16)
>>> a = Tensor(1.0, mstype.float32)
>>> b = Tensor(5.0, mstype.float32)
>>> uniform_real = P.UniformReal(seed=10)
>>> output = uniform_real(shape, a, b)
>>> uniformreal = P.UniformReal(seed=2)
>>> output = uniformreal(shape)
"""
@prim_attr_register
def __init__(self, seed=0):
def __init__(self, seed=0, seed2=0):
"""Init UniformReal"""
self.init_prim_io_names(inputs=['shape', 'a', 'b'], outputs=['output'])
self.init_prim_io_names(inputs=['shape'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
validator.check_value_type('seed2', seed2, [int], self.name)
def __infer__(self, shape, a, b):
def __infer__(self, shape):
shape_v = shape["value"]
if shape_v is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_v, [tuple], self.name)
for i, shape_i in enumerate(shape_v):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
validator.check_tensor_type_same({"a": a["dtype"]}, [mstype.float32], self.name)
validator.check_tensor_type_same({"b": b["dtype"]}, [mstype.float32], self.name)
broadcast_shape = get_broadcast_shape(a['shape'], b['shape'], self.name)
broadcast_shape = get_broadcast_shape(broadcast_shape, shape_v, self.name)
out = {
'shape': broadcast_shape,
'shape': shape_v,
'dtype': mstype.float32,
'value': None}
return out
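A hedged sketch of the new UniformReal interface: it now takes only `shape` and emits float32 samples in [0, 1); callers rescale the result themselves (as C.uniform above does) when another range is needed.

from mindspore.ops import operations as P

rnd = P.UniformReal(seed=2)
u01 = rnd((3, 2, 4))                # float32 samples in [0, 1)
scaled = u01 * (5.0 - 1.0) + 1.0    # illustrative rescale to [1.0, 5.0)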


@ -0,0 +1,53 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
beta1_power = 0.9
beta2_power = 0.999
lr = 0.001
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-8
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.fused_sparse_adam = P.FusedSparseAdam()
self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
def construct(self, grad, indices):
return self.fused_sparse_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, epsilon,
grad, indices)
def test_net():
gradient = Tensor(np.array([0.22948648, 0.14569908, 0.92861906, 0.66870148])
.reshape([2, 1, 2]).astype(np.float32))
indices = Tensor([0, 1], mstype.int32)
net = Net()
output = net(gradient, indices)
print(output)
print(net.var.default_input)
print(net.m.default_input)
print(net.v.default_input)


@ -0,0 +1,50 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
lr = 0.01
l1 = 0.0
l2 = 0.0
lr_power = -0.5
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.fused_sparse_ftrl = P.FusedSparseFtrl(lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5)
self.var = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="var")
self.accum = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="accum")
self.linear = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="linear")
def construct(self, grad, indices):
return self.fused_sparse_ftrl(self.var, self.accum, self.linear, grad, indices)
def test_net():
gradient = Tensor(np.array([-3, 2, 3, 0, 0, 0, -4, -1, -2])
.reshape([3, 3]).astype(np.float32))
indices = Tensor(np.ones([3]), mstype.int32)
net = Net()
output = net(gradient, indices)
print(output)
print(net.var.default_input)
print(net.accum.default_input)
print(net.linear.default_input)


@ -0,0 +1,53 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
beta1_power = 0.9
beta2_power = 0.999
lr = 0.001
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-8
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.fused_sparse_lazy_adam = P.FusedSparseLazyAdam()
self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
def construct(self, grad, indices):
return self.fused_sparse_lazy_adam(self.var, self.m, self.v, beta1_power, beta2_power,
lr, beta1, beta2, epsilon, grad, indices)
def test_net():
gradient = Tensor(np.array([0.22948648, 0.14569908, 0.92861906, 0.66870148])
.reshape([2, 1, 2]).astype(np.float32))
indices = Tensor([0, 1], mstype.int32)
net = Net()
output = net(gradient, indices)
print(output)
print(net.var.default_input)
print(net.m.default_input)
print(net.v.default_input)


@ -0,0 +1,47 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.fused_sparse_proximal_adagrad = P.FusedSparseProximalAdagrad()
self.var = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="var")
self.accum = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="accum")
self.lr = 0.01
self.l1 = 0.0
self.l2 = 0.0
def construct(self, grad, indices):
return self.fused_sparse_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2,
grad, indices)
def test_net():
gradient = Tensor(np.array([-3, 2, 3, 0, 0, 0, -4, -1, -2])
.reshape([3, 3]).astype(np.float32))
indices = Tensor(np.ones([3]), mstype.int32)
net = Net()
output = net(gradient, indices)
print(output)
print(net.var.default_input)
print(net.accum.default_input)


@ -24,9 +24,9 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape, seed=0):
def __init__(self, shape, seed=0, seed2=0):
super(Net, self).__init__()
self.gamma = P.Gamma(seed=seed)
self.gamma = P.Gamma(seed=seed, seed2=seed2)
self.shape = shape
def construct(self, alpha, beta):
@ -38,10 +38,9 @@ def test_net_1D():
shape = (3, 2, 4)
alpha = 1.0
beta = 1.0
net = Net(shape, seed)
net = Net(shape=shape, seed=seed)
talpha, tbeta = Tensor(alpha, mstype.float32), Tensor(beta, mstype.float32)
output = net(talpha, tbeta)
print(output.asnumpy())
assert output.shape == (3, 2, 4)
@ -50,8 +49,7 @@ def test_net_ND():
shape = (3, 1, 2)
alpha = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]).astype(np.float32)
beta = np.array([1.0]).astype(np.float32)
net = Net(shape, seed)
net = Net(shape=shape, seed=seed)
talpha, tbeta = Tensor(alpha), Tensor(beta)
output = net(talpha, tbeta)
print(output.asnumpy())
assert output.shape == (3, 2, 2)


@ -32,6 +32,7 @@ class Net(nn.Cell):
self.seed = seed
def construct(self, mean, stddev):
C.set_seed(20)
return C.normal(self.shape, mean, stddev, self.seed)
@ -55,3 +56,4 @@ def test_net_ND():
tmean, tstddev = Tensor(mean, mstype.float32), Tensor(stddev, mstype.float32)
output = net(tmean, tstddev)
assert output.shape == (3, 2, 2)


@ -24,7 +24,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape):
def __init__(self, shape, seed=0, seed2=0):
super(Net, self).__init__()
self.poisson = P.Poisson()
self.shape = shape
@ -36,17 +36,16 @@ class Net(nn.Cell):
def test_net_1():
shape = (2, 16)
mean = np.array([5.0]).astype(np.float32)
net = Net(shape)
net = Net(shape=shape)
tmean = Tensor(mean)
output = net(tmean)
print(output.asnumpy())
assert output.shape == (2, 16)
def test_net_2():
shape = (4, 1)
mean = np.array([5.0, 10.0]).astype(np.float32)
net = Net(shape)
net = Net(shape=shape)
tmean = Tensor(mean)
output = net(tmean)
print(output.asnumpy())


@ -0,0 +1,57 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import composite as C
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape, seed=0):
super(Net, self).__init__()
self.shape = shape
self.seed = seed
def construct(self, a, b):
C.set_seed(20)
return C.uniform(self.shape, a, b, self.seed)
def test_net_1D():
seed = 10
shape = (3, 2, 4)
a = 1.0
b = 6.0
net = Net(shape, seed)
ta, tb = Tensor(a, mstype.float32), Tensor(b, mstype.float32)
output = net(ta, tb)
assert output.shape == (3, 2, 4)
def test_net_ND():
seed = 10
shape = (3, 1, 2)
a = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]).astype(np.float32)
b = np.array([1.0]).astype(np.float32)
net = Net(shape, seed)
ta, tb = Tensor(a, mstype.float32), Tensor(b, mstype.float32)
output = net(ta, tb)
assert output.shape == (3, 2, 2)


@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
@ -24,7 +23,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape, seed=0):
def __init__(self, shape, seed=0, seed2=0):
super(Net, self).__init__()
self.uniformint = P.UniformInt(seed=seed)
self.shape = shape
@ -38,10 +37,9 @@ def test_net_1D():
shape = (3, 2, 4)
a = 1
b = 5
net = Net(shape, seed)
net = Net(shape, seed=seed)
ta, tb = Tensor(a, mstype.int32), Tensor(b, mstype.int32)
output = net(ta, tb)
print(output.asnumpy())
assert output.shape == (3, 2, 4)


@ -12,36 +12,29 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape, seed=0):
def __init__(self, shape, seed=0, seed2=0):
super(Net, self).__init__()
self.uniformreal = P.UniformReal(seed=seed)
self.shape = shape
def construct(self, a, b):
return self.uniformreal(self.shape, a, b)
def construct(self):
return self.uniformreal(self.shape)
def test_net_1D():
def test_net():
seed = 10
shape = (3, 2, 4)
a = 1.0
b = 5.0
net = Net(shape, seed)
ta, tb = Tensor(a, mstype.float32), Tensor(b, mstype.float32)
output = net(ta, tb)
print(output.asnumpy())
net = Net(shape, seed=seed)
output = net()
assert output.shape == (3, 2, 4)


@ -43,4 +43,4 @@ def test_net():
tx, ty = Tensor(x), Tensor(y)
output = mask(tx, ty)
print(output.asnumpy())
assert ([255, 255, 255, 255] == output.asnumpy()).all()
assert ([255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255] == output.asnumpy()).all()


@ -0,0 +1,40 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE,
device_target="Ascend")
class Net(nn.Cell):
def __init__(self, pad_dim_size):
super(Net, self).__init__()
self.padding = P.Padding(pad_dim_size)
def construct(self, x):
return self.padding(x)
def test_padding():
x = Tensor(np.array([[8], [10]]), mstype.int32)
padding = Net(4)
out = padding(x)
assert(out.asnumpy() == [[8, 0, 0, 0], [10, 0, 0, 0]]).all()


@ -611,25 +611,14 @@ class PoissonNet(nn.Cell):
return out
class UniformIntNet(nn.Cell):
class UniformNet(nn.Cell):
def __init__(self, shape=None, seed=0):
super(UniformIntNet, self).__init__()
self.uniformint = P.UniformInt(seed=seed)
super(UniformNet, self).__init__()
self.shape = shape
self.seed = seed
def construct(self, a, b):
out = self.uniformint(self.shape, a, b)
return out
class UniformRealNet(nn.Cell):
def __init__(self, shape=None, seed=0):
super(UniformRealNet, self).__init__()
self.uniformreal = P.UniformReal(seed=seed)
self.shape = shape
def construct(self, a, b):
out = self.uniformreal(self.shape, a, b)
out = C.uniform(self.shape, a, b, self.seed)
return out
@ -924,13 +913,9 @@ test_case_math_ops = [
'block': PoissonNet((3, 2, 4), 0),
'desc_inputs': [Tensor(2.0, mstype.float32)],
'skip': ['backward']}),
('UniformInt', {
'block': UniformIntNet((3, 2, 4), 0),
'desc_inputs': [Tensor(1, mstype.int32), Tensor(15, mstype.int32)],
'skip': ['backward']}),
('UniformReal', {
'block': UniformRealNet((3, 2, 4), 0),
'desc_inputs': [Tensor(1.0, mstype.float32), Tensor(5.0, mstype.float32)],
('Uniform', {
'block': UniformNet((3, 2, 4), 0),
'desc_inputs': [Tensor(0.0, mstype.float32), Tensor(1.0, mstype.float32)],
'skip': ['backward']}),
('RandomChoiceWithMask', {
'block': P.RandomChoiceWithMask(256),