forked from mindspore-Ecosystem/mindspore

!6365 Refactoring the Laplace random operator.

Merge pull request !6365 from jxlang910/push-to-opensource

Commit 5e43308613
@@ -41,7 +41,7 @@ from .gamma import _gamma_aicpu
 from .poisson import _poisson_aicpu
 from .uniform_int import _uniform_int_aicpu
 from .uniform_real import _uniform_real_aicpu
-from .laplace import _laplace_aicpu
+from .standard_laplace import _standard_laplace_aicpu
 from .strided_slice import _strided_slice_aicpu
 from .strided_slice_grad import _strided_slice_grad_aicpu
 from .end_of_sequence import _end_of_sequence_aicpu
@@ -16,18 +16,17 @@
 """RandomLaplace op"""
 from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
 
-laplace_op_info = AiCPURegOp("Laplace") \
+laplace_op_info = AiCPURegOp("StandardLaplace") \
     .fusion_type("OPAQUE") \
     .input(0, "shape", "required") \
-    .input(1, "mean", "required") \
-    .input(2, "lambda_param", "required") \
     .output(0, "output", "required") \
     .attr("seed", "int") \
-    .dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
-    .dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW) \
+    .attr("seed2", "int") \
+    .dtype_format(DataType.I32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.I32_NCHW, DataType.F32_NCHW) \
     .get_op_info()
 
 @op_info_register(laplace_op_info)
-def _laplace_aicpu():
+def _standard_laplace_aicpu():
     """RandomLaplace AiCPU register"""
     return
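For context, the kernel registered above is reached through the StandardLaplace primitive refactored later in this PR. A minimal call sketch, mirroring that primitive's docstring example and assuming an Ascend backend where the AiCPU kernel is available:

    from mindspore.ops import operations as P

    # Only the constant shape tuple is passed to the op; seed2 defaults to 0.
    # Scaling by mean/lambda_param now happens in the composite helper below.
    stdlaplace = P.StandardLaplace(seed=2)
    output = stdlaplace((4, 16))  # float32 tensor of shape (4, 16)
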
@@ -26,7 +26,7 @@ from .clip_ops import clip_by_value
 from .multitype_ops.add_impl import hyper_add
 from .multitype_ops.ones_like_impl import ones_like
 from .multitype_ops.zeros_like_impl import zeros_like
-from .random_ops import normal, uniform, gamma, poisson, multinomial
+from .random_ops import normal, laplace, uniform, gamma, poisson, multinomial
 
 
 __all__ = [
@@ -42,6 +42,7 @@ __all__ = [
     'ones_like',
     'zip_operation',
     'normal',
+    'laplace',
     'uniform',
     'gamma',
     'poisson',
@@ -76,6 +76,44 @@ def normal(shape, mean, stddev, seed=0):
     value = random_normal * stddev + mean
     return value
 
+def laplace(shape, mean, lambda_param, seed=0):
+    r"""
+    Generates random numbers according to the Laplace random number distribution.
+    It is defined as:
+
+    .. math::
+        \text{f}(x;μ,λ) = \frac{1}{2λ}\exp(-\frac{|x-μ|}{λ}),
+
+    Args:
+        shape (tuple): The shape of random tensor to be generated.
+        mean (Tensor): The mean μ distribution parameter, which specifies the location of the peak.
+          With float32 data type.
+        lambda_param (Tensor): The parameter used for controlling the variance of this random distribution. The
+          variance of Laplace distribution is equal to twice the square of lambda_param. With float32 data type.
+        seed (int): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
+          Default: 0.
+
+    Returns:
+        Tensor. The shape should be the broadcasted shape of input "shape" and shapes of mean and lambda_param.
+        The dtype is float32.
+
+    Examples:
+        >>> shape = (4, 16)
+        >>> mean = Tensor(1.0, mstype.float32)
+        >>> lambda_param = Tensor(1.0, mstype.float32)
+        >>> output = C.laplace(shape, mean, lambda_param, seed=5)
+    """
+    mean_dtype = F.dtype(mean)
+    lambda_param_dtype = F.dtype(lambda_param)
+    const_utils.check_tensors_dtype_same(mean_dtype, mstype.float32, "laplace")
+    const_utils.check_tensors_dtype_same(lambda_param_dtype, mstype.float32, "laplace")
+    seed1 = get_seed()
+    seed2 = seed
+    stdlaplace = P.StandardLaplace(seed1, seed2)
+    rnd = stdlaplace(shape)
+    value = rnd * lambda_param + mean
+    return value
+
 def uniform(shape, minval, maxval, seed=0, dtype=mstype.float32):
     """
     Generates random numbers according to the Uniform random number distribution.
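Not part of the change itself, but a quick NumPy sketch (assuming NumPy >= 1.17 for default_rng) of the scale-shift identity this composite relies on: if z follows Laplace(0, 1), then lambda_param * z + mean follows Laplace(mean, lambda_param), whose variance is twice the square of lambda_param, as the docstring states.

    import numpy as np

    rng = np.random.default_rng(5)
    mean, lambda_param = 1.0, 2.0
    z = rng.laplace(loc=0.0, scale=1.0, size=1_000_000)  # stand-in for StandardLaplace output
    x = lambda_param * z + mean                          # what C.laplace computes from it

    print(x.mean())  # close to mean, i.e. 1.0
    print(x.var())   # close to 2 * lambda_param ** 2, i.e. 8.0
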
@@ -57,7 +57,7 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A
                        Square, Sub, TensorAdd, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps, Tan)
 
 from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, Poisson, UniformInt, UniformReal,
-                         RandomCategorical, Laplace, Multinomial)
+                         RandomCategorical, StandardLaplace, Multinomial)
 from .nn_ops import (LSTM, SGD, Adam, FusedSparseAdam, FusedSparseLazyAdam, ApplyMomentum, BatchNorm,
                      BiasAdd, Conv2D,
                      DepthwiseConv2dNative,
@@ -193,7 +193,7 @@ __all__ = [
     'Poisson',
     'UniformInt',
     'UniformReal',
-    'Laplace',
+    'StandardLaplace',
     'RandomCategorical',
     'ResizeBilinear',
     'ScalarSummary',
@@ -63,60 +63,52 @@ class StandardNormal(PrimitiveWithInfer):
         return out
 
 
-class Laplace(PrimitiveWithInfer):
+class StandardLaplace(PrimitiveWithInfer):
     r"""
-    Generates random numbers according to the Laplace random number distribution.
+    Generates random numbers according to the Laplace random number distribution (mean=0, lambda=1).
     It is defined as:
 
     .. math::
-        \text{f}(x;μ,λ) = \frac{1}{2λ}\exp(-\frac{|x-μ|}{λ}),
+        \text{f}(x;0,1) = \frac{1}{2}\exp(-|x|),
 
     Args:
-        seed (int): Seed data is used as entropy source for Random number engines to generate pseudo-random numbers.
-            Default: 0.
+        seed (int): Random seed. Default: 0.
+        seed2 (int): Random seed2. Default: 0.
 
     Inputs:
         - **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
-        - **mean** (Tensor) - The mean μ distribution parameter, which specifies the location of the peak.
-          With float32 data type.
-        - **lambda_param** (Tensor) - The parameter used for controling the variance of this random distribution. The
-          variance of Laplace distribution is equal to twice the square of lambda_param. With float32 data type.
 
     Outputs:
-        Tensor, has the specified shape and its dtype is float32.
+        Tensor. The shape that the input 'shape' denotes. The dtype is float32.
 
     Examples:
         >>> shape = (4, 16)
-        >>> mean = Tensor(1.0, mstype.float32)
-        >>> lambda_param = Tensor(1.0, mstype.float32)
-        >>> laplace = P.Laplace(seed=2)
-        >>> output = laplace(shape, mean, lambda_param)
+        >>> stdlaplace = P.StandardLaplace(seed=2)
+        >>> output = stdlaplace(shape)
     """
 
     @prim_attr_register
-    def __init__(self, seed=0):
-        """Init Laplace"""
-        self.init_prim_io_names(inputs=['shape', 'mean', 'lambda_param'], outputs=['output'])
+    def __init__(self, seed=0, seed2=0):
+        """Init StandardLaplace"""
+        self.init_prim_io_names(inputs=['shape'], outputs=['output'])
         validator.check_value_type('seed', seed, [int], self.name)
+        validator.check_value_type('seed2', seed2, [int], self.name)
 
-    def __infer__(self, shape, mean, lambda_param):
+    def __infer__(self, shape):
         shape_v = shape["value"]
         if shape_v is None:
             raise ValueError(f"For {self.name}, shape must be const.")
         validator.check_value_type("shape", shape_v, [tuple], self.name)
         for i, shape_i in enumerate(shape_v):
             validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
-        validator.check_tensor_type_same({"mean": mean["dtype"]}, [mstype.float32], self.name)
-        validator.check_tensor_type_same({"lambda_param": lambda_param["dtype"]}, [mstype.float32], self.name)
-        broadcast_shape = get_broadcast_shape(mean['shape'], lambda_param['shape'], self.name)
-        broadcast_shape = get_broadcast_shape(broadcast_shape, shape_v, self.name)
         out = {
-            'shape': broadcast_shape,
+            'shape': shape_v,
             'dtype': mstype.float32,
             'value': None}
         return out
 
 
 class Gamma(PrimitiveWithInfer):
     r"""
     Produces random positive floating-point values x, distributed according to probability density function:
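The AiCPU kernel body itself is not part of this diff. As a reference for what a sampler of the density f(x; 0, 1) = 0.5 * exp(-|x|) above typically does, here is a hedged inverse-CDF sketch in NumPy; the actual kernel may use a different scheme.

    import numpy as np

    def standard_laplace_reference(shape, seed=0):
        """Inverse-CDF sampling of the standard Laplace density 0.5 * exp(-|x|)."""
        rng = np.random.default_rng(seed)
        u = rng.uniform(-0.5, 0.5, size=shape)           # uniform on (-0.5, 0.5)
        return -np.sign(u) * np.log1p(-2.0 * np.abs(u))  # invert the Laplace CDF

    samples = standard_laplace_reference((4, 16), seed=2)
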
@@ -12,46 +12,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor
 from mindspore.ops import operations as P
-from mindspore.common import dtype as mstype
 
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 
 
 class Net(nn.Cell):
-    def __init__(self, shape, seed=0):
+    def __init__(self, shape, seed=0, seed2=0):
         super(Net, self).__init__()
-        self.laplace = P.Laplace(seed=seed)
         self.shape = shape
+        self.seed = seed
+        self.seed2 = seed2
+        self.stdlaplace = P.StandardLaplace(seed, seed2)
 
-    def construct(self, mean, lambda_param):
-        return self.laplace(self.shape, mean, lambda_param)
+    def construct(self):
+        return self.stdlaplace(self.shape)
 
 
-def test_net_1D():
+def test_net():
     seed = 10
+    seed2 = 10
     shape = (3, 2, 4)
-    mean = 1.0
-    lambda_param = 1.0
-    net = Net(shape, seed)
-    tmean, tlambda_param = Tensor(mean, mstype.float32), Tensor(lambda_param, mstype.float32)
-    output = net(tmean, tlambda_param)
-    print(output.asnumpy())
+    net = Net(shape, seed, seed2)
+    output = net()
     assert output.shape == (3, 2, 4)
 
 
-def test_net_ND():
-    seed = 10
-    shape = (3, 1, 2)
-    mean = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]).astype(np.float32)
-    lambda_param = np.array([1.0]).astype(np.float32)
-    net = Net(shape, seed)
-    tmean, tlambda_param = Tensor(mean), Tensor(lambda_param)
-    output = net(tmean, tlambda_param)
-    print(output.asnumpy())
-    assert output.shape == (3, 2, 2)
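A possible follow-up for this test file, not included in the PR: beyond the shape assertion, the refactored op's output could be compared against the theoretical moments of Laplace(0, 1), namely mean 0 and variance 2. A sketch reusing the Net cell above, with hypothetical sample size and tolerances:

    def test_net_distribution():
        net = Net((10000,), seed=1, seed2=1)
        samples = net().asnumpy()
        assert abs(samples.mean()) < 0.1       # E[X] = 0 for Laplace(0, 1)
        assert abs(samples.var() - 2.0) < 0.2  # Var[X] = 2 for Laplace(0, 1)
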
@@ -585,11 +585,11 @@ class NormalNet(nn.Cell):
 class LaplaceNet(nn.Cell):
     def __init__(self, shape=None, seed=0):
         super(LaplaceNet, self).__init__()
-        self.laplace = P.Laplace(seed=seed)
         self.shape = shape
+        self.seed = seed
 
     def construct(self, mean, lambda_param):
-        out = self.laplace(self.shape, mean, lambda_param)
+        out = C.laplace(self.shape, mean, lambda_param, self.seed)
         return out
 
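For completeness, a minimal sketch of driving the updated LaplaceNet directly, mirroring the composite docstring example; it assumes Tensor and mstype are imported in this test module as elsewhere in the file.

    net = LaplaceNet(shape=(4, 16), seed=5)
    out = net(Tensor(1.0, mstype.float32), Tensor(1.0, mstype.float32))  # float32, shape (4, 16)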
|