forked from mindspore-Ecosystem/mindspore
!16395 Remove the random seed from the BatchNorm1d operator example (additional problems)
From: @dinglinhe123
Reviewed-by: @wuxuejian, @liangchenghui
Signed-off-by: @liangchenghui
Commit: de0400b477
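Every hunk below makes the same two kinds of fix to docstring examples: the import list is trimmed to what the example actually uses, and (for BatchNorm1d) a seeded random input is replaced with a fixed literal array so the doctest output is deterministic. A minimal sketch of the seed removal; the "before" lines are an assumption inferred from the commit title, not copied from the diff:

    >>> # before (assumed): output depends on NumPy's global RNG state
    >>> np.random.seed(0)
    >>> input = Tensor(np.random.randint(0, 255, [2, 4]).astype(np.float32))
    >>> # after: a fixed array makes the printed result reproducible
    >>> input = Tensor(np.array([[0.7, 0.5, 0.5, 0.6],
    ...                          [0.5, 0.4, 0.6, 0.9]]).astype(np.float32))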
@@ -231,8 +231,7 @@ class SSIM(Cell):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as ops
        >>> from mindspore import Tensor
        >>> net = nn.SSIM()
        >>> img1 = Tensor(np.ones([1, 3, 16, 16]).astype(np.float32))
        >>> img2 = Tensor(np.ones([1, 3, 16, 16]).astype(np.float32))
@@ -325,8 +324,7 @@ class MSSSIM(Cell):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as ops
        >>> from mindspore import Tensor
        >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
        >>> img1 = Tensor(np.ones((1, 3, 128, 128)).astype(np.float32))
        >>> img2 = Tensor(np.ones((1, 3, 128, 128)).astype(np.float32))
@@ -311,11 +311,8 @@ class BatchNorm1d(_BatchNorm):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as ops
        >>> from mindspore import Tensor
        >>> net = nn.BatchNorm1d(num_features=4)
        >>> grad = Tensor(np.array([[0.7, 0.3, 0.7, 0.6],
        ...                         [0.1, 0.1, 0.8, 0.8]]).astype(np.float32))
        >>> input = Tensor(np.array([[0.7, 0.5, 0.5, 0.6],
        ...                         [0.5, 0.4, 0.6, 0.9]]).astype(np.float32))
        >>> output = net(input)
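Assembled from the lines above, the updated BatchNorm1d example is self-contained; the `print` line and its output below are illustrative additions, not part of the diff:

    >>> import numpy as np
    >>> import mindspore.nn as nn
    >>> from mindspore import Tensor
    >>> net = nn.BatchNorm1d(num_features=4)
    >>> # a fixed (2, 4) batch: one row per sample, num_features columns
    >>> input = Tensor(np.array([[0.7, 0.5, 0.5, 0.6],
    ...                          [0.5, 0.4, 0.6, 0.9]]).astype(np.float32))
    >>> output = net(input)
    >>> print(output.shape)
    (2, 4)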
@@ -418,7 +415,7 @@ class BatchNorm2d(_BatchNorm):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> net = nn.BatchNorm2d(num_features=3)
        >>> input = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
        >>> output = net(input)
@@ -522,7 +519,7 @@ class BatchNorm3d(Cell):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> net = nn.BatchNorm3d(num_features=3)
        >>> input = Tensor(np.ones([16, 3, 10, 32, 32]).astype(np.float32))
        >>> output = net(input)
@@ -627,7 +624,8 @@ class GlobalBatchNorm(_BatchNorm):
        >>> from mindspore.communication import init
        >>> from mindspore import context
        >>> from mindspore.context import ParallelMode
        >>> from mindspore import nn, Tensor
        >>> from mindspore import nn
        >>> from mindspore import Tensor
        >>> from mindspore.common import dtype as mstype
        >>>
        >>> context.set_context(mode=context.GRAPH_MODE)
@@ -745,7 +743,8 @@ class SyncBatchNorm(_BatchNorm):
        >>> from mindspore.communication import init
        >>> from mindspore import context
        >>> from mindspore.context import ParallelMode
        >>> from mindspore import nn, Tensor
        >>> from mindspore import Tensor
        >>> from mindspore import nn
        >>> from mindspore.common import dtype as mstype
        >>>
        >>> context.set_context(mode=context.GRAPH_MODE)
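The GlobalBatchNorm and SyncBatchNorm hunks share this preamble. A sketch of how it typically continues, assuming a launched multi-device job; the init() call, the DATA_PARALLEL mode, and the process_groups value are illustrative, not taken from the diff:

    >>> context.set_context(mode=context.GRAPH_MODE)
    >>> init()  # requires a configured distributed environment
    >>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
    >>> # devices 0 and 1 synchronize their batch statistics
    >>> sync_bn = nn.SyncBatchNorm(num_features=3, process_groups=[[0, 1]])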
@@ -949,6 +948,7 @@ class InstanceNorm2d(Cell):
        class inheriting from `Initializer` not exists.

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor
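The InstanceNorm2d hunk stops at the imports; a minimal sketch of a complete example, with the num_features value and input shape chosen for illustration:

    >>> import mindspore
    >>> import numpy as np
    >>> import mindspore.nn as nn
    >>> from mindspore import Tensor
    >>> net = nn.InstanceNorm2d(num_features=3)
    >>> # NCHW input; each (H, W) plane is normalized per sample and channel
    >>> x = Tensor(np.ones([2, 3, 2, 2]), mindspore.float32)
    >>> output = net(x)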
@@ -378,6 +378,7 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor
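A sketch of how this example plausibly continues after the imports shown in the hunk, assuming the docstring used the sparse form of the loss:

    >>> # sparse=True accepts class indices instead of one-hot labels
    >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    >>> logits = Tensor(np.array([[3, 5, 6, 9, 12, 33, 42, 12, 32, 72]]), mindspore.float32)
    >>> labels = Tensor(np.array([1]), mindspore.int32)
    >>> output = loss(logits, labels)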
@@ -5138,7 +5138,8 @@ class ApplyAdaMax(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
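This and every optimizer-op hunk below truncate at the same `class Net(nn.Cell):` wrapper. A minimal sketch of that shared pattern, using ops.ApplyAdagrad as the representative primitive; the shapes and values are illustrative:

    >>> import numpy as np
    >>> import mindspore.nn as nn
    >>> from mindspore import Tensor
    >>> from mindspore import Parameter
    >>> from mindspore.ops import operations as ops
    >>> import mindspore.common.dtype as mstype
    >>> class Net(nn.Cell):
    ...     def __init__(self):
    ...         super(Net, self).__init__()
    ...         self.apply_adagrad = ops.ApplyAdagrad()
    ...         # the primitive updates var and accum in place
    ...         self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
    ...         self.accum = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="accum")
    ...     def construct(self, lr, grad):
    ...         return self.apply_adagrad(self.var, self.accum, lr, grad)
    >>> net = Net()
    >>> output = net(Tensor(0.01, mstype.float32), Tensor(np.ones([2, 2]).astype(np.float32)))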
@@ -5279,7 +5280,8 @@ class ApplyAdadelta(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
@@ -5400,7 +5402,8 @@ class ApplyAdagrad(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
@@ -5500,7 +5503,8 @@ class ApplyAdagradV2(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
@@ -5604,7 +5608,8 @@ class SparseApplyAdagrad(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
@@ -5706,7 +5711,8 @@ class SparseApplyAdagradV2(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
@@ -5811,7 +5817,8 @@ class ApplyProximalAdagrad(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> class Net(nn.Cell):
        ...     def __init__(self):
@@ -6043,7 +6050,8 @@ class ApplyAddSign(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> class Net(nn.Cell):
        ...     def __init__(self):
@@ -6171,7 +6179,8 @@ class ApplyPowerSign(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> class Net(nn.Cell):
        ...     def __init__(self):
@@ -6364,7 +6373,8 @@ class ApplyProximalGradientDescent(PrimitiveWithInfer):
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> class Net(nn.Cell):
        ...     def __init__(self):
@@ -6555,8 +6565,8 @@ class ApplyFtrl(PrimitiveWithInfer):
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter, Tensor
        >>> import mindspore.context as context
        >>> from mindspore import Tensor
        >>> from mindspore import Parameter
        >>> from mindspore.ops import operations as ops
        >>> class ApplyFtrlNet(nn.Cell):
        ...     def __init__(self):
@@ -7042,9 +7052,9 @@ class CTCLoss(PrimitiveWithInfer):
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as ops
        >>> inputs = Tensor(np.array([[[0.3, 0.6, 0.6],
        ...                            [0.4, 0.3, 0.9],
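The CTCLoss hunk cuts off inside the `inputs` literal. A hedged completion: the second time step, the label tensors, and the final call are illustrative, reconstructed around the two rows that do appear in the diff:

    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor
    >>> from mindspore.ops import operations as ops
    >>> # (max_time, batch_size, num_classes) activations
    >>> inputs = Tensor(np.array([[[0.3, 0.6, 0.6],
    ...                            [0.4, 0.3, 0.9]],
    ...                           [[0.9, 0.4, 0.2],
    ...                            [0.9, 0.9, 0.1]]]).astype(np.float32))
    >>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64)
    >>> labels_values = Tensor(np.array([2, 2]), mindspore.int32)
    >>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
    >>> ctc_loss = ops.CTCLoss()
    >>> loss, gradient = ctc_loss(inputs, labels_indices, labels_values, sequence_length)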