!17023 fix the formatting and line-too-long problems.

From: @wangshuide2020
Reviewed-by: @liangchenghui, @wuxuejian
Signed-off-by: @liangchenghui
mindspore-ci-bot 2021-05-27 11:45:47 +08:00 committed by Gitee
commit ba4c72d1e3
8 changed files with 54 additions and 28 deletions


@@ -202,12 +202,14 @@ class Flatten(Cell):
    def construct(self, x):
        return F.reshape(x, (F.shape(x)[0], -1))


@constexpr
def check_dense_input_shape(x):
    if len(x) < 2:
        raise ValueError('For Dense, the dimension of input should not be less than 2, while the input dimension is '
                         + f'{len(x)}.')


class Dense(Cell):
    r"""
    The dense connected layer.
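
As context for the new check_dense_input_shape guard, a minimal usage sketch (assuming the standard nn.Dense interface; the commented call shows the error path the reworded message covers):

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, nn
>>> net = nn.Dense(3, 4)
>>> x = Tensor(np.ones([2, 3]), mindspore.float32)
>>> print(net(x).shape)
(2, 4)
>>> # A 1-D input fails the dimension check:
>>> # net(Tensor(np.ones([3]), mindspore.float32))  -> ValueError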


@@ -506,9 +506,10 @@ class Conv3d(_Conv):
    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
-       kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 3 integers. Specifies the depth, height
-           and width of the 3D convolution window. Single int means the value is for the depth, height and the width of
-           the kernel. A tuple of 3 ints means the first value is for the depth, second value is for height and the
+       kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 3 integers.
+           Specifies the depth, height and width of the 3D convolution window.
+           Single int means the value is for the depth, height and the width of the kernel.
+           A tuple of 3 ints means the first value is for the depth, second value is for height and the
            other is for the width of the kernel.
        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
            the depth, height and width of movement are both strides, or a tuple of three int numbers that
@@ -746,7 +747,8 @@ class Conv3dTranspose(_Conv):
    Examples:
        >>> input = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float32)
-       >>> conv3d_transpose = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2), pad_mode='pad')
+       >>> conv3d_transpose = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2),
+       ...                                       pad_mode='pad')
        >>> output = conv3d_transpose(input)
        >>> print(output.shape)
        (32, 3, 13, 37, 33)
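
To make the kernel_size semantics above concrete, a short sketch (assumed shapes, standard nn.Conv3d interface):

>>> # single int: depth = height = width = 3
>>> conv3d = nn.Conv3d(in_channels=16, out_channels=3, kernel_size=3)
>>> # tuple of 3 ints: depth 4, height 3, width 3
>>> conv3d = nn.Conv3d(in_channels=16, out_channels=3, kernel_size=(4, 3, 3))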


@@ -35,12 +35,14 @@ from ..cell import Cell
__all__ = ['Embedding', 'EmbeddingLookup', 'MultiFieldEmbeddingLookup']


@constexpr
def _check_input_2d(input_shape, param_name, func_name):
    if len(input_shape) != 2:
        raise ValueError(f"{func_name} {param_name} should be 2d, but got shape {input_shape}")
    return True


@constexpr
def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
    validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
@@ -438,7 +440,8 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
        TypeError: If `sparse` is not a bool or `feature_num_list` is not a tuple.
        ValueError: If `vocab_size` or `embedding_size` or `field_size` is less than 1.
        ValueError: If `target` is neither 'CPU' nor 'DEVICE'.
-       ValueError: If `slice_mode` is not one of 'batch_slice', 'field_slice', 'table_row_slice', 'table_column_slice'.
+       ValueError: If `slice_mode` is not one of 'batch_slice', 'field_slice', 'table_row_slice',
+           'table_column_slice'.
        ValueError: If `sparse` is False and `target` is 'CPU'.
        ValueError: If `slice_mode` is 'field_slice' and `feature_num_list` is None.
        ValueError: If `operator` is not one of 'SUM', 'MAX', 'MEAN'.
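
For intuition, a NumPy sketch of what SUM-mode multi-field lookup computes (toy sizes assumed; this is an illustration, not the MindSpore implementation):

import numpy as np

table = np.random.randn(10, 2).astype(np.float32)    # (vocab_size, embedding_size)
indices = np.array([[2, 4, 6], [1, 3, 5]])           # (batch, num_features) feature ids
values = np.ones((2, 3), dtype=np.float32)           # per-feature weights
fields = np.array([[0, 1, 1], [0, 0, 1]])            # field id of each feature

batch, field_size = indices.shape[0], 2
out = np.zeros((batch, field_size, table.shape[1]), dtype=np.float32)
for b in range(batch):
    for j in range(indices.shape[1]):
        # accumulate each weighted embedding into its field's slot
        out[b, fields[b, j]] += values[b, j] * table[indices[b, j]]
# out: (batch, field_size, embedding_size); 'MAX'/'MEAN' swap the reduction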


@@ -102,6 +102,7 @@ def _convert_img_dtype_to_float32(img, max_val):
    ret = ret * scale
    return ret


@constexpr
def _get_dtype_max(dtype):
    """get max of the dtype"""
@@ -118,20 +119,24 @@ def _check_input_4d(input_shape, param_name, func_name):
        raise ValueError(f"{func_name} {param_name} should be 4d, but got shape {input_shape}")
    return True


@constexpr
def _check_input_filter_size(input_shape, param_name, filter_size, func_name):
    _check_input_4d(input_shape, param_name, func_name)
    validator.check(param_name + " shape[2]", input_shape[2], "filter_size", filter_size, Rel.GE, func_name)
    validator.check(param_name + " shape[3]", input_shape[3], "filter_size", filter_size, Rel.GE, func_name)


@constexpr
def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
    validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)


def _conv2d(in_channels, out_channels, kernel_size, weight, stride=1, padding=0):
    return Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                  weight_init=weight, padding=padding, pad_mode="valid")


def _create_window(size, sigma):
    x_data, y_data = np.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
    x_data = np.expand_dims(x_data, axis=-1).astype(np.float32)
@@ -142,12 +147,14 @@ def _create_window(size, sigma):
    g = np.exp(-(x_data + y_data) / sigma)
    return np.transpose(g / np.sum(g), (2, 3, 0, 1))


def _split_img(x):
    _, c, _, _ = F.shape(x)
    img_split = P.Split(1, c)
    output = img_split(x)
    return output, c


def _compute_per_channel_loss(c1, c2, img1, img2, conv):
    """computes ssim index between img1 and img2 per single channel"""
    dot_img = img1 * img2
@@ -170,6 +177,7 @@ def _compute_per_channel_loss(c1, c2, img1, img2, conv):
    cs = v1 / v2
    return ssim, cs


def _compute_multi_channel_loss(c1, c2, img1, img2, conv, concat, mean):
    """computes ssim index between img1 and img2 per color channel"""
    split_img1, c = _split_img(img1)
@@ -188,6 +196,7 @@ def _compute_multi_channel_loss(c1, c2, img1, img2, conv, concat, mean):
    cs = mean(multi_cs, (2, 3))
    return ssim, cs


class SSIM(Cell):
    r"""
    Returns SSIM index between two images.
@@ -207,7 +216,8 @@ class SSIM(Cell):
        max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
            Default: 1.0.
        filter_size (int): The size of the Gaussian filter. Default: 11. The value must be greater than or equal to 1.
-       filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5. The value must be greater than 0.
+       filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5.
+           The value must be greater than 0.
        k1 (float): The constant used to generate c1 in the luminance comparison function. Default: 0.01.
        k2 (float): The constant used to generate c2 in the contrast comparison function. Default: 0.03.
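
As a usage sketch for the arguments above (shapes assumed; SSIM expects two 4-D NCHW image batches, per the _check_input_4d validator):

>>> net = nn.SSIM(max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
>>> img1 = Tensor(np.ones([1, 3, 16, 16]), mindspore.float32)
>>> img2 = Tensor(np.ones([1, 3, 16, 16]), mindspore.float32)
>>> output = net(img1, img2)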
@@ -272,11 +282,13 @@ class SSIM(Cell):
        return loss


def _downsample(img1, img2, op):
    a = op(img1)
    b = op(img2)
    return a, b


class MSSSIM(Cell):
    r"""
    Returns MS-SSIM index between two images.
@@ -391,6 +403,7 @@ class MSSSIM(Cell):
        return loss


class PSNR(Cell):
    r"""
    Returns Peak Signal-to-Noise Ratio of two image batches.


@@ -37,15 +37,18 @@ def _create_sequence_length(shape):
    sequence_length = Tensor(np.ones(batch_size, np.int32) * num_step, mstype.int32)
    return sequence_length


@constexpr
def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
    validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)


@constexpr
def _check_input_3d(input_shape, param_name, func_name):
    if len(input_shape) != 3:
        raise ValueError(f"{func_name} {param_name} should be 3d, but got shape {input_shape}")


class LSTM(Cell):
    r"""
    Stacked LSTM (Long Short-Term Memory) layers.
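
A usage sketch of the 3-D input contract that _check_input_3d enforces (sizes assumed):

>>> net = nn.LSTM(10, 16, 2, has_bias=True, batch_first=True, bidirectional=False)
>>> x = Tensor(np.ones([3, 5, 10]), mindspore.float32)   # (batch, seq_len, input_size)
>>> h0 = Tensor(np.ones([2, 3, 16]), mindspore.float32)  # (num_layers * num_directions, batch, hidden_size)
>>> c0 = Tensor(np.ones([2, 3, 16]), mindspore.float32)
>>> output, (hn, cn) = net(x, (h0, c0))
>>> print(output.shape)
(3, 5, 16)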


@@ -35,6 +35,15 @@ __all__ = ['ReduceLogSumExp',
           'MatDet',
           ]

+_BASE_LANCZOS_COEFF = 0.99999999999980993227684700473478
+_LANCZOS_COEFFICIENTS = [676.520368121885098567009190444019,
+                         -1259.13921672240287047156078755283,
+                         771.3234287776530788486528258894,
+                         -176.61502916214059906584551354,
+                         12.507343278686904814458936853,
+                         -0.13857109526572011689554707,
+                         9.984369578019570859563e-6,
+                         1.50563273514931155834e-7]


@constexpr
def _check_input_dtype(param_name, input_dtype, allow_dtypes, cls_name):
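
These are the standard g = 7, n = 9 Lanczos coefficients, now hoisted to module level so LGamma and DiGamma can share them. As a plain-NumPy sketch (not the Cell-based implementation), the approximation they drive is:

import numpy as np

def lanczos_lgamma(x):
    # Sketch: log(Gamma(x)) via the g=7, n=9 Lanczos approximation, valid for x > 0.5.
    # The Cells extend coverage to the rest of the domain (e.g. by reflection).
    z = x - 1.0
    a = _BASE_LANCZOS_COEFF
    for i, c in enumerate(_LANCZOS_COEFFICIENTS):
        a += c / (z + i + 1)
    t = z + 7 + 0.5  # z + k_lanczos_gamma + one_half
    return 0.5 * np.log(2 * np.pi) + (z + 0.5) * np.log(t) - t + np.log(a)

# sanity check: Gamma(1) = 1, so lanczos_lgamma(1.0) ~= 0.0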
@@ -204,15 +213,8 @@ class LGamma(Cell):
        super(LGamma, self).__init__()
        # const numbers
        self.k_lanczos_gamma = 7
-       self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
-       self.k_lanczos_coefficients = [676.520368121885098567009190444019,
-                                      -1259.13921672240287047156078755283,
-                                      771.3234287776530788486528258894,
-                                      -176.61502916214059906584551354,
-                                      12.507343278686904814458936853,
-                                      -0.13857109526572011689554707,
-                                      9.984369578019570859563e-6,
-                                      1.50563273514931155834e-7]
+       self.k_base_lanczos_coeff = _BASE_LANCZOS_COEFF
+       self.k_lanczos_coefficients = _LANCZOS_COEFFICIENTS
        self.one_half = 0.5
        self.one = 1
        self.two = 2
@@ -322,15 +324,8 @@ class DiGamma(Cell):
        super(DiGamma, self).__init__()
        # const numbers
        self.k_lanczos_gamma = 7
-       self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
-       self.k_lanczos_coefficients = [676.520368121885098567009190444019,
-                                      -1259.13921672240287047156078755283,
-                                      771.3234287776530788486528258894,
-                                      -176.61502916214059906584551354,
-                                      12.507343278686904814458936853,
-                                      -0.13857109526572011689554707,
-                                      9.984369578019570859563e-6,
-                                      1.50563273514931155834e-7]
+       self.k_base_lanczos_coeff = _BASE_LANCZOS_COEFF
+       self.k_lanczos_coefficients = _LANCZOS_COEFFICIENTS
        self.nan = np.nan
        self.pi = np.pi
        self.lanczos_gamma_plus_one_half = self.k_lanczos_gamma + 0.5
@@ -383,13 +378,14 @@ class DiGamma(Cell):
eps_fp32 = Tensor(np.finfo(np.float32).eps, mstype.float32)


def _while_helper_func(cond, body, vals):
    while cond(vals).any():
        vals = body(vals)
    return vals


-def _IgammaSeries(ax, x, a, enabled):
+def _igamma_series(ax, x, a, enabled):
    """Helper function for computing Igamma using a power series."""
    logicaland = P.LogicalAnd()
@@ -436,7 +432,7 @@ def _IgammaSeries(ax, x, a, enabled):
    return (ans * ax) / a


-def _IgammacContinuedFraction(ax, x, a, enabled):
+def _igammac_continued_fraction(ax, x, a, enabled):
    """Helper function for computing Igammac using a continued fraction."""
    abs_x = P.Abs()
@@ -632,8 +628,8 @@ class IGamma(Cell):
        ax = self.exp(ax)
        enabled = self.logicalnot(self.logicalor(self.logicalor(x_is_zero, domain_error), underflow))
        output = self.select(use_igammac,
-                            1 - _IgammacContinuedFraction(ax, x, a, self.logicaland(enabled, use_igammac)),
-                            _IgammaSeries(ax, x, a, self.logicaland(enabled, self.logicalnot(use_igammac))))
+                            1 - _igammac_continued_fraction(ax, x, a, self.logicaland(enabled, use_igammac)),
+                            _igamma_series(ax, x, a, self.logicaland(enabled, self.logicalnot(use_igammac))))
        output = self.select(x_is_zero, self.zeroslike(output), output)
        output = self.select(domain_error, self.fill(self.dtype(a), self.shape(a), np.nan), output)
        return output
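
For reference, the two renamed helpers evaluate the regularized incomplete gamma functions, which satisfy P(a, x) + Q(a, x) = 1; this standard identity, not anything specific to this code, is what the `1 - _igammac_continued_fraction(...)` branch relies on:

.. math::
    P(a, x) = \frac{\gamma(a, x)}{\Gamma(a)}, \qquad
    Q(a, x) = \frac{\Gamma(a, x)}{\Gamma(a)} = 1 - P(a, x)

.. math::
    \gamma(a, x) = x^a e^{-x} \sum_{n=0}^{\infty} \frac{x^n}{a (a + 1) \cdots (a + n)}

The power series converges quickly when x is small relative to a, while the continued fraction is the usual choice otherwise; `use_igammac` selects between the two regimes.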


@@ -22,6 +22,7 @@ from ..cell import Cell
__all__ = ['AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d']


class _PoolNd(Cell):
    """N-D AvgPool"""
@@ -31,6 +32,7 @@ class _PoolNd(Cell):
        self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
        if context.get_context("device_target") != "GPU" and self.format == "NHWC":
            raise ValueError("NHWC format is only supported on GPU target.")

        def _check_int_or_tuple(arg_name, arg_value):
            validator.check_value_type(arg_name, arg_value, [int, tuple], self.cls_name)
            error_msg = f'For \'{self.cls_name}\' the {arg_name} should be a positive int number or ' \
@@ -55,11 +57,14 @@ class _PoolNd(Cell):
    def extend_repr(self):
        return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__)


@constexpr
def _shape_check(in_shape):
    if len(in_shape) != 3:
        raise ValueError("The input must have 3 dimensions.")


class MaxPool2d(_PoolNd):
    r"""
    2D max pooling operation for spatial data.
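
A usage sketch (shapes assumed; with the default pad_mode='valid', a 4x4 input, kernel 3, and stride 1 give a 2x2 output):

>>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
>>> output = pool(x)
>>> print(output.shape)
(1, 2, 2, 2)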


@@ -28,6 +28,7 @@ from mindspore.nn.layer.activation import get_activation
__all__ = ['Dense_Thor', 'Conv2d_Thor', 'Embedding_Thor']


class Dense_Thor(Cell):
    r"""
    The dense connected layer.
@@ -547,6 +548,7 @@ class Conv2d_Thor(_Conv):
                             self.bias_init)
        return s


class Embedding_Thor(Cell):
    r"""
    A simple lookup table that stores embeddings of a fixed dictionary and size.