diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc
index 058ff94bfb0..8d22ae86bf9 100644
--- a/mindspore/ccsrc/minddata/dataset/api/execute.cc
+++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc
@@ -564,7 +564,7 @@ std::string Execute::AippCfgGenerator() {
   std::vector<uint32_t> aipp_size = AippSizeFilter(resize_paras, crop_paras);
 
-  // Process normalization parameters to find out the final normalization parameters for Aipp module
+  // Process Normalization parameters to find out the final Normalization parameters for Aipp module
   std::vector<float> normalize_paras;
   if (info_->aipp_cfg_.find(vision::kDvppNormalizeOperation) != info_->aipp_cfg_.end()) {
     for (auto pos = info_->aipp_cfg_.equal_range(vision::kDvppNormalizeOperation); pos.first != pos.second;
diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_op.cc
index 4f05858fb0f..f27496e3992 100644
--- a/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_op.cc
+++ b/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_op.cc
@@ -39,7 +39,7 @@ NormalizeOp::NormalizeOp(float mean_r, float mean_g, float mean_b, float std_r,
 Status NormalizeOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
   IO_CHECK(input, output);
-  // Doing the normalization
+  // Doing the Normalization
   return Normalize(input, output, mean_, std_);
 }
diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_pad_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_pad_op.cc
index e999ad996a2..d171d6f6d1d 100644
--- a/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_pad_op.cc
+++ b/mindspore/ccsrc/minddata/dataset/kernels/image/normalize_pad_op.cc
@@ -37,7 +37,7 @@ NormalizePadOp::NormalizePadOp(float mean_r, float mean_g, float mean_b, float s
 Status NormalizePadOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
   IO_CHECK(input, output);
-  // Doing the normalization + pad
+  // Doing the Normalization + pad
   return NormalizePad(input, output, mean_, std_, dtype_);
 }
diff --git a/mindspore/nn/acc/less_batch_normalization.py b/mindspore/nn/acc/less_batch_normalization.py
index 79ce219d074..b2c59137403 100644
--- a/mindspore/nn/acc/less_batch_normalization.py
+++ b/mindspore/nn/acc/less_batch_normalization.py
@@ -27,7 +27,7 @@ __all__ = ["LessBN"]
 
 class CommonHeadLastFN(Cell):
     r"""
-    The last full normalization layer.
+    The last full Normalization layer.
 
     This layer implements the operation as:
diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py
index 6d55ecbd098..93142d8f400 100644
--- a/mindspore/nn/layer/normalization.py
+++ b/mindspore/nn/layer/normalization.py
@@ -538,12 +538,12 @@ class BatchNorm3d(Cell):
 
 class GlobalBatchNorm(_BatchNorm):
     r"""
-    Global normalization layer over a N-dimension input.
+    Global Batch Normalization layer over an N-dimension input.
 
-    Global Normalization is cross device synchronized Batch Normalization. The implementation of Batch Normalization
-    only normalizes the data within each device. Global normalization will normalize the input within the group.
-    It has been described in the paper `Batch Normalization: Accelerating Deep Network Training by
-    Reducing Internal Covariate Shift `_. It rescales and recenters the
+    Global Batch Normalization is cross device synchronized Batch Normalization. The implementation of
+    Batch Normalization only normalizes the data within each device.
+    Global Normalization will normalize the input within the group. It has been described in the paper
+    `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
+    Covariate Shift `_. It rescales and recenters the
     feature using a mini-batch of data and the learned parameters which can be described in the following formula.
 
     .. math::
@@ -1003,9 +1003,9 @@ class GroupNorm(Cell):
     r"""
     Group Normalization over a mini-batch of inputs.
 
-    Group normalization is widely used in recurrent neural networks. It applies
+    Group Normalization is widely used in recurrent neural networks. It applies
     normalization on a mini-batch of inputs for each single training case as described
-    in the paper `Group Normalization `_. Group normalization
+    in the paper `Group Normalization `_. Group Normalization
     divides the channels into groups and computes within each group the mean
     and variance for normalization, and it performs very stable over a wide
     range of batch size. It can be described using the following formula.
diff --git a/mindspore/nn/metrics/confusion_matrix.py b/mindspore/nn/metrics/confusion_matrix.py
index 1d44d0c3ef4..2771bada9f9 100644
--- a/mindspore/nn/metrics/confusion_matrix.py
+++ b/mindspore/nn/metrics/confusion_matrix.py
@@ -32,7 +32,7 @@ class ConfusionMatrix(Metric):
         num_classes (int): Number of classes in the dataset.
         normalize (str): The parameter of calculating ConfusionMatrix supports four Normalization modes, Choose from:
 
-            - **'no_norm'** (None) - No normalization is used. Default: None.
+            - **'no_norm'** (None) - No Normalization is used. Default: None.
             - **'target'** (str) - Normalization based on target value.
             - **'prediction'** (str) - Normalization based on predicted value.
             - **'all'** (str) - Normalization over the whole matrix.
diff --git a/mindspore/numpy/math_ops.py b/mindspore/numpy/math_ops.py
index cba407281a8..34abc989118 100644
--- a/mindspore/numpy/math_ops.py
+++ b/mindspore/numpy/math_ops.py
@@ -2300,9 +2300,9 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=N
             a variable, with observations in the columns. Otherwise, the relationship
             is transposed: each column represents a variable, while the rows
             contain observations.
-        bias (bool, optional): Default normalization (``False``) is by :math:`(N - 1)`, where
+        bias (bool, optional): Default Normalization (``False``) is by :math:`(N - 1)`, where
             :math:`N` is the number of observations given (unbiased estimate). If bias is
-            ``True``, then normalization is by `N`. These values can be overridden by
+            ``True``, then Normalization is by `N`. These values can be overridden by
             using the keyword `ddof`.
         ddof (int, optional): If not ``None``, the default value implied by `bias` is
             overridden. Note that :math:`ddof=1` will return the unbiased estimate, even
@@ -2364,7 +2364,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=N
     avg = average(m, axis=1, weights=w)
 
-    # Determine the normalization
+    # Determine the Normalization
     if w is None:
         fact = m.shape[1] - ddof
     else:
diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py
index fb9d92f355b..cb71d46f5c0 100644
--- a/mindspore/ops/operations/_grad_ops.py
+++ b/mindspore/ops/operations/_grad_ops.py
@@ -1141,7 +1141,7 @@ class L2NormalizeGrad(PrimitiveWithInfer):
 
 class LayerNormGrad(Primitive):
     """
-    Applies the layer normalization to the input array.
+    Applies the layer Normalization to the input array.
 
     This operator will calculate the input gradients of layernorm.
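
Note for readers: the `cov` hunk above only retouches the comment over the normalization-factor branch (`fact = m.shape[1] - ddof`). For anyone unfamiliar with the bias/ddof interplay that docstring describes, here is a minimal NumPy sketch of the unweighted case; `cov_fact` is a hypothetical helper name for illustration, not part of MindSpore's API:

    import numpy as np

    # ddof defaults to 1 when bias=False (divide by N - 1, unbiased) and
    # to 0 when bias=True (divide by N); an explicit ddof overrides both.
    def cov_fact(num_obs, bias=False, ddof=None):
        if ddof is None:
            ddof = 0 if bias else 1
        return num_obs - ddof

    m = np.array([[1.0, 2.0, 3.0],
                  [2.0, 1.0, 0.0]])          # two variables, three observations
    x = m - m.mean(axis=1, keepdims=True)    # subtract the per-variable average
    scatter = x @ x.T                        # unnormalized scatter matrix
    print(np.allclose(scatter / cov_fact(3), np.cov(m)))                        # True
    print(np.allclose(scatter / cov_fact(3, bias=True), np.cov(m, bias=True)))  # True
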
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 46d776ec973..f0c14685317 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -816,7 +816,7 @@ class FusedBatchNormEx(PrimitiveWithCheck):
 
 class InstanceNorm(PrimitiveWithInfer):
     r"""
-    Instance normalization over a 4D input.
+    Instance Normalization over a 4D input.
 
     This operator applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with
     additional channel dimension) as described in the paper `Instance Normalization: The Missing Ingredient for
diff --git a/model_zoo/official/nlp/bert/src/CRF.py b/model_zoo/official/nlp/bert/src/CRF.py
index 26685b1bcaf..1f0e67d7ba2 100644
--- a/model_zoo/official/nlp/bert/src/CRF.py
+++ b/model_zoo/official/nlp/bert/src/CRF.py
@@ -74,7 +74,7 @@ class CRF(nn.Cell):
 
     def log_sum_exp(self, logits):
         '''
-        Compute the log_sum_exp score for normalization factor.
+        Compute the log_sum_exp score for Normalization factor.
         '''
         max_score = self.reduce_max(logits, -1)  #16 5 5
         score = self.log(self.reduce_sum(self.exp(logits - max_score), -1))
diff --git a/model_zoo/research/audio/deepspeech2/src/dataset.py b/model_zoo/research/audio/deepspeech2/src/dataset.py
index 8409ce718a1..001bb170a82 100644
--- a/model_zoo/research/audio/deepspeech2/src/dataset.py
+++ b/model_zoo/research/audio/deepspeech2/src/dataset.py
@@ -86,7 +86,7 @@ class ASRDataset(LoadAudioAndTranscript):
         audio_conf: Config containing the sample rate, window and the window length/stride in seconds
         manifest_filepath (str): manifest_file path.
         labels (list): List containing all the possible characters to map to
-        normalize: Apply standard mean and deviation normalization to audio tensor
+        normalize: Apply standard mean and deviation Normalization to audio tensor
         batch_size (int): Dataset batch size (default=32)
     """
     def __init__(self, audio_conf=None,
@@ -195,7 +195,7 @@ def create_dataset(audio_conf, manifest_filepath, labels, normalize, batch_size,
         audio_conf: Config containing the sample rate, window and the window length/stride in seconds
         manifest_filepath (str): manifest_file path.
         labels (list): list containing all the possible characters to map to
-        normalize: Apply standard mean and deviation normalization to audio tensor
+        normalize: Apply standard mean and deviation Normalization to audio tensor
         train_mode (bool): Whether dataset is use for train or eval (default=True).
         batch_size (int): Dataset batch size
         rank (int): The shard ID within num_shards (default=None).
diff --git a/model_zoo/research/audio/wavenet/README.md b/model_zoo/research/audio/wavenet/README.md
index 20650e7e691..5ef503c4af7 100644
--- a/model_zoo/research/audio/wavenet/README.md
+++ b/model_zoo/research/audio/wavenet/README.md
@@ -75,13 +75,13 @@ Dataset used: [The LJ Speech Dataset]()
     ├── egs                        // Note the egs folder should be downloaded from the above link
     ├── utils                      // Note the utils folder should be downloaded from the above link
     ├── audio.py                   // Audio utils. Note this script should be downloaded from the above link
-    ├── compute-meanvar-stats.py   // Compute mean-variance normalization stats. Note this script should be downloaded from the above link
+    ├── compute-meanvar-stats.py   // Compute mean-variance Normalization stats. Note this script should be downloaded from the above link
     ├── evaluate.py                // Evaluation
     ├── export.py                  // Convert mindspore model to air model
     ├── hparams.py                 // Hyper-parameter configuration. Note this script should be downloaded from the above link
     ├── mksubset.py                // Make subset of dataset. Note this script should be downloaded from the above link
     ├── preprocess.py              // Preprocess dataset. Note this script should be downloaded from the above link
-    ├── preprocess_normalize.py    // Perform meanvar normalization to preprocessed features. Note this script should be downloaded from the above link
+    ├── preprocess_normalize.py    // Perform meanvar Normalization to preprocessed features. Note this script should be downloaded from the above link
     ├── README.md                  // Descriptions about WaveNet
     ├── train.py                   // Training scripts
     ├── train_pytorch.py           // Note this script should be downloaded from the above link. The initial name of this script is train.py in the project from the link
diff --git a/tests/st/networks/models/bert/src/CRF.py b/tests/st/networks/models/bert/src/CRF.py
index 6c9fd5ea961..5c276fb0b46 100644
--- a/tests/st/networks/models/bert/src/CRF.py
+++ b/tests/st/networks/models/bert/src/CRF.py
@@ -30,7 +30,7 @@ class CRF(nn.Cell):
     Args:
         tag_to_index: The dict for tag to index mapping with extra "<START>" and "<STOP>" sign.
         batch_size: Batch size, i.e., the length of the first dimension.
-        seq_length: Sequence length, i.e., the length of the second dimention.
+        seq_length: Sequence length, i.e., the length of the second dimension.
         is_training: Specifies whether to use training mode.
     Returns:
         Training mode: Tensor, total loss.
@@ -74,7 +74,7 @@ class CRF(nn.Cell):
 
     def log_sum_exp(self, logits):
         '''
-        Compute the log_sum_exp score for normalization factor.
+        Compute the log_sum_exp score for Normalization factor.
         '''
         max_score = self.reduce_max(logits, -1)  #16 5 5
         score = self.log(self.reduce_sum(self.exp(logits - max_score), -1))
diff --git a/tests/ut/python/dataset/test_normalizeOp.py b/tests/ut/python/dataset/test_normalizeOp.py
index e4788a055f6..2160ee73d0b 100644
--- a/tests/ut/python/dataset/test_normalizeOp.py
+++ b/tests/ut/python/dataset/test_normalizeOp.py
@@ -31,7 +31,7 @@ GENERATE_GOLDEN = False
 
 def normalize_np(image, mean, std):
     """
-    Apply the normalization
+    Apply the Normalization
     """
     # DE decodes the image in RGB by default, hence
    # the values here are in RGB
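
Note for readers: both CRF hunks touch the comment over `log_sum_exp`, which computes the normalization factor (the log-partition term) of the CRF using the usual max-shift trick so that `exp()` cannot overflow. A minimal self-contained NumPy sketch of the same computation, assuming nothing from MindSpore:

    import numpy as np

    def log_sum_exp(logits, axis=-1):
        # Shift by the max before exponentiating so exp() stays finite;
        # adding the max back afterwards leaves the result unchanged.
        max_score = logits.max(axis=axis, keepdims=True)
        score = np.log(np.exp(logits - max_score).sum(axis=axis, keepdims=True))
        return np.squeeze(score + max_score, axis=axis)

    logits = np.array([[1000.0, 1001.0, 1002.0]])
    print(log_sum_exp(logits))  # [1002.40760596]; the naive log(sum(exp(x))) overflows here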